diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 000000000..ed13dfa68 --- /dev/null +++ b/.coveragerc @@ -0,0 +1,2 @@ +[report] +omit=src/yunohost/tests/*,src/yunohost/vendor/*,/usr/lib/moulinette/yunohost/* diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 000000000..c3b460087 --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1,4 @@ +# These are supported funding model platforms + +custom: https://donate.yunohost.org +liberapay: YunoHost diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 9642e92f6..953e2940f 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -13,10 +13,3 @@ ## How to test ... - -## Validation - -- [ ] Principle agreement 0/2 : -- [ ] Quick review 0/1 : -- [ ] Simple test 0/1 : -- [ ] Deep review 0/1 : diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 000000000..d1cb36b73 --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,30 @@ +--- +stages: + - build + - install + - tests + - lint + - doc + - translation + +default: + tags: + - yunohost-ci + # All jobs are interruptible by default + interruptible: true + +# see: https://docs.gitlab.com/ee/ci/yaml/#switch-between-branch-pipelines-and-merge-request-pipelines +workflow: + rules: + - if: $CI_PIPELINE_SOURCE == "merge_request_event" # If we move to gitlab one day + - if: $CI_PIPELINE_SOURCE == "external_pull_request_event" # For github PR + - if: $CI_COMMIT_TAG # For tags + - if: $CI_COMMIT_REF_NAME != $CI_DEFAULT_BRANCH && $CI_PIPELINE_SOURCE == "push" # If it's not the default branch and if it's a push, then do not trigger a build + when: never + - when: always + +variables: + YNH_BUILD_DIR: "ynh-build" + +include: + - local: .gitlab/ci/*.gitlab-ci.yml diff --git a/.gitlab/ci/build.gitlab-ci.yml b/.gitlab/ci/build.gitlab-ci.yml new file mode 100644 index 000000000..717a5ee73 --- /dev/null +++ b/.gitlab/ci/build.gitlab-ci.yml @@ -0,0 +1,54 @@ +.build-stage: + stage: 
build + image: "before-install" + variables: + YNH_SOURCE: "https://github.com/yunohost" + before_script: + - mkdir -p $YNH_BUILD_DIR + artifacts: + paths: + - $YNH_BUILD_DIR/*.deb + +.build_script: &build_script + - cd $YNH_BUILD_DIR/$PACKAGE + - VERSION=$(dpkg-parsechangelog -S Version 2>/dev/null) + - VERSION_NIGHTLY="${VERSION}+$(date +%Y%m%d%H%M)" + - dch --package "${PACKAGE}" --force-bad-version -v "${VERSION_NIGHTLY}" -D "unstable" --force-distribution "Daily build." + - debuild --no-lintian -us -uc + +######################################## +# BUILD DEB +######################################## + +build-yunohost: + extends: .build-stage + variables: + PACKAGE: "yunohost" + script: + - git ls-files | xargs tar -czf archive.tar.gz + - mkdir -p $YNH_BUILD_DIR/$PACKAGE + - cat archive.tar.gz | tar -xz -C $YNH_BUILD_DIR/$PACKAGE + - rm archive.tar.gz + - DEBIAN_FRONTEND=noninteractive apt --assume-yes -o Dpkg::Options::="--force-confold" build-dep $(pwd)/$YNH_BUILD_DIR/$PACKAGE + - *build_script + + +build-ssowat: + extends: .build-stage + variables: + PACKAGE: "ssowat" + script: + - DEBIAN_DEPENDS=$(cat debian/control | tr "," "\n" | grep -Po "ssowat \([>,=,<]+ .*\)" | grep -Po "[0-9\.]+") + - git clone $YNH_SOURCE/$PACKAGE -b $CI_COMMIT_REF_NAME $YNH_BUILD_DIR/$PACKAGE --depth 1 || git clone $YNH_SOURCE/$PACKAGE -b $DEBIAN_DEPENDS $YNH_BUILD_DIR/$PACKAGE --depth 1 || git clone $YNH_SOURCE/$PACKAGE $YNH_BUILD_DIR/$PACKAGE --depth 1 + - DEBIAN_FRONTEND=noninteractive apt --assume-yes -o Dpkg::Options::="--force-confold" build-dep $(pwd)/$YNH_BUILD_DIR/$PACKAGE + - *build_script + +build-moulinette: + extends: .build-stage + variables: + PACKAGE: "moulinette" + script: + - DEBIAN_DEPENDS=$(cat debian/control | tr "," "\n" | grep -Po "moulinette \([>,=,<]+ .*\)" | grep -Po "[0-9\.]+") + - git clone $YNH_SOURCE/$PACKAGE -b $CI_COMMIT_REF_NAME $YNH_BUILD_DIR/$PACKAGE --depth 1 || git clone $YNH_SOURCE/$PACKAGE -b $DEBIAN_DEPENDS $YNH_BUILD_DIR/$PACKAGE --depth 1 
|| git clone $YNH_SOURCE/$PACKAGE $YNH_BUILD_DIR/$PACKAGE --depth 1 + - DEBIAN_FRONTEND=noninteractive apt --assume-yes -o Dpkg::Options::="--force-confold" build-dep $(pwd)/$YNH_BUILD_DIR/$PACKAGE + - *build_script diff --git a/.gitlab/ci/doc.gitlab-ci.yml b/.gitlab/ci/doc.gitlab-ci.yml new file mode 100644 index 000000000..59179f7a7 --- /dev/null +++ b/.gitlab/ci/doc.gitlab-ci.yml @@ -0,0 +1,27 @@ +######################################## +# DOC +######################################## + +generate-helpers-doc: + stage: doc + image: "before-install" + needs: [] + before_script: + - apt-get update -y && apt-get install git hub -y + - git config --global user.email "yunohost@yunohost.org" + - git config --global user.name "$GITHUB_USER" + script: + - cd doc + - python3 generate_helper_doc.py + - hub clone https://$GITHUB_TOKEN:x-oauth-basic@github.com/YunoHost/doc.git doc_repo + - cp helpers.md doc_repo/pages/04.contribute/04.packaging_apps/11.helpers/packaging_apps_helpers.md + - cd doc_repo + # replace ${CI_COMMIT_REF_NAME} with ${CI_COMMIT_TAG} ? 
+ - hub checkout -b "${CI_COMMIT_REF_NAME}" + - hub commit -am "[CI] Helper for ${CI_COMMIT_REF_NAME}" + - hub pull-request -m "[CI] Helper for ${CI_COMMIT_REF_NAME}" -p # GITHUB_USER and GITHUB_TOKEN registered here https://gitlab.com/yunohost/yunohost/-/settings/ci_cd + artifacts: + paths: + - doc/helpers.md + only: + - tags diff --git a/.gitlab/ci/install.gitlab-ci.yml b/.gitlab/ci/install.gitlab-ci.yml new file mode 100644 index 000000000..e2662e9e2 --- /dev/null +++ b/.gitlab/ci/install.gitlab-ci.yml @@ -0,0 +1,29 @@ +.install-stage: + stage: install + needs: + - job: build-yunohost + artifacts: true + - job: build-ssowat + artifacts: true + - job: build-moulinette + artifacts: true + +######################################## +# INSTALL DEB +######################################## + +upgrade: + extends: .install-stage + image: "after-install" + script: + - apt-get update -o Acquire::Retries=3 + - DEBIAN_FRONTEND=noninteractive SUDO_FORCE_REMOVE=yes apt --assume-yes -o Dpkg::Options::="--force-confold" --allow-downgrades install ./$YNH_BUILD_DIR/*.deb + + +install-postinstall: + extends: .install-stage + image: "before-install" + script: + - apt-get update -o Acquire::Retries=3 + - DEBIAN_FRONTEND=noninteractive SUDO_FORCE_REMOVE=yes apt --assume-yes -o Dpkg::Options::="--force-confold" --allow-downgrades install ./$YNH_BUILD_DIR/*.deb + - yunohost tools postinstall -d domain.tld -p the_password --ignore-dyndns --force-diskspace diff --git a/.gitlab/ci/lint.gitlab-ci.yml b/.gitlab/ci/lint.gitlab-ci.yml new file mode 100644 index 000000000..aaddb5a0a --- /dev/null +++ b/.gitlab/ci/lint.gitlab-ci.yml @@ -0,0 +1,57 @@ +######################################## +# LINTER +######################################## +# later we must fix lint and format-check jobs and remove "allow_failure" + +--- +lint37: + stage: lint + image: "before-install" + needs: [] + allow_failure: true + script: + - tox -e py37-lint + +invalidcode37: + stage: lint + image: "before-install" + 
needs: [] + script: + - tox -e py37-invalidcode + +mypy: + stage: lint + image: "before-install" + needs: [] + script: + - tox -e py37-mypy + +format-check: + stage: lint + image: "before-install" + allow_failure: true + needs: [] + script: + - tox -e py37-black-check + +format-run: + stage: lint + image: "before-install" + needs: [] + before_script: + - apt-get update -y && apt-get install git hub -y + - git config --global user.email "yunohost@yunohost.org" + - git config --global user.name "$GITHUB_USER" + - hub clone --branch ${CI_COMMIT_REF_NAME} "https://$GITHUB_TOKEN:x-oauth-basic@github.com/YunoHost/yunohost.git" github_repo + - cd github_repo + script: + # create a local branch that will overwrite distant one + - git checkout -b "ci-format-${CI_COMMIT_REF_NAME}" --no-track + - tox -e py37-black-run + - '[ $(git diff | wc -l) != 0 ] || exit 0' # stop if there is nothing to commit + - git commit -am "[CI] Format code" || true + - git push -f origin "ci-format-${CI_COMMIT_REF_NAME}":"ci-format-${CI_COMMIT_REF_NAME}" + - hub pull-request -m "[CI] Format code" -b Yunohost:dev -p || true # GITHUB_USER and GITHUB_TOKEN registered here https://gitlab.com/yunohost/yunohost/-/settings/ci_cd + only: + refs: + - dev diff --git a/.gitlab/ci/test.gitlab-ci.yml b/.gitlab/ci/test.gitlab-ci.yml new file mode 100644 index 000000000..b3aea606f --- /dev/null +++ b/.gitlab/ci/test.gitlab-ci.yml @@ -0,0 +1,208 @@ +.install_debs: &install_debs + - apt-get update -o Acquire::Retries=3 + - DEBIAN_FRONTEND=noninteractive SUDO_FORCE_REMOVE=yes apt --assume-yes -o Dpkg::Options::="--force-confold" --allow-downgrades install ./$YNH_BUILD_DIR/*.deb + +.test-stage: + stage: tests + image: "after-install" + variables: + PYTEST_ADDOPTS: "--color=yes" + before_script: + - *install_debs + cache: + paths: + - src/yunohost/tests/apps + key: "$CI_JOB_STAGE-$CI_COMMIT_REF_SLUG" + needs: + - job: build-yunohost + artifacts: true + - job: build-ssowat + artifacts: true + - job: build-moulinette + 
artifacts: true + - job: upgrade + + +######################################## +# TESTS +######################################## + +full-tests: + stage: tests + image: "before-install" + variables: + PYTEST_ADDOPTS: "--color=yes" + before_script: + - *install_debs + - yunohost tools postinstall -d domain.tld -p the_password --ignore-dyndns --force-diskspace + script: + - python3 -m pytest --cov=yunohost tests/ src/yunohost/tests/ --junitxml=report.xml + - cd tests + - bash test_helpers.sh + needs: + - job: build-yunohost + artifacts: true + - job: build-ssowat + artifacts: true + - job: build-moulinette + artifacts: true + artifacts: + reports: + junit: report.xml + +test-i18n-keys: + extends: .test-stage + script: + - python3 -m pytest tests/test_i18n_keys.py + only: + changes: + - locales/en.json + - src/yunohost/*.py + - data/hooks/diagnosis/*.py + +test-translation-format-consistency: + extends: .test-stage + script: + - python3 -m pytest tests/test_translation_format_consistency.py + only: + changes: + - locales/* + +test-actionmap: + extends: .test-stage + script: + - python3 -m pytest tests/test_actionmap.py + only: + changes: + - data/actionsmap/*.yml + +test-helpers: + extends: .test-stage + script: + - cd tests + - bash test_helpers.sh + only: + changes: + - data/helpers.d/* + +test-domains: + extends: .test-stage + script: + - python3 -m pytest src/yunohost/tests/test_domains.py + only: + changes: + - src/yunohost/domain.py + +test-dns: + extends: .test-stage + script: + - python3 -m pytest src/yunohost/tests/test_dns.py + only: + changes: + - src/yunohost/dns.py + - src/yunohost/utils/dns.py + +test-apps: + extends: .test-stage + script: + - python3 -m pytest src/yunohost/tests/test_apps.py + only: + changes: + - src/yunohost/app.py + +test-appscatalog: + extends: .test-stage + script: + - python3 -m pytest src/yunohost/tests/test_appscatalog.py + only: + changes: + - src/yunohost/app.py + +test-appurl: + extends: .test-stage + script: + - python3 -m 
pytest src/yunohost/tests/test_appurl.py + only: + changes: + - src/yunohost/app.py + +test-questions: + extends: .test-stage + script: + - python3 -m pytest src/yunohost/tests/test_questions.py + only: + changes: + - src/yunohost/utils/config.py + +test-app-config: + extends: .test-stage + script: + - python3 -m pytest src/yunohost/tests/test_app_config.py + only: + changes: + - src/yunohost/app.py + - src/yunohost/utils/config.py + +test-changeurl: + extends: .test-stage + script: + - python3 -m pytest src/yunohost/tests/test_changeurl.py + only: + changes: + - src/yunohost/app.py + +test-backuprestore: + extends: .test-stage + script: + - python3 -m pytest src/yunohost/tests/test_backuprestore.py + only: + changes: + - src/yunohost/backup.py + +test-permission: + extends: .test-stage + script: + - python3 -m pytest src/yunohost/tests/test_permission.py + only: + changes: + - src/yunohost/permission.py + +test-settings: + extends: .test-stage + script: + - python3 -m pytest src/yunohost/tests/test_settings.py + only: + changes: + - src/yunohost/settings.py + +test-user-group: + extends: .test-stage + script: + - python3 -m pytest src/yunohost/tests/test_user-group.py + only: + changes: + - src/yunohost/user.py + +test-regenconf: + extends: .test-stage + script: + - python3 -m pytest src/yunohost/tests/test_regenconf.py + only: + changes: + - src/yunohost/regenconf.py + +test-service: + extends: .test-stage + script: + - python3 -m pytest src/yunohost/tests/test_service.py + only: + changes: + - src/yunohost/service.py + +test-ldapauth: + extends: .test-stage + script: + - python3 -m pytest src/yunohost/tests/test_ldapauth.py + only: + changes: + - src/yunohost/authenticators/*.py diff --git a/.gitlab/ci/translation.gitlab-ci.yml b/.gitlab/ci/translation.gitlab-ci.yml new file mode 100644 index 000000000..41e8c82d2 --- /dev/null +++ b/.gitlab/ci/translation.gitlab-ci.yml @@ -0,0 +1,29 @@ +######################################## +# TRANSLATION 
+######################################## + +autofix-translated-strings: + stage: translation + image: "before-install" + needs: [] + before_script: + - apt-get update -y && apt-get install git hub -y + - git config --global user.email "yunohost@yunohost.org" + - git config --global user.name "$GITHUB_USER" + - git remote set-url origin https://$GITHUB_TOKEN:x-oauth-basic@github.com/YunoHost/yunohost.git + script: + - cd tests # Maybe move this script location to another folder? + # create a local branch that will overwrite distant one + - git checkout -b "ci-autofix-translated-strings-${CI_COMMIT_REF_NAME}" --no-track + - python3 remove_stale_translated_strings.py + - python3 autofix_locale_format.py + - python3 reformat_locales.py + - '[ $(git diff -w | wc -l) != 0 ] || exit 0' # stop if there is nothing to commit + - git commit -am "[CI] Reformat / remove stale translated strings" || true + - git push -f origin "HEAD":"ci-remove-stale-translated-strings-${CI_COMMIT_REF_NAME}" + - hub pull-request -m "[CI] Reformat / remove stale translated strings" -b Yunohost:dev -p || true # GITHUB_USER and GITHUB_TOKEN registered here https://gitlab.com/yunohost/yunohost/-/settings/ci_cd + only: + variables: + - $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH + changes: + - locales/* diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 25fe0e5fc..000000000 --- a/.travis.yml +++ /dev/null @@ -1,5 +0,0 @@ -language: python -install: "pip install pytest pyyaml" -python: - - "2.7" -script: "py.test tests" diff --git a/README.md b/README.md index 4bd070bea..9fc93740d 100644 --- a/README.md +++ b/README.md @@ -1,42 +1,43 @@ -# YunoHost core +
+
+
+
+
- {% if not '\n' in h.usage %}
- Usage: {{ h.usage }}
- {% else %}
- Usage: {{ h.usage }}
- {% endif %}
-
- Arguments: -
{{ infos[0] }}
: {{ infos[1] }}{{ infos[0] }}
, {{ infos[1] }}
: {{ infos[2] }}- Returns: {{ h.ret }} -
- {% endif %} - {% if "example" in h.keys() %} -
- Example: {{ h.example }}
-
- Examples:
{{ example }}
- {% else %}
- {{ example.strip("# ") }}
- {% endif %}
- - Details: -
- {{ h.details.replace('\n', '') }} -
- - {% endif %} - - -Generated by this script on {{data.date}} (Yunohost version {{data.version}})
- - - diff --git a/doc/helper_doc_template.md b/doc/helper_doc_template.md new file mode 100644 index 000000000..d41c0b6e9 --- /dev/null +++ b/doc/helper_doc_template.md @@ -0,0 +1,59 @@ +--- +title: App helpers +template: docs +taxonomy: + category: docs +routes: + default: '/packaging_apps_helpers' +--- + +Doc auto-generated by [this script](https://github.com/YunoHost/yunohost/blob/{{ current_commit }}/doc/generate_helper_doc.py) on {{data.date}} (YunoHost version {{data.version}}) + +{% for category, helpers in data.helpers %} +## {{ category.upper() }} +{% for h in helpers %} +#### {{ h.name }} +[details summary="{{ h.brief }}" class="helper-card-subtitle text-muted"] + +**Usage**: `{{ h.usage }}` +{%- if h.args %} + +**Arguments**: + {%- for infos in h.args %} + {%- if infos|length == 2 %} +- `{{ infos[0] }}`: {{ infos[1] }} + {%- else %} +- `{{ infos[0] }}`, `{{ infos[1] }}`: {{ infos[2] }} + {%- endif %} + {%- endfor %} +{%- endif %} +{%- if h.ret %} + +**Returns**: {{ h.ret }} +{%- endif %} +{%- if "example" in h.keys() %} + +**Example**: `{{ h.example }}` +{%- endif %} +{%- if "examples" in h.keys() %} + +**Examples**: + {% for example in h.examples %} + {% if not example.strip().startswith("# ") %} +- `{{ example }}` + {% else %} +- `{{ example.strip("# ") }}` + {% endif %} + {% endfor %} +{%- endif %} +{%- if h.details %} + +**Details**:{file}
sembla haver estat modificat manualment.",
+ "diagnosis_security_vulnerable_to_meltdown_details": "Per arreglar-ho, hauríeu d'actualitzar i reiniciar el sistema per tal de carregar el nou nucli de linux (o contactar amb el proveïdor del servidor si no funciona). Vegeu https://meltdownattack.com/ per a més informació.",
+ "diagnosis_http_could_not_diagnose": "No s'ha pogut diagnosticar si el domini és accessible des de l'exterior.",
+ "diagnosis_http_could_not_diagnose_details": "Error: {error}",
+ "domain_cannot_remove_main_add_new_one": "No es pot eliminar «{domain}» ja que és el domini principal i únic domini, primer s'ha d'afegir un altre domini utilitzant «yunohost domain add /etc/resolv.conf
no apunta cap a 127.0.0.1
.",
+ "diagnosis_ip_weird_resolvconf": "La resolució DNS sembla estar funcionant, però sembla que esteu utilitzant un versió personalitzada de /etc/resolv.conf
.",
+ "diagnosis_ip_weird_resolvconf_details": "El fitxer etc/resolv.conf
hauria de ser un enllaç simbòlic cap a /etc/resolvconf/run/resolv.conf
i que aquest apunti cap a 127.0.0.1
(dnsmasq). La configuració del «resolver» real s'hauria de fer a /etc/resolv.dnsmasq.conf
.",
+ "diagnosis_dns_good_conf": "Els registres DNS han estat correctament configurats pel domini {domain} (categoria {category})",
+ "diagnosis_dns_bad_conf": "Alguns registres DNS són incorrectes o no existeixen pel domini {domain} (categoria {category})",
+ "diagnosis_dns_missing_record": "Segons la configuració DNS recomanada, hauríeu d'afegir un registre DNS amb la següent informació.{type}
{name}
{value}
",
+ "diagnosis_dns_discrepancy": "La configuració DNS següent sembla que no segueix la configuració recomanada: {type}
{name}
{current}
{value}
",
+ "diagnosis_services_bad_status": "El servei {service} està {status} :(",
+ "diagnosis_diskusage_verylow": "El lloc d'emmagatzematge {mountpoint}
(en l'aparell {device}
) només té disponibles {free} ({free_percent}%). Hauríeu de considerar alliberar una mica d'espai!",
+ "diagnosis_diskusage_low": "El lloc d'emmagatzematge {mountpoint}
(en l'aparell {device}
) només té disponibles {free} ({free_percent}%). Aneu amb compte.",
+ "diagnosis_diskusage_ok": "El lloc d'emmagatzematge {mountpoint}
(en l'aparell {device}
) encara té {free} ({free_percent}%) lliures!",
+ "diagnosis_ram_verylow": "El sistema només té {available} ({available_percent}%) de memòria RAM disponibles! (d'un total de {total})",
+ "diagnosis_ram_ok": "El sistema encara té {available} ({available_percent}%) de memòria RAM disponibles d'un total de {total}.",
+ "diagnosis_swap_notsomuch": "El sistema només té {total} de swap. Hauríeu de considerar tenir un mínim de {recommended} per evitar situacions en les que el sistema es queda sense memòria.",
+ "diagnosis_swap_ok": "El sistema té {total} de swap!",
+ "diagnosis_regenconf_allgood": "Tots els fitxers de configuració estan en acord amb la configuració recomanada!",
+ "diagnosis_regenconf_manually_modified_details": "No hauria de ser cap problema sempre i quan sapigueu el que esteu fent! YunoHost deixarà d'actualitzar aquest fitxer de manera automàtica… Però tingueu en compte que les actualitzacions de YunoHost podrien tenir canvis recomanats importants. Si voleu podeu mirar les diferències amb {global}
",
+ "diagnosis_ip_local": "IP local: {local}
",
+ "diagnosis_dns_point_to_doc": "Consulteu la documentació a https://yunohost.org/dns_config si necessiteu ajuda per configurar els registres DNS.",
+ "diagnosis_mail_outgoing_port_25_ok": "El servidor de correu electrònic SMTP pot enviar correus electrònics (el port de sortida 25 no està bloquejat).",
+ "diagnosis_mail_outgoing_port_25_blocked_details": "Primer heu d'intentar desbloquejar el port 25 en la interfície del vostre router o en la interfície del vostre allotjador. (Alguns proveïdors d'allotjament demanen enviar un tiquet de suport en aquests casos).",
+ "diagnosis_mail_ehlo_ok": "El servidor de correu electrònic SMTP és accessible des de l'exterior i per tant pot rebre correus electrònics!",
+ "diagnosis_mail_ehlo_unreachable": "El servidor de correu electrònic SMTP no és accessible des de l'exterior amb IPv{ipversion}. No podrà rebre correus electrònics.",
+ "diagnosis_mail_ehlo_bad_answer": "Un servei no SMTP a respost en el port 25 amb IPv{ipversion}",
+ "diagnosis_mail_ehlo_bad_answer_details": "Podria ser que sigui per culpa d'una altra màquina responent en lloc del servidor.",
+ "diagnosis_mail_ehlo_wrong": "Un servidor de correu electrònic SMTP diferent respon amb IPv{ipversion}. És probable que el vostre servidor no pugui rebre correus electrònics.",
+ "diagnosis_mail_ehlo_could_not_diagnose": "No s'ha pogut diagnosticar si el servidor de correu electrònic postfix és accessible des de l'exterior amb IPv{ipversion}.",
+ "diagnosis_mail_ehlo_could_not_diagnose_details": "Error: {error}",
+ "diagnosis_mail_fcrdns_ok": "S'ha configurat correctament el servidor DNS invers!",
+ "diagnosis_mail_blacklist_ok": "Sembla que les IPs i el dominis d'aquest servidor no són en una llista negra",
+ "diagnosis_mail_blacklist_listed_by": "La vostra IP o domini {item}
està en una llista negra a {blacklist_name}",
+ "diagnosis_mail_blacklist_reason": "El motiu de ser a la llista negra és: {reason}",
+ "diagnosis_mail_fcrdns_different_from_ehlo_domain": "El DNS invers no està correctament configurat amb IPv{ipversion}. Alguns correus electrònics poden no arribar al destinatari o ser marcats com correu brossa.",
+ "diagnosis_mail_fcrdns_different_from_ehlo_domain_details": "DNS invers actual: {rdns_domain}
{ehlo_domain}
",
+ "diagnosis_mail_queue_ok": "{nb_pending} correus electrònics pendents en les cues de correu electrònic",
+ "diagnosis_mail_queue_unavailable": "No s'ha pogut consultar el nombre de correus electrònics pendents en la cua",
+ "diagnosis_mail_queue_unavailable_details": "Error: {error}",
+ "diagnosis_mail_queue_too_big": "Hi ha massa correus electrònics pendents en la cua ({nb_pending} correus electrònics)",
+ "diagnosis_http_hairpinning_issue": "Sembla que la vostra xarxa no té el hairpinning activat.",
+ "diagnosis_http_nginx_conf_not_up_to_date": "La configuració NGINX d'aquest domini sembla que ha estat modificada manualment, i no deixa que YunoHost diagnostiqui si és accessible amb HTTP.",
+ "diagnosis_http_nginx_conf_not_up_to_date_details": "Per arreglar el problema, mireu les diferències amb la línia d'ordres utilitzant {wrong_ehlo}
{right_ehlo}
{ehlo_domain}
en la interfície del router o en la interfície del vostre allotjador. (Alguns allotjadors requereixen que obris un informe de suport per això).",
+ "diagnosis_mail_fcrdns_nok_alternatives_4": "Alguns proveïdors no permeten configurar el DNS invers (o aquesta funció pot no funcionar…). Si teniu problemes a causa d'això, considereu les solucions següents:/etc/resolv.conf
kein Eintrag auf 127.0.0.1
zeigt.",
+ "diagnosis_ip_weird_resolvconf_details": "Die Datei /etc/resolv.conf
muss ein Symlink auf /etc/resolvconf/run/resolv.conf
sein, welcher auf 127.0.0.1
(dnsmasq) zeigt. Falls Sie die DNS-Resolver manuell konfigurieren möchten, bearbeiten Sie bitte /etc/resolv.dnsmasq.conf
.",
+ "diagnosis_dns_good_conf": "Die DNS-Einträge für die Domäne {domain} (Kategorie {category}) sind korrekt konfiguriert",
+ "diagnosis_ignored_issues": "(+ {nb_ignored} ignorierte(s) Problem(e))",
+ "diagnosis_basesystem_hardware": "Server Hardware Architektur ist {virt} {arch}",
+ "diagnosis_found_errors": "Habe {errors} erhebliche(s) Problem(e) in Verbindung mit {category} gefunden!",
+ "diagnosis_found_warnings": "Habe {warnings} Ding(e) gefunden, die verbessert werden könnten für {category}.",
+ "diagnosis_ip_dnsresolution_working": "Domänen-Namens-Auflösung funktioniert!",
+ "diagnosis_ip_weird_resolvconf": "DNS Auflösung scheint zu funktionieren, aber seien Sie vorsichtig wenn Sie Ihren eigenen /etc/resolv.conf
verwenden.",
+ "diagnosis_display_tip": "Um die gefundenen Probleme zu sehen, können Sie zum Diagnose-Bereich des webadmin gehen, oder 'yunohost diagnosis show --issues --human-readable' in der Kommandozeile ausführen.",
+ "backup_archive_corrupted": "Das Backup-Archiv '{archive}' scheint beschädigt: {error}",
+ "backup_archive_cant_retrieve_info_json": "Die Informationen für das Archiv '{archive}' konnten nicht geladen werden... Die Datei info.json wurde nicht gefunden (oder ist kein gültiges json).",
+ "app_packaging_format_not_supported": "Diese App kann nicht installiert werden da das Paketformat nicht von der YunoHost-Version unterstützt wird. Denken Sie darüber nach das System zu aktualisieren.",
+ "certmanager_domain_not_diagnosed_yet": "Für die Domain {domain} gibt es noch keine Diagnose-Resultate. Bitte widerhole die Diagnose für die Kategorien 'DNS records' und 'Web' im Diagnose-Bereich um zu überprüfen ob die Domain für Let's Encrypt bereit ist. (Wenn du weißt was du tust, kannst du --no-checks benutzen, um diese Überprüfung zu überspringen.)",
+ "migration_0015_patching_sources_list": "sources.lists wird repariert...",
+ "migration_0015_start": "Start der Migration auf Buster",
+ "migration_description_0015_migrate_to_buster": "Auf Debian Buster und YunoHost 4.x upgraden",
+ "mail_unavailable": "Diese E-Mail Adresse ist reserviert und wird dem ersten Benutzer automatisch zugewiesen",
+ "diagnosis_services_conf_broken": "Die Konfiguration für den Dienst {service} ist fehlerhaft!",
+ "diagnosis_services_running": "Dienst {service} läuft!",
+ "diagnosis_domain_expires_in": "{domain} läuft in {days} Tagen ab.",
+ "diagnosis_domain_expiration_error": "Einige Domänen werden SEHR BALD ablaufen!",
+ "diagnosis_domain_expiration_success": "Ihre Domänen sind registriert und werden in nächster Zeit nicht ablaufen.",
+ "diagnosis_domain_not_found_details": "Die Domäne {domain} existiert nicht in der WHOIS-Datenbank oder sie ist abgelaufen!",
+ "diagnosis_domain_expiration_not_found": "Das Ablaufdatum einiger Domains kann nicht überprüft werden",
+ "diagnosis_dns_try_dyndns_update_force": "Die DNS-Konfiguration dieser Domain sollte automatisch von YunoHost verwaltet werden. Andernfalls können Sie mittels {type}
{name}
{current}
{value}
",
+ "diagnosis_dns_missing_record": "Gemäß der empfohlenen DNS-Konfiguration sollten Sie einen DNS-Eintrag mit den folgenden Informationen hinzufügen.{type}
{name}
{value}
",
+ "diagnosis_dns_bad_conf": "Einige DNS-Einträge für die Domäne {domain} fehlen oder sind nicht korrekt (Kategorie {category})",
+ "diagnosis_ip_local": "Lokale IP: {local}
",
+ "diagnosis_ip_global": "Globale IP: {global}
",
+ "diagnosis_ip_no_ipv6_tip": "Die Verwendung von IPv6 ist nicht Voraussetzung für das Funktionieren Ihres Servers, trägt aber zur Gesundheit des Internet als Ganzes bei. IPv6 sollte normalerweise automatisch von Ihrem Server oder Ihrem Provider konfiguriert werden, sofern verfügbar. Andernfalls müßen Sie einige Dinge manuell konfigurieren. Weitere Informationen finden Sie hier: https://yunohost.org/#/ipv6. Wenn Sie IPv6 nicht aktivieren können oder Ihnen das zu technisch ist, können Sie diese Warnung gefahrlos ignorieren.",
+ "diagnosis_services_bad_status_tip": "Sie können versuchen, den Dienst neu zu starten, und wenn das nicht funktioniert, schauen Sie sich die (Dienst-)Logs in der Verwaltung an (In der Kommandozeile können Sie dies mit {mountpoint}
(auf Gerät {device}
) hat nur noch {free} ({free_percent}%) freien Speicherplatz (von ingesamt {total}). Sie sollten sich ernsthaft überlegen, einigen Seicherplatz frei zu machen!",
+ "diagnosis_http_ok": "Die Domäne {domain} ist über HTTP von außerhalb des lokalen Netzwerks erreichbar.",
+ "diagnosis_mail_outgoing_port_25_blocked_relay_vpn": "Einige Hosting-Anbieter werden es Ihnen nicht gestatten, den ausgehenden Port 25 zu öffnen, da diese sich nicht um die Netzneutralität kümmern.{mountpoint}
(auf Gerät {device}
) hat nur noch {free} ({free_percent}%) freien Speicherplatz (von insgesamt {total}). Seien Sie vorsichtig.",
+ "diagnosis_ram_low": "Das System hat nur {available} ({available_percent}%) RAM zur Verfügung! (von insgesamt {total}). Seien Sie vorsichtig.",
+ "service_reload_or_restart_failed": "Der Dienst '{service}' konnte nicht erneut geladen oder gestartet werden.\n\nKürzlich erstellte Logs des Dienstes: {logs}",
+ "diagnosis_domain_expiration_not_found_details": "Die WHOIS-Informationen für die Domäne {domain} scheint keine Informationen über das Ablaufdatum zu enthalten?",
+ "diagnosis_domain_expiration_warning": "Einige Domänen werden bald ablaufen!",
+ "diagnosis_diskusage_ok": "Der Speicher {mountpoint}
(auf Gerät {device}
) hat immer noch {free} ({free_percent}%) freien Speicherplatz übrig(von insgesamt {total})!",
+ "diagnosis_ram_ok": "Das System hat immer noch {available} ({available_percent}%) RAM zu Verfügung von {total}.",
+ "diagnosis_swap_none": "Das System hat gar keinen Swap. Sie sollten sich überlegen mindestens {recommended} an Swap einzurichten, um Situationen zu verhindern, in welchen der RAM des Systems knapp wird.",
+ "diagnosis_mail_ehlo_unreachable_details": "Konnte keine Verbindung zu Ihrem Server auf dem Port 25 herzustellen per IPv{ipversion}. Er scheint nicht erreichbar zu sein.{ehlo_domain}
zu konfigurieren. (Gewisse Hosting-Anbieter können dafür möglicherweise verlangen, dass Sie dafür ein Support-Ticket erstellen).",
+ "diagnosis_mail_fcrdns_dns_missing": "Es wurde kein Reverse-DNS-Eintrag definiert für IPv{ipversion}. Einige E-Mails könnten möglicherweise zurückgewiesen oder als Spam markiert werden.",
+ "diagnosis_mail_fcrdns_ok": "Ihr Reverse-DNS-Eintrag ist korrekt konfiguriert!",
+ "diagnosis_mail_ehlo_could_not_diagnose_details": "Fehler: {error}",
+ "diagnosis_mail_ehlo_could_not_diagnose": "Konnte nicht überprüfen, ob der Postfix-Mail-Server von aussen per IPv{ipversion} erreichbar ist.",
+ "diagnosis_mail_ehlo_wrong_details": "Die vom Remote-Diagnose-Server per IPv{ipversion} empfangene EHLO weicht von der Domäne Ihres Servers ab. {wrong_ehlo}
{right_ehlo}
{item}
ist auf der Blacklist auf {blacklist_name}",
+ "diagnosis_mail_blacklist_ok": "Die IP-Adressen und die Domänen, welche von diesem Server verwendet werden, scheinen nicht auf einer Blacklist zu sein",
+ "diagnosis_mail_fcrdns_different_from_ehlo_domain_details": "Aktueller Reverse-DNS-Eintrag: {rdns_domain}
{ehlo_domain}
",
+ "diagnosis_mail_fcrdns_different_from_ehlo_domain": "Der Reverse-DNS-Eintrag für IPv{ipversion} ist nicht korrekt konfiguriert. Einige E-Mails könnten abgewiesen oder als Spam markiert werden.",
+ "diagnosis_mail_fcrdns_nok_alternatives_6": "Einige Provider werden es Ihnen nicht erlauben, Ihren Reverse-DNS-Eintrag zu konfigurieren (oder ihre Funktionalität könnte defekt sein ...). Falls Ihr Reverse-DNS-Eintrag für IPv4 korrekt konfiguiert ist, können Sie versuchen, die Verwendung von IPv6 für das Versenden von E-Mails auszuschalten, indem Sie den Befehl {file}
scheint manuell verändert worden zu sein.",
+ "diagnosis_regenconf_allgood": "Alle Konfigurationsdateien stimmen mit der empfohlenen Konfiguration überein!",
+ "diagnosis_package_installed_from_sury": "Einige System-Pakete sollten gedowngradet werden",
+ "diagnosis_ports_forwarding_tip": "Um dieses Problem zu beheben, müssen Sie höchst wahrscheinlich die Port-Weiterleitung auf Ihrem Internet-Router einrichten wie in https://yunohost.org/isp_box_config beschrieben",
+ "diagnosis_regenconf_manually_modified_details": "Das ist wahrscheinlich OK wenn Sie wissen, was Sie tun! YunoHost wird in Zukunft diese Datei nicht mehr automatisch updaten... Aber seien Sie bitte vorsichtig, da die zukünftigen Upgrades von YunoHost wichtige empfohlene Änderungen enthalten könnten. Falls Sie möchten, können Sie die Unterschiede mit {mountpoint}
(on device {device}
) has only {free} ({free_percent}%) space remaining (out of {total}). Be careful.",
+ "diagnosis_diskusage_ok": "Storage {mountpoint}
(on device {device}
) still has {free} ({free_percent}%) space left (out of {total})!",
+ "diagnosis_diskusage_verylow": "Storage {mountpoint}
(on device {device}
) has only {free} ({free_percent}%) space remaining (out of {total}). You should really consider cleaning up some space!",
+ "diagnosis_display_tip": "To see the issues found, you can go to the Diagnosis section of the webadmin, or run 'yunohost diagnosis show --issues --human-readable' from the command-line.",
+ "diagnosis_dns_bad_conf": "Some DNS records are missing or incorrect for domain {domain} (category {category})",
+ "diagnosis_dns_discrepancy": "The following DNS record does not seem to follow the recommended configuration:{type}
{name}
{current}
{value}
",
+ "diagnosis_dns_good_conf": "DNS records are correctly configured for domain {domain} (category {category})",
+ "diagnosis_dns_missing_record": "According to the recommended DNS configuration, you should add a DNS record with the following info.{type}
{name}
{value}
",
+ "diagnosis_dns_point_to_doc": "Please check the documentation at https://yunohost.org/dns_config if you need help about configuring DNS records.",
+ "diagnosis_dns_specialusedomain": "Domain {domain} is based on a special-use top-level domain (TLD) and is therefore not expected to have actual DNS records.",
+ "diagnosis_dns_try_dyndns_update_force": "This domain's DNS configuration should automatically be managed by YunoHost. If that's not the case, you can try to force an update using /etc/resolv.conf
not pointing to 127.0.0.1
.",
+ "diagnosis_ip_connected_ipv4": "The server is connected to the Internet through IPv4!",
+ "diagnosis_ip_connected_ipv6": "The server is connected to the Internet through IPv6!",
+ "diagnosis_ip_dnsresolution_working": "Domain name resolution is working!",
+ "diagnosis_ip_global": "Global IP: {global}
",
+ "diagnosis_ip_local": "Local IP: {local}
",
+ "diagnosis_ip_no_ipv4": "The server does not have working IPv4.",
+ "diagnosis_ip_no_ipv6": "The server does not have working IPv6.",
+ "diagnosis_ip_no_ipv6_tip": "Having a working IPv6 is not mandatory for your server to work, but it is better for the health of the Internet as a whole. IPv6 should usually be automatically configured by the system or your provider if it's available. Otherwise, you might need to configure a few things manually as explained in the documentation here: https://yunohost.org/#/ipv6. If you cannot enable IPv6 or if it seems too technical for you, you can also safely ignore this warning.",
+ "diagnosis_ip_not_connected_at_all": "The server does not seem to be connected to the Internet at all!?",
+ "diagnosis_ip_weird_resolvconf": "DNS resolution seems to be working, but it looks like you're using a custom /etc/resolv.conf
.",
+ "diagnosis_ip_weird_resolvconf_details": "The file /etc/resolv.conf
should be a symlink to /etc/resolvconf/run/resolv.conf
itself pointing to 127.0.0.1
(dnsmasq). If you want to manually configure DNS resolvers, please edit /etc/resolv.dnsmasq.conf
.",
+ "diagnosis_mail_blacklist_listed_by": "Your IP or domain {item}
is blacklisted on {blacklist_name}",
+ "diagnosis_mail_blacklist_ok": "The IPs and domains used by this server do not appear to be blacklisted",
+ "diagnosis_mail_blacklist_reason": "The blacklist reason is: {reason}",
+ "diagnosis_mail_blacklist_website": "After identifying why you are listed and fixed it, feel free to ask for your IP or domaine to be removed on {blacklist_website}",
+ "diagnosis_mail_ehlo_bad_answer": "A non-SMTP service answered on port 25 on IPv{ipversion}",
+ "diagnosis_mail_ehlo_bad_answer_details": "It could be due to an other machine answering instead of your server.",
+ "diagnosis_mail_ehlo_could_not_diagnose": "Could not diagnose if postfix mail server is reachable from outside in IPv{ipversion}.",
+ "diagnosis_mail_ehlo_could_not_diagnose_details": "Error: {error}",
+ "diagnosis_mail_ehlo_ok": "The SMTP mail server is reachable from the outside and therefore is able to receive emails!",
+ "diagnosis_mail_ehlo_unreachable": "The SMTP mail server is unreachable from the outside on IPv{ipversion}. It won't be able to receive emails.",
+ "diagnosis_mail_ehlo_unreachable_details": "Could not open a connection on port 25 to your server in IPv{ipversion}. It appears to be unreachable.{wrong_ehlo}
{right_ehlo}
{rdns_domain}
{ehlo_domain}
",
+ "diagnosis_mail_fcrdns_dns_missing": "No reverse DNS is defined in IPv{ipversion}. Some emails may fail to get delivered or may get flagged as spam.",
+ "diagnosis_mail_fcrdns_nok_alternatives_4": "Some providers won't let you configure your reverse DNS (or their feature might be broken...). If you are experiencing issues because of this, consider the following solutions:{ehlo_domain}
in your internet router interface or your hosting provider interface. (Some hosting provider may require you to send them a support ticket for this).",
+ "diagnosis_mail_fcrdns_ok": "Your reverse DNS is correctly configured!",
+ "diagnosis_mail_outgoing_port_25_blocked": "The SMTP mail server cannot send emails to other servers because outgoing port 25 is blocked in IPv{ipversion}.",
+ "diagnosis_mail_outgoing_port_25_blocked_details": "You should first try to unblock outgoing port 25 in your internet router interface or your hosting provider interface. (Some hosting provider may require you to send them a support ticket for this).",
+ "diagnosis_mail_outgoing_port_25_blocked_relay_vpn": "Some providers won't let you unblock outgoing port 25 because they don't care about Net Neutrality.{file}
appears to have been manually modified.",
+ "diagnosis_regenconf_manually_modified_details": "This is probably OK if you know what you're doing! YunoHost will stop updating this file automatically... But beware that YunoHost upgrades could contain important recommended changes. If you want to, you can inspect the differences with {mountpoint}
(sur aparato {device}
) nur restas {free} ({free_percent}%) spaco restanta (el {total}). Vi vere konsideru purigi iom da spaco !",
+ "diagnosis_ram_verylow": "La sistemo nur restas {available} ({available_percent}%) RAM! (el {total})",
+ "diagnosis_mail_outgoing_port_25_blocked": "Eliranta haveno 25 ŝajnas esti blokita. Vi devas provi malŝlosi ĝin en via agorda panelo de provizanto (aŭ gastiganto). Dume la servilo ne povos sendi retpoŝtojn al aliaj serviloj.",
+ "diagnosis_http_bad_status_code": "Ĝi aspektas kiel alia maŝino (eble via interreta enkursigilo) respondita anstataŭ via servilo./etc/resolv.conf
.",
+ "diagnosis_ip_weird_resolvconf_details": "La dosiero /etc/resolv.conf
devas esti ligilo al /etc/resolvconf/run/resolv.conf
indikante 127.0.0.1
(dnsmasq). Se vi volas permane agordi DNS-solvilojn, bonvolu redakti /etc/resolv.dnsmasq.conf
.",
+ "diagnosis_dns_good_conf": "DNS-registroj estas ĝuste agorditaj por domajno {domain} (kategorio {category})",
+ "diagnosis_dns_bad_conf": "Iuj DNS-registroj mankas aŭ malĝustas por domajno {domain} (kategorio {category})",
+ "diagnosis_ram_ok": "La sistemo ankoraŭ havas {available} ({available_percent}%) RAM forlasita de {total}.",
+ "diagnosis_swap_none": "La sistemo tute ne havas interŝanĝon. Vi devus pripensi aldoni almenaŭ {recommended} da interŝanĝo por eviti situaciojn en kiuj la sistemo restas sen memoro.",
+ "diagnosis_swap_notsomuch": "La sistemo havas nur {total}-interŝanĝon. Vi konsideru havi almenaŭ {recommended} por eviti situaciojn en kiuj la sistemo restas sen memoro.",
+ "diagnosis_regenconf_manually_modified_details": "Ĉi tio probable estas bona, se vi scias, kion vi faras! YunoHost ĉesigos ĝisdatigi ĉi tiun dosieron aŭtomate ... Sed atentu, ke YunoHost-ĝisdatigoj povus enhavi gravajn rekomendajn ŝanĝojn. Se vi volas, vi povas inspekti la diferencojn per /etc/resolv.conf
ne montrante al 127.0.0.1
.",
+ "diagnosis_dns_missing_record": "Laŭ la rekomendita DNS-agordo, vi devas aldoni DNS-registron kun\ntipo: {type}\nnomo: {name}\nvaloro: {value}",
+ "diagnosis_dns_discrepancy": "La DNS-registro kun tipo {type} kaj nomo {name} ne kongruas kun la rekomendita agordo.\nNuna valoro: {current}\nEsceptita valoro: {value}",
+ "diagnosis_services_conf_broken": "Agordo estas rompita por servo {service} !",
+ "diagnosis_services_bad_status": "Servo {service} estas {status} :(",
+ "diagnosis_ram_low": "La sistemo havas {available} ({available_percent}%) RAM forlasita de {total}. Estu zorgema.",
+ "diagnosis_swap_ok": "La sistemo havas {total} da interŝanĝoj!",
+ "diagnosis_regenconf_allgood": "Ĉiuj agordaj dosieroj kongruas kun la rekomendita agordo!",
+ "diagnosis_regenconf_manually_modified": "Agordodosiero {file}
ŝajnas esti permane modifita.",
+ "diagnosis_description_ip": "Interreta konektebleco",
+ "diagnosis_description_dnsrecords": "Registroj DNS",
+ "diagnosis_description_services": "Servo kontrolas staton",
+ "diagnosis_description_systemresources": "Rimedaj sistemoj",
+ "diagnosis_ports_could_not_diagnose": "Ne povis diagnozi, ĉu haveblaj havenoj de ekstere.",
+ "diagnosis_ports_could_not_diagnose_details": "Eraro: {error}",
+ "diagnosis_services_bad_status_tip": "Vi povas provi rekomenci la servon , kaj se ĝi ne funkcias, rigardu La servaj registroj en reteja (el la komandlinio, vi povas fari tion per {mountpoint}
(sur aparato {device}
) nur restas {free} ({free_percent}%) spaco restanta (el {total}). Estu zorgema.",
+ "diagnosis_diskusage_ok": "Stokado {mountpoint}
(sur aparato {device}
) ankoraŭ restas {free} ({free_percent}%) spaco (el {total})!",
+ "global_settings_setting_pop3_enabled": "Ebligu la protokolon POP3 por la poŝta servilo",
+ "diagnosis_unknown_categories": "La jenaj kategorioj estas nekonataj: {categories}",
+ "diagnosis_services_running": "Servo {service} funkcias!",
+ "diagnosis_ports_unreachable": "Haveno {port} ne atingeblas de ekstere.",
+ "diagnosis_ports_ok": "Haveno {port} atingeblas de ekstere.",
+ "diagnosis_ports_needed_by": "Eksponi ĉi tiun havenon necesas por {category} funkcioj (servo {service})",
+ "diagnosis_ports_forwarding_tip": "Por solvi ĉi tiun problemon, vi plej verŝajne devas agordi la plusendon de haveno en via interreta enkursigilo kiel priskribite en https://yunohost.org/isp_box_config",
+ "diagnosis_http_could_not_diagnose": "Ne povis diagnozi, ĉu atingeblas domajno de ekstere.",
+ "diagnosis_http_could_not_diagnose_details": "Eraro: {error}",
+ "diagnosis_http_ok": "Domajno {domain} atingebla per HTTP de ekster la loka reto.",
+ "diagnosis_http_unreachable": "Domajno {domain} ŝajnas neatingebla per HTTP de ekster la loka reto.",
+ "domain_cannot_remove_main_add_new_one": "Vi ne povas forigi '{domain}' ĉar ĝi estas la ĉefa domajno kaj via sola domajno, vi devas unue aldoni alian domajnon uzante ''yunohost domain add {ehlo_domain}
en via interreta enkursigilo aŭ en via retprovizanta interfaco. (Iuj gastigantaj provizantoj eble postulas, ke vi sendu al ili subtenan bileton por ĉi tio).",
+ "diagnosis_mail_fcrdns_nok_alternatives_4": "Iuj provizantoj ne lasos vin agordi vian inversan DNS (aŭ ilia funkcio povus esti rompita ...). Se vi spertas problemojn pro tio, konsideru jenajn solvojn:{global}
",
+ "diagnosis_ip_local": "Loka IP: {local}
",
+ "diagnosis_dns_point_to_doc": "Bonvolu kontroli la dokumentaron ĉe https://yunohost.org/dns_config se vi bezonas helpon pri agordo de DNS-registroj.",
+ "diagnosis_mail_outgoing_port_25_ok": "La SMTP-poŝta servilo kapablas sendi retpoŝtojn (eliranta haveno 25 ne estas blokita).",
+ "diagnosis_mail_outgoing_port_25_blocked_details": "Vi unue provu malŝlosi elirantan havenon 25 en via interreta enkursigilo aŭ en via retprovizanta interfaco. (Iuj gastigantaj provizantoj eble postulas, ke vi sendu al ili subtenan bileton por ĉi tio).",
+ "diagnosis_mail_ehlo_unreachable": "La SMTP-poŝta servilo estas neatingebla de ekstere sur IPv {ipversion}. Ĝi ne povos ricevi retpoŝtojn.",
+ "diagnosis_mail_ehlo_ok": "La SMTP-poŝta servilo atingeblas de ekstere kaj tial kapablas ricevi retpoŝtojn !",
+ "diagnosis_mail_ehlo_unreachable_details": "Ne povis malfermi rilaton sur la haveno 25 al via servilo en IPv {ipversion}. Ĝi ŝajnas esti neatingebla.{wrong_ehlo}
{right_ehlo}
{rdns_domain}
{ehlo_domain}
",
+ "diagnosis_mail_blacklist_ok": "La IP kaj domajnoj uzataj de ĉi tiu servilo ne ŝajnas esti listigitaj nigre",
+ "diagnosis_mail_blacklist_listed_by": "Via IP aŭ domajno {item}
estas listigita en {blacklist_name}",
+ "diagnosis_mail_blacklist_reason": "La negra listo estas: {reason}",
+ "diagnosis_mail_blacklist_website": "Post identigi kial vi listigas kaj riparis ĝin, bonvolu peti forigi vian IP aŭ domenion sur {blacklist_website}",
+ "diagnosis_mail_queue_ok": "{nb_pending} pritraktataj retpoŝtoj en la retpoŝtaj vostoj",
+ "diagnosis_mail_queue_unavailable": "Ne povas konsulti multajn pritraktitajn retpoŝtojn en vosto",
+ "diagnosis_mail_queue_unavailable_details": "Eraro: {error}",
+ "diagnosis_mail_queue_too_big": "Tro multaj pritraktataj retpoŝtoj en retpoŝto ({nb_pending} retpoŝtoj)",
+ "diagnosis_ports_partially_unreachable": "Haveno {port} ne atingebla de ekstere en IPv {failed}.",
+ "diagnosis_http_hairpinning_issue": "Via loka reto ŝajne ne havas haŭtadon.",
+ "diagnosis_http_hairpinning_issue_details": "Ĉi tio probable estas pro via ISP-skatolo / enkursigilo. Rezulte, homoj de ekster via loka reto povos aliri vian servilon kiel atendite, sed ne homoj de interne de la loka reto (kiel vi, probable?) Kiam uzas la domajnan nomon aŭ tutmondan IP. Eble vi povas plibonigi la situacion per rigardado al https://yunohost.org/dns_local_network",
+ "diagnosis_http_partially_unreachable": "Domajno {domain} ŝajnas neatingebla per HTTP de ekster la loka reto en IPv {failed}, kvankam ĝi funkcias en IPv {passed}.",
+ "diagnosis_http_nginx_conf_not_up_to_date": "La nginx-agordo de ĉi tiu domajno ŝajnas esti modifita permane, kaj malhelpas YunoHost diagnozi ĉu ĝi atingeblas per HTTP.",
+ "diagnosis_http_nginx_conf_not_up_to_date_details": "Por solvi la situacion, inspektu la diferencon per la komandlinio per yunohost app remove the_app_id
o cambialas a otro dominio usando yunohost app change-url the_app_id
antes de continuar con el borrado del dominio.",
"done": "Hecho.",
- "downloading": "Descargando...",
- "dyndns_cron_installed": "La tarea cron para DynDNS ha sido instalada",
- "dyndns_cron_remove_failed": "No se pudo eliminar la tarea cron DynDNS",
- "dyndns_cron_removed": "La tarea cron DynDNS ha sido eliminada",
- "dyndns_ip_update_failed": "No se pudo actualizar la dirección IP en el DynDNS",
- "dyndns_ip_updated": "Su dirección IP ha sido actualizada en el DynDNS",
- "dyndns_key_generating": "Se está generando la clave del DNS. Esto podría tardar unos minutos...",
+ "downloading": "Descargando…",
+ "dyndns_ip_update_failed": "No se pudo actualizar la dirección IP en DynDNS",
+ "dyndns_ip_updated": "Actualizada su IP en DynDNS",
+ "dyndns_key_generating": "Generando la clave del DNS. Esto podría tardar un rato.",
"dyndns_key_not_found": "No se ha encontrado la clave DNS para el dominio",
- "dyndns_no_domain_registered": "Ningún dominio ha sido registrado con DynDNS",
- "dyndns_registered": "El dominio DynDNS ha sido registrado",
- "dyndns_registration_failed": "No se pudo registrar el dominio DynDNS: {error:s}",
- "dyndns_unavailable": "El dominio {domain:s} no está disponible.",
- "executing_command": "Ejecutando el comando '{command:s}'...",
- "executing_script": "Ejecutando el script '{script:s}'...",
- "extracting": "Extrayendo...",
- "field_invalid": "Campo no válido '{:s}'",
+ "dyndns_no_domain_registered": "Ningún dominio registrado con DynDNS",
+ "dyndns_registered": "Registrado dominio de DynDNS",
+ "dyndns_registration_failed": "No se pudo registrar el dominio de DynDNS: {error}",
+ "dyndns_unavailable": "El dominio «{domain}» no está disponible.",
+ "extracting": "Extrayendo…",
+ "field_invalid": "Campo no válido '{}'",
"firewall_reload_failed": "No se pudo recargar el cortafuegos",
- "firewall_reloaded": "El cortafuegos ha sido recargado",
- "firewall_rules_cmd_failed": "No se pudieron aplicar algunas reglas del cortafuegos. Para más información consulte el registro.",
- "format_datetime_short": "%d/%m/%Y %I:%M %p",
- "hook_argument_missing": "Falta un parámetro '{:s}'",
- "hook_choice_invalid": "Selección inválida '{:s}'",
- "hook_exec_failed": "No se puede ejecutar el script: {path:s}",
- "hook_exec_not_terminated": "La ejecución del script no ha terminado: {path:s}",
- "hook_list_by_invalid": "Enumerar los hooks por validez",
- "hook_name_unknown": "Nombre de hook desconocido '{name:s}'",
+ "firewall_reloaded": "Cortafuegos recargado",
+ "firewall_rules_cmd_failed": "Algunos comandos para aplicar reglas del cortafuegos han fallado. Más información en el registro.",
+ "hook_exec_failed": "No se pudo ejecutar el guión: {path}",
+ "hook_exec_not_terminated": "El guión no terminó correctamente:{path}",
+ "hook_list_by_invalid": "Esta propiedad no se puede usar para enumerar ganchos («hooks»)",
+ "hook_name_unknown": "Nombre de hook desconocido '{name}'",
"installation_complete": "Instalación finalizada",
- "installation_failed": "No se pudo realizar la instalación",
"ip6tables_unavailable": "No puede modificar ip6tables aquí. O bien está en un 'container' o su kernel no soporta esta opción",
"iptables_unavailable": "No puede modificar iptables aquí. O bien está en un 'container' o su kernel no soporta esta opción",
- "ldap_initialized": "Se ha inicializado LDAP",
- "license_undefined": "indefinido",
- "mail_alias_remove_failed": "No se pudo eliminar el alias de correo '{mail:s}'",
- "mail_domain_unknown": "El dominio de correo '{domain:s}' es desconocido",
- "mail_forward_remove_failed": "No se pudo eliminar el reenvío de correo '{mail:s}'",
- "maindomain_change_failed": "No se pudo cambiar el dominio principal",
- "maindomain_changed": "Se ha cambiado el dominio principal",
- "monitor_disabled": "La monitorización del sistema ha sido deshabilitada",
- "monitor_enabled": "La monitorización del sistema ha sido habilitada",
- "monitor_glances_con_failed": "No se pudo conectar al servidor Glances",
- "monitor_not_enabled": "La monitorización del sistema no está habilitada",
- "monitor_period_invalid": "Período de tiempo no válido",
- "monitor_stats_file_not_found": "No se pudo encontrar el archivo de estadísticas",
- "monitor_stats_no_update": "No hay estadísticas de monitorización para actualizar",
- "monitor_stats_period_unavailable": "No hay estadísticas para el período",
- "mountpoint_unknown": "Punto de montaje desconocido",
- "mysql_db_creation_failed": "No se pudo crear la base de datos MySQL",
- "mysql_db_init_failed": "No se pudo iniciar la base de datos MySQL",
- "mysql_db_initialized": "La base de datos MySQL ha sido inicializada",
- "network_check_mx_ko": "El registro DNS MX no está configurado",
- "network_check_smtp_ko": "El puerto 25 (SMTP) para el correo saliente parece estar bloqueado por su red",
- "network_check_smtp_ok": "El puerto de salida del correo electrónico (25, SMTP) no está bloqueado",
- "new_domain_required": "Debe proporcionar el nuevo dominio principal",
- "no_appslist_found": "No se ha encontrado ninguna lista de aplicaciones",
- "no_internet_connection": "El servidor no está conectado a Internet",
- "no_ipv6_connectivity": "La conexión por IPv6 no está disponible",
- "no_restore_script": "No se ha encontrado un script de restauración para la aplicación '{app:s}'",
- "not_enough_disk_space": "No hay suficiente espacio en '{path:s}'",
- "package_not_installed": "El paquete '{pkgname}' no está instalado",
- "package_unexpected_error": "Ha ocurrido un error inesperado procesando el paquete '{pkgname}'",
- "package_unknown": "Paquete desconocido '{pkgname}'",
- "packages_no_upgrade": "No hay paquetes para actualizar",
- "packages_upgrade_critical_later": "Los paquetes críticos ({packages:s}) serán actualizados más tarde",
+ "mail_alias_remove_failed": "No se pudo eliminar el alias de correo «{mail}»",
+ "mail_domain_unknown": "Dirección de correo no válida para el dominio «{domain}». Use un dominio administrado por este servidor.",
+ "mail_forward_remove_failed": "No se pudo eliminar el reenvío de correo «{mail}»",
+ "main_domain_change_failed": "No se pudo cambiar el dominio principal",
+ "main_domain_changed": "El dominio principal ha cambiado",
+ "not_enough_disk_space": "No hay espacio libre suficiente en «{path}»",
"packages_upgrade_failed": "No se pudieron actualizar todos los paquetes",
- "path_removal_failed": "No se pudo eliminar la ruta {:s}",
- "pattern_backup_archive_name": "Debe ser un nombre de archivo válido con un máximo de 30 caracteres, solo se admiten caracteres alfanuméricos, los guiones -_ y el punto",
+ "pattern_backup_archive_name": "Debe ser un nombre de archivo válido con un máximo de 30 caracteres, solo se admiten caracteres alfanuméricos y los caracteres -_. (guiones y punto)",
"pattern_domain": "El nombre de dominio debe ser válido (por ejemplo mi-dominio.org)",
- "pattern_email": "Debe ser una dirección de correo electrónico válida (por ejemplo, alguien@dominio.org)",
+ "pattern_email": "Debe ser una dirección de correo electrónico válida (p.ej. alguien@example.com)",
"pattern_firstname": "Debe ser un nombre válido",
"pattern_lastname": "Debe ser un apellido válido",
- "pattern_listname": "Solo se pueden usar caracteres alfanuméricos y el guion bajo",
- "pattern_mailbox_quota": "El tamaño de cuota debe tener uno de los sufijos b/k/M/G/T. Usar 0 para cuota ilimitada",
+ "pattern_mailbox_quota": "Debe ser un tamaño con el sufijo «b/k/M/G/T» o «0» para no tener una cuota",
"pattern_password": "Debe contener al menos 3 caracteres",
- "pattern_port": "Debe ser un número de puerto válido (es decir, entre 0-65535)",
"pattern_port_or_range": "Debe ser un número de puerto válido (es decir entre 0-65535) o un intervalo de puertos (por ejemplo 100:200)",
- "pattern_positive_number": "Deber ser un número positivo",
"pattern_username": "Solo puede contener caracteres alfanuméricos o el guión bajo",
- "port_already_closed": "El puerto {port:d} ya está cerrado para las conexiones {ip_version:s}",
- "port_already_opened": "El puerto {port:d} ya está abierto para las conexiones {ip_version:s}",
- "port_available": "El puerto {port:d} está disponible",
- "port_unavailable": "El puerto {port:d} no está disponible",
- "restore_action_required": "Debe especificar algo que restaurar",
- "restore_already_installed_app": "Una aplicación con la id '{app:s}' ya está instalada",
- "restore_app_failed": "No se puede restaurar la aplicación '{app:s}'",
- "restore_cleaning_failed": "No se puede limpiar el directorio temporal de restauración",
- "restore_complete": "Restauración finalizada",
- "restore_confirm_yunohost_installed": "¿Realmente desea restaurar un sistema ya instalado? [{answers:s}]",
+ "port_already_closed": "El puerto {port} ya está cerrado para las conexiones {ip_version}",
+ "port_already_opened": "El puerto {port} ya está abierto para las conexiones {ip_version}",
+ "restore_already_installed_app": "Una aplicación con el ID «{app}» ya está instalada",
+ "app_restore_failed": "No se pudo restaurar la aplicación «{app}»: {error}",
+ "restore_cleaning_failed": "No se pudo limpiar el directorio temporal de restauración",
+ "restore_complete": "Restaurada",
+ "restore_confirm_yunohost_installed": "¿Realmente desea restaurar un sistema ya instalado? [{answers}]",
"restore_failed": "No se pudo restaurar el sistema",
- "restore_hook_unavailable": "El script de restauración '{part:s}' no está disponible en su sistema y tampoco en el archivo",
+ "restore_hook_unavailable": "El script de restauración para «{part}» no está disponible en su sistema y tampoco en el archivo",
"restore_nothings_done": "No se ha restaurado nada",
- "restore_running_app_script": "Ejecutando el script de restauración de la aplicación '{app:s}'...",
- "restore_running_hooks": "Ejecutando los hooks de restauración...",
- "service_add_failed": "No se pudo añadir el servicio '{service:s}'",
- "service_added": "Servicio '{service:s}' ha sido añadido",
- "service_already_started": "El servicio '{service:s}' ya ha sido inicializado",
- "service_already_stopped": "El servicio '{service:s}' ya ha sido detenido",
- "service_cmd_exec_failed": "No se pudo ejecutar el comando '{command:s}'",
- "service_conf_file_backed_up": "Se ha realizado una copia de seguridad del archivo de configuración '{conf}' en '{backup}'",
- "service_conf_file_copy_failed": "No se puede copiar el nuevo archivo de configuración '{new}' a {conf}",
- "service_conf_file_manually_modified": "El archivo de configuración '{conf}' ha sido modificado manualmente y no será actualizado",
- "service_conf_file_manually_removed": "El archivo de configuración '{conf}' ha sido eliminado manualmente y no será creado",
- "service_conf_file_not_managed": "El archivo de configuración '{conf}' no está gestionado y no será actualizado",
- "service_conf_file_remove_failed": "No se puede eliminar el archivo de configuración '{conf}'",
- "service_conf_file_removed": "El archivo de configuración '{conf}' ha sido eliminado",
- "service_conf_file_updated": "El archivo de configuración '{conf}' ha sido actualizado",
- "service_conf_up_to_date": "La configuración del servicio '{service}' ya está actualizada",
- "service_conf_updated": "La configuración ha sido actualizada para el servicio '{service}'",
- "service_conf_would_be_updated": "La configuración podría haber sido actualizada para el servicio '{service} 1'",
- "service_disable_failed": "No se pudo deshabilitar el servicio '{service:s}'",
- "service_disabled": "El servicio '{service:s}' ha sido deshabilitado",
- "service_enable_failed": "No se pudo habilitar el servicio '{service:s}'",
- "service_enabled": "El servicio '{service:s}' ha sido habilitado",
- "service_no_log": "No hay ningún registro para el servicio '{service:s}'",
- "service_regenconf_dry_pending_applying": "Comprobando configuración pendiente que podría haber sido aplicada al servicio '{service}'...",
- "service_regenconf_failed": "No se puede regenerar la configuración para el servicio(s): {services}",
- "service_regenconf_pending_applying": "Aplicando la configuración pendiente para el servicio '{service}'...",
- "service_remove_failed": "No se pudo desinstalar el servicio '{service:s}'",
- "service_removed": "El servicio '{service:s}' ha sido desinstalado",
- "service_start_failed": "No se pudo iniciar el servicio '{service:s}'\n\nRegistros de servicio recientes : {logs:s}",
- "service_started": "El servicio '{service:s}' ha sido iniciado",
- "service_status_failed": "No se pudo determinar el estado del servicio '{service:s}'",
- "service_stop_failed": "No se pudo detener el servicio '{service:s}'",
- "service_stopped": "El servicio '{service:s}' ha sido detenido",
- "service_unknown": "Servicio desconocido '{service:s}'",
- "ssowat_conf_generated": "Se ha generado la configuración de SSOwat",
- "ssowat_conf_updated": "La configuración de SSOwat ha sido actualizada",
- "system_upgraded": "El sistema ha sido actualizado",
- "system_username_exists": "El nombre de usuario ya existe en el sistema",
- "unbackup_app": "La aplicación '{app:s}' no se guardará",
- "unexpected_error": "Ha ocurrido un error inesperado",
- "unit_unknown": "Unidad desconocida '{unit:s}'",
+ "restore_running_app_script": "Restaurando la aplicación «{app}»…",
+ "restore_running_hooks": "Ejecutando los ganchos de restauración…",
+ "service_add_failed": "No se pudo añadir el servicio «{service}»",
+ "service_added": "Se agregó el servicio '{service}'",
+ "service_already_started": "El servicio «{service}» ya está funcionando",
+ "service_already_stopped": "El servicio «{service}» ya ha sido detenido",
+ "service_cmd_exec_failed": "No se pudo ejecutar la orden «{command}»",
+ "service_disable_failed": "No se pudo hacer que el servicio '{service}' no se iniciara en el arranque.\n\nRegistros de servicio recientes: {logs}",
+ "service_disabled": "El servicio '{service}' ya no se iniciará cuando se inicie el sistema.",
+ "service_enable_failed": "No se pudo hacer que el servicio '{service}' se inicie automáticamente en el arranque.\n\nRegistros de servicio recientes: {logs s}",
+ "service_enabled": "El servicio '{service}' ahora se iniciará automáticamente durante el arranque del sistema.",
+ "service_remove_failed": "No se pudo eliminar el servicio «{service}»",
+ "service_removed": "Servicio '{service}' eliminado",
+ "service_start_failed": "No se pudo iniciar el servicio «{service}»\n\nRegistro de servicios recientes:{logs}",
+ "service_started": "El servicio '{service}' comenzó",
+ "service_stop_failed": "No se pudo detener el servicio «{service}»\n\nRegistro de servicios recientes:{logs}",
+ "service_stopped": "Servicio '{service}' detenido",
+ "service_unknown": "Servicio desconocido '{service}'",
+ "ssowat_conf_generated": "Generada la configuración de SSOwat",
+ "ssowat_conf_updated": "Actualizada la configuración de SSOwat",
+ "system_upgraded": "Sistema actualizado",
+ "system_username_exists": "El nombre de usuario ya existe en la lista de usuarios del sistema",
+ "unbackup_app": "La aplicación '{app}' no se guardará",
+ "unexpected_error": "Algo inesperado salió mal: {error}",
"unlimit": "Sin cuota",
- "unrestore_app": "La aplicación '{app:s}' no será restaurada",
- "update_cache_failed": "No se pudo actualizar la caché de APT",
- "updating_apt_cache": "Actualizando lista de paquetes disponibles...",
+ "unrestore_app": "La aplicación '{app}' no será restaurada",
+ "updating_apt_cache": "Obteniendo las actualizaciones disponibles para los paquetes del sistema…",
"upgrade_complete": "Actualización finalizada",
- "upgrading_packages": "Actualizando paquetes...",
+ "upgrading_packages": "Actualizando paquetes…",
"upnp_dev_not_found": "No se encontró ningún dispositivo UPnP",
- "upnp_disabled": "UPnP ha sido deshabilitado",
- "upnp_enabled": "UPnP ha sido habilitado",
- "upnp_port_open_failed": "No se pudieron abrir puertos por UPnP",
- "user_created": "El usuario ha sido creado",
- "user_creation_failed": "No se pudo crear el usuario",
- "user_deleted": "El usuario ha sido eliminado",
- "user_deletion_failed": "No se pudo eliminar el usuario",
- "user_home_creation_failed": "No se puede crear el directorio de usuario 'home'",
- "user_info_failed": "No se pudo extraer la información del usuario",
- "user_unknown": "Usuario desconocido: {user:s}",
- "user_update_failed": "No se pudo actualizar el usuario",
- "user_updated": "El usuario ha sido actualizado",
+ "upnp_disabled": "UPnP desactivado",
+ "upnp_enabled": "UPnP activado",
+ "upnp_port_open_failed": "No se pudo abrir el puerto vía UPnP",
+ "user_created": "Usuario creado",
+ "user_creation_failed": "No se pudo crear el usuario {user}: {error}",
+ "user_deleted": "Usuario eliminado",
+ "user_deletion_failed": "No se pudo eliminar el usuario {user}: {error}",
+ "user_home_creation_failed": "No se pudo crear la carpeta «home» para el usuario",
+ "user_unknown": "Usuario desconocido: {user}",
+ "user_update_failed": "No se pudo actualizar el usuario {user}: {error}",
+ "user_updated": "Cambiada la información de usuario",
"yunohost_already_installed": "YunoHost ya está instalado",
- "yunohost_ca_creation_failed": "No se pudo crear el certificado de autoridad",
- "yunohost_configured": "YunoHost ha sido configurado",
- "yunohost_installing": "Instalando YunoHost...",
- "yunohost_not_installed": "YunoHost no está instalado o ha habido errores en la instalación. Ejecute 'yunohost tools postinstall'",
- "ldap_init_failed_to_create_admin": "La inicialización de LDAP falló al crear el usuario administrador",
- "mailbox_used_space_dovecot_down": "El servicio de e-mail Dovecot debe estar funcionando si desea obtener el espacio utilizado por el buzón de correo",
- "ssowat_persistent_conf_read_error": "Error al leer la configuración persistente de SSOwat: {error:s}. Edite el archivo /etc/ssowat/conf.json.persistent para corregir la sintaxis de JSON",
- "ssowat_persistent_conf_write_error": "Error al guardar la configuración persistente de SSOwat: {error:s}. Edite el archivo /etc/ssowat/conf.json.persistent para corregir la sintaxis de JSON",
- "certmanager_attempt_to_replace_valid_cert": "Está intentando sobrescribir un certificado correcto y válido para el dominio {domain:s}! (Use --force para omitir este mensaje)",
- "certmanager_domain_unknown": "Dominio desconocido {domain:s}",
- "certmanager_domain_cert_not_selfsigned": "El certificado para el dominio {domain:s} no es un certificado autofirmado. ¿Está seguro de que quiere reemplazarlo? (Use --force para omitir este mensaje)",
- "certmanager_certificate_fetching_or_enabling_failed": "Parece que al habilitar el nuevo certificado para el dominio {domain:s} ha fallado de alguna manera...",
- "certmanager_attempt_to_renew_nonLE_cert": "El certificado para el dominio {domain:s} no ha sido emitido por Let's Encrypt. ¡No se puede renovar automáticamente!",
- "certmanager_attempt_to_renew_valid_cert": "El certificado para el dominio {domain:s} no está a punto de expirar! Utilice --force para omitir este mensaje",
- "certmanager_domain_http_not_working": "Parece que no se puede acceder al dominio {domain:s} a través de HTTP. Compruebe que la configuración del DNS y de nginx es correcta",
- "certmanager_error_no_A_record": "No se ha encontrado un registro DNS 'A' para el dominio {domain:s}. Debe hacer que su nombre de dominio apunte a su máquina para poder instalar un certificado Let's Encrypt. (Si sabe lo que está haciendo, use --no-checks para desactivar esas comprobaciones.)",
- "certmanager_domain_dns_ip_differs_from_public_ip": "El registro DNS 'A' para el dominio {domain:s} es diferente de la IP de este servidor. Si recientemente modificó su registro A, espere a que se propague (existen algunos controladores de propagación DNS disponibles en línea). (Si sabe lo que está haciendo, use --no-checks para desactivar esas comprobaciones.)",
- "certmanager_cannot_read_cert": "Se ha producido un error al intentar abrir el certificado actual para el dominio {domain:s} (archivo: {file:s}), razón: {reason:s}",
- "certmanager_cert_install_success_selfsigned": "¡Se ha instalado correctamente un certificado autofirmado para el dominio {domain:s}!",
- "certmanager_cert_install_success": "¡Se ha instalado correctamente un certificado Let's Encrypt para el dominio {domain:s}!",
- "certmanager_cert_renew_success": "¡Se ha renovado correctamente el certificado Let's Encrypt para el dominio {domain:s}!",
- "certmanager_old_letsencrypt_app_detected": "\nYunohost ha detectado que la aplicación 'letsencrypt' está instalada, esto produce conflictos con las nuevas funciones de administración de certificados integradas en Yunohost. Si desea utilizar las nuevas funciones integradas, ejecute los siguientes comandos para migrar su instalación:\n\n Yunohost app remove letsencrypt\n Yunohost domain cert-install\n\nP.D.: esto intentará reinstalar los certificados para todos los dominios con un certificado Let's Encrypt o con un certificado autofirmado",
- "certmanager_hit_rate_limit": "Se han emitido demasiados certificados recientemente para el conjunto de dominios {domain:s}. Por favor, inténtelo de nuevo más tarde. Consulte https://letsencrypt.org/docs/rate-limits/ para obtener más detalles",
+ "yunohost_configured": "YunoHost está ahora configurado",
+ "yunohost_installing": "Instalando YunoHost…",
+ "yunohost_not_installed": "YunoHost no está correctamente instalado. Ejecute «yunohost tools postinstall»",
+ "mailbox_used_space_dovecot_down": "El servicio de buzón Dovecot debe estar activo si desea recuperar el espacio usado del buzón",
+ "certmanager_attempt_to_replace_valid_cert": "Está intentando sobrescribir un certificado correcto y válido para el dominio {domain}! (Use --force para omitir este mensaje)",
+ "certmanager_domain_cert_not_selfsigned": "El certificado para el dominio {domain} no es un certificado autofirmado. ¿Está seguro de que quiere reemplazarlo? (Use «--force» para hacerlo)",
+ "certmanager_certificate_fetching_or_enabling_failed": "El intento de usar el nuevo certificado para {domain} no ha funcionado…",
+ "certmanager_attempt_to_renew_nonLE_cert": "El certificado para el dominio «{domain}» no ha sido emitido por Let's Encrypt. ¡No se puede renovar automáticamente!",
+ "certmanager_attempt_to_renew_valid_cert": "¡El certificado para el dominio «{domain}» no está a punto de expirar! (Puede usar --force si sabe lo que está haciendo)",
+ "certmanager_domain_http_not_working": "Parece que no se puede acceder al dominio {domain} a través de HTTP. Por favor compruebe en los diagnósticos la categoría 'Web'para más información. (Si sabe lo que está haciendo, utilice '--no-checks' para no realizar estas comprobaciones.)",
+ "certmanager_domain_dns_ip_differs_from_public_ip": "El registro DNS 'A' para el dominio '{domain}' es diferente de la IP de este servidor. Por favor comprueba los 'registros DNS' (básicos) la categoría de diagnósticos para mayor información. Si recientemente modificó su registro 'A', espere a que se propague (algunos verificadores de propagación de DNS están disponibles en línea). (Si sabe lo que está haciendo, use '--no-checks' para desactivar esos cheques)",
+ "certmanager_cannot_read_cert": "Se ha producido un error al intentar abrir el certificado actual para el dominio {domain} (archivo: {file}), razón: {reason}",
+ "certmanager_cert_install_success_selfsigned": "Instalado correctamente un certificado autofirmado para el dominio «{domain}»",
+ "certmanager_cert_install_success": "Instalado correctamente un certificado de Let's Encrypt para el dominio «{domain}»",
+ "certmanager_cert_renew_success": "Renovado correctamente el certificado de Let's Encrypt para el dominio «{domain}»",
+ "certmanager_hit_rate_limit": "Se han emitido demasiados certificados recientemente para este conjunto exacto de dominios {domain}. Pruebe de nuevo más tarde. Vea para más detalles https://letsencrypt.org/docs/rate-limits/",
"certmanager_cert_signing_failed": "No se pudo firmar el nuevo certificado",
- "certmanager_no_cert_file": "No se puede leer el certificado para el dominio {domain:s} (archivo: {file:s})",
- "certmanager_conflicting_nginx_file": "No se puede preparar el dominio para el desafío ACME: el archivo de configuración nginx {filepath:s} está en conflicto y debe ser eliminado primero",
- "domain_cannot_remove_main": "No se puede eliminar el dominio principal. Primero debe establecer un nuevo dominio principal",
- "certmanager_self_ca_conf_file_not_found": "No se ha encontrado el archivo de configuración para la autoridad de autofirma (file: {file:s})",
- "certmanager_unable_to_parse_self_CA_name": "No se puede procesar el nombre de la autoridad de autofirma (file: {file:s} 1)",
+ "certmanager_no_cert_file": "No se pudo leer el certificado para el dominio {domain} (archivo: {file})",
+ "domain_cannot_remove_main": "No puede eliminar '{domain}' ya que es el dominio principal, primero debe configurar otro dominio como el dominio principal usando 'yunohost domain main-domain -n /etc/resolv.conf
no apunta a 127.0.0.1
.",
+ "diagnosis_dns_missing_record": "Según la configuración DNS recomendada, deberías añadir un registro DNS\ntipo: {type}\nnombre: {name}\nvalor: {value}",
+ "diagnosis_diskusage_low": "El almacenamiento {mountpoint} (en dispositivo {device}) solo tiene {free} ({free_percent}%) de espacio disponible. Ten cuidado.",
+ "diagnosis_services_bad_status_tip": "Puedes intentar reiniciar el servicio, y si no funciona, echar un vistazo a los logs del servicio usando 'yunohost service log {service}' o a través de la sección 'Servicios' en webadmin.",
+ "diagnosis_ip_connected_ipv6": "¡El servidor está conectado a internet a través de IPv6!",
+ "diagnosis_ip_no_ipv6": "El servidor no cuenta con IPv6 funcional.",
+ "diagnosis_ip_dnsresolution_working": "¡DNS no está funcionando!",
+ "diagnosis_ip_broken_dnsresolution": "Parece que no funciona la resolución de nombre de dominio por alguna razón... ¿Hay algún firewall bloqueando peticiones DNS?",
+ "diagnosis_ip_weird_resolvconf": "La resolución de nombres de dominio DNS funciona, aunque parece que estás utilizando /etc/resolv.conf
personalizada.",
+ "diagnosis_ip_weird_resolvconf_details": "El fichero /etc/resolv.conf
debería ser un enlace simbólico a /etc/resolvconf/run/resolv.conf
a su vez debe apuntar a 127.0.0.1
(dnsmasq). Si lo que quieres es configurar la resolución DNS manualmente, porfavor modifica /etc/resolv.dnsmasq.conf
.",
+ "diagnosis_dns_good_conf": "La configuración de registros DNS es correcta para {domain} (categoría {category})",
+ "diagnosis_dns_bad_conf": "Algunos registros DNS faltan o están mal cofigurados para el dominio {domain} (categoría {category})",
+ "diagnosis_dns_discrepancy": "El siguiente registro DNS parace que no sigue la configuración recomendada {type}
{name}
{current}
{value}
",
+ "diagnosis_services_bad_status": "El servicio {service} está {status} :(",
+ "diagnosis_diskusage_verylow": "El almacenamiento {mountpoint} (en el dispositivo {device}) sólo tiene {free} ({free_percent}%) de espacio disponible. Deberías considerar la posibilidad de limpiar algo de espacio.",
+ "diagnosis_diskusage_ok": "¡El almacenamiento {mountpoint} (en el dispositivo {device}) todavía tiene {free} ({free_percent}%) de espacio libre!",
+ "diagnosis_services_conf_broken": "¡Mala configuración para el servicio {service}!",
+ "diagnosis_services_running": "¡El servicio {service} está en ejecución!",
+ "diagnosis_failed": "Error al obtener el resultado del diagnóstico para la categoría '{category}': {error}",
+ "diagnosis_ip_connected_ipv4": "¡El servidor está conectado a internet a través de IPv4!",
+ "diagnosis_security_vulnerable_to_meltdown_details": "Para corregir esto, debieras actualizar y reiniciar tu sistema para cargar el nuevo kernel de Linux (o contacta tu proveedor si esto no funciona). Mas información en https://meltdownattack.com/ .",
+ "diagnosis_ram_verylow": "Al sistema le queda solamente {available} ({available_percent}%) de RAM! (De un total de {total})",
+ "diagnosis_ram_low": "Al sistema le queda {available} ({available_percent}%) de RAM de un total de {total}. Cuidado.",
+ "diagnosis_ram_ok": "El sistema aun tiene {available} ({available_percent}%) de RAM de un total de {total}.",
+ "diagnosis_swap_none": "El sistema no tiene mas espacio de intercambio. Considera agregar por lo menos {recommended} de espacio de intercambio para evitar que el sistema se quede sin memoria.",
+ "diagnosis_swap_notsomuch": "Al sistema le queda solamente {total} de espacio de intercambio. Considera agregar al menos {recommended} para evitar que el sistema se quede sin memoria.",
+ "diagnosis_mail_outgoing_port_25_blocked": "El puerto de salida 25 parece estar bloqueado. Intenta desbloquearlo con el panel de configuración de tu proveedor de servicios de Internet (o proveedor de halbergue). Mientras tanto, el servidor no podrá enviar correos electrónicos a otros servidores.",
+ "diagnosis_regenconf_allgood": "Todos los archivos de configuración están en linea con la configuración recomendada!",
+ "diagnosis_regenconf_manually_modified": "El archivo de configuración {file} parece que ha sido modificado manualmente.",
+ "diagnosis_regenconf_manually_modified_details": "¡Esto probablemente esta BIEN si sabes lo que estás haciendo! YunoHost dejará de actualizar este fichero automáticamente... Pero ten en cuenta que las actualizaciones de YunoHost pueden contener importantes cambios que están recomendados. Si quieres puedes comprobar las diferencias mediante {global}
",
+ "diagnosis_mail_outgoing_port_25_ok": "El servidor de email SMTP puede mandar emails (puerto saliente 25 no está bloqueado).",
+ "diagnosis_mail_outgoing_port_25_blocked_details": "Primeramente deberías intentar desbloquear el puerto de salida 25 en la interfaz de control de tu router o en la interfaz de tu provedor de hosting. (Algunos hosting pueden necesitar que les abras un ticket de soporte para esto).",
+ "diagnosis_swap_tip": "Por favor tenga cuidado y sepa que si el servidor contiene swap en una tarjeta SD o un disco duro de estado sólido, esto reducirá drásticamente la vida útil del dispositivo.",
+ "diagnosis_domain_expires_in": "{domain} expira en {days} días.",
+ "diagnosis_domain_expiration_error": "¡Algunos dominios expirarán MUY PRONTO!",
+ "diagnosis_domain_expiration_warning": "¡Algunos dominios expirarán pronto!",
+ "diagnosis_domain_expiration_success": "Sus dominios están registrados y no expirarán pronto.",
+ "diagnosis_domain_expiration_not_found_details": "¿Parece que la información de WHOIS para el dominio {domain} no contiene información sobre la fecha de expiración?",
+ "diagnosis_domain_not_found_details": "¡El dominio {domain} no existe en la base de datos WHOIS o ha expirado!",
+ "diagnosis_domain_expiration_not_found": "No se pudo revisar la fecha de expiración para algunos dominios",
+ "diagnosis_dns_try_dyndns_update_force": "La configuración DNS de este dominio debería ser administrada automáticamente por Yunohost. Si no es el caso, puede intentar forzar una actualización ejecutando {local}
",
+ "diagnosis_ip_no_ipv6_tip": "Tener IPv6 funcionando no es obligatorio para que su servidor funcione, pero es mejor para la salud del Internet en general. IPv6 debería ser configurado automáticamente por el sistema o su proveedor si está disponible. De otra manera, es posible que tenga que configurar varias cosas manualmente, tal y como se explica en esta documentación https://yunohost.org/#/ipv6. Si no puede habilitar IPv6 o si parece demasiado técnico, puede ignorar esta advertencia con toda seguridad.",
+ "diagnosis_display_tip": "Para ver los problemas encontrados, puede ir a la sección de diagnóstico del webadmin, o ejecutar 'yunohost diagnosis show --issues --human-readable' en la línea de comandos.",
+ "diagnosis_package_installed_from_sury_details": "Algunos paquetes fueron accidentalmente instalados de un repositorio de terceros llamado Sury. El equipo Yunohost ha mejorado la estrategia para manejar estos pquetes, pero es posible que algunas instalaciones con aplicaciones de PHP7.3 en Stretch puedan tener algunas inconsistencias. Para solucionar esta situación, debería intentar ejecutar el siguiente comando: {item}
está marcado como maligno en {blacklist_name}",
+ "diagnosis_mail_blacklist_ok": "Las IP y los dominios utilizados en este servidor no parece que estén en ningún listado maligno (blacklist)",
+ "diagnosis_mail_fcrdns_different_from_ehlo_domain_details": "El DNS inverso actual es: {rdns_domain}
{ehlo_domain}
",
+ "diagnosis_mail_fcrdns_different_from_ehlo_domain": "La resolución de DNS inverso no está correctamente configurada mediante IPv{ipversion}. Algunos correos pueden fallar al ser enviados o pueden ser marcados como basura.",
+ "diagnosis_mail_fcrdns_nok_alternatives_6": "Algunos proveedores no permiten configurar el DNS inverso (o su funcionalidad puede estar rota...). Si tu DNS inverso está configurado correctamente para IPv4, puedes intentar deshabilitarlo para IPv6 cuando envies correos mediante el comando {ehlo_domain}
en la interfaz de internet de tu router o en la de tu proveedor de internet. (Algunos proveedores de internet en ocasiones necesitan que les solicites un ticket de soporte para ello).",
+ "diagnosis_mail_fcrdns_dns_missing": "No hay definida ninguna DNS inversa mediante IPv{ipversion}. Algunos correos puede que fallen al enviarse o puede que se marquen como basura.",
+ "diagnosis_mail_fcrdns_ok": "¡Las DNS inversas están bien configuradas!",
+ "diagnosis_mail_ehlo_could_not_diagnose_details": "Error: {error}",
+ "diagnosis_mail_ehlo_could_not_diagnose": "No pudimos diagnosticar si el servidor de correo postfix es accesible desde el exterior utilizando IPv{ipversion}.",
+ "diagnosis_mail_ehlo_wrong_details": "El EHLO recibido por el diagnosticador remoto de IPv{ipversion} es diferente del dominio de tu servidor.{wrong_ehlo}
{right_ehlo}
{mountpoint}
(روی دستگاه {device}
) فقط {free} ({free_percent}%) فضا باقی مانده(از {total}). مراقب باشید.",
+ "diagnosis_diskusage_verylow": "ذخیره سازی {mountpoint}
(روی دستگاه {device}
) فقط {free} ({free_percent}%) فضا باقی مانده (از {total}). شما واقعاً باید پاکسازی فضای ذخیره ساز را در نظر بگیرید!",
+ "diagnosis_services_bad_status_tip": "می توانید سعی کنید سرویس را راه اندازی مجدد کنید، و اگر کار نمی کند ، نگاهی داشته باشید بهسرویس در webadmin ثبت می شود (از خط فرمان ، می توانید این کار را انجام دهید با {type}
{name}
{current}
{value}
",
+ "diagnosis_dns_missing_record": "با توجه به پیکربندی DNS توصیه شده ، باید یک رکورد DNS با اطلاعات زیر اضافه کنید.{type}
{name}
{value}
",
+ "diagnosis_dns_bad_conf": "برخی از سوابق DNS برای دامنه {domain} (دسته {category}) وجود ندارد یا نادرست است",
+ "diagnosis_dns_good_conf": "سوابق DNS برای دامنه {domain} (دسته {category}) به درستی پیکربندی شده است",
+ "diagnosis_ip_weird_resolvconf_details": "پرونده /etc/resolv.conf
باید یک پیوند همراه برای /etc/resolvconf/run/resolv.conf
خود اشاره می کند به 127.0.0.1
(dnsmasq). اگر می خواهید راه حل های DNS را به صورت دستی پیکربندی کنید ، لطفاً ویرایش کنید /etc/resolv.dnsmasq.conf
.",
+ "diagnosis_ip_weird_resolvconf": "اینطور که پیداست تفکیک پذیری DNS کار می کند ، اما به نظر می رسد از سفارشی استفاده می کنید /etc/resolv.conf
.",
+ "diagnosis_ip_broken_resolvconf": "به نظر می رسد تفکیک پذیری نام دامنه در سرور شما شکسته شده است ، که به نظر می رسد مربوط به /etc/resolv.conf
و اشاره نکردن به 127.0.0.1
میباشد.",
+ "diagnosis_ip_broken_dnsresolution": "به نظر می رسد تفکیک پذیری نام دامنه به دلایلی خراب شده است... آیا فایروال درخواست های DNS را مسدود می کند؟",
+ "diagnosis_ip_dnsresolution_working": "تفکیک پذیری نام دامنه کار می کند!",
+ "diagnosis_ip_not_connected_at_all": "به نظر می رسد سرور اصلا به اینترنت متصل نیست !؟",
+ "diagnosis_ip_local": "IP محلی: {local}
",
+ "diagnosis_ip_global": "IP جهانی: {global}
",
+ "diagnosis_ip_no_ipv6_tip": "داشتن یک IPv6 فعال برای کار سرور شما اجباری نیست ، اما برای سلامت اینترنت به طور کلی بهتر است. IPv6 معمولاً باید در صورت موجود بودن توسط سیستم یا ارائه دهنده اینترنت شما به طور خودکار پیکربندی شود. در غیر این صورت ، ممکن است لازم باشد چند مورد را به صورت دستی پیکربندی کنید ، همانطور که در اسناد اینجا توضیح داده شده است: https://yunohost.org/#/ipv6.اگر نمی توانید IPv6 را فعال کنید یا اگر برای شما بسیار فنی به نظر می رسد ، می توانید با خیال راحت این هشدار را نادیده بگیرید.",
+ "diagnosis_ip_no_ipv6": "سرور IPv6 کار نمی کند.",
+ "diagnosis_ip_connected_ipv6": "سرور از طریق IPv6 به اینترنت متصل است!",
+ "diagnosis_ip_no_ipv4": "سرور IPv4 کار نمی کند.",
+ "diagnosis_ip_connected_ipv4": "سرور از طریق IPv4 به اینترنت متصل است!",
+ "diagnosis_no_cache": "هنوز هیچ حافظه نهانی معاینه و عیب یابی برای دسته '{category}' وجود ندارد",
+ "diagnosis_failed": "نتیجه معاینه و عیب یابی برای دسته '{category}' واکشی نشد: {error}",
+ "diagnosis_everything_ok": "همه چیز برای {category} خوب به نظر می رسد!",
+ "diagnosis_found_warnings": "مورد (های) {warnings} یافت شده که می تواند دسته {category} را بهبود بخشد.",
+ "diagnosis_found_errors_and_warnings": "{errors} مسائل مهم (و {warnings} هشدارها) مربوط به {category} پیدا شد!",
+ "diagnosis_found_errors": "{errors} مشکلات مهم مربوط به {category} پیدا شد!",
+ "diagnosis_ignored_issues": "(+ {nb_ignored} مسئله (ها) نادیده گرفته شده)",
+ "diagnosis_cant_run_because_of_dep": "در حالی که مشکلات مهمی در ارتباط با {dep} وجود دارد ، نمی توان عیب یابی را برای {category} اجرا کرد.",
+ "diagnosis_cache_still_valid": "(حافظه پنهان هنوز برای عیب یابی {category} معتبر است. هنوز دوباره تشخیص داده نمی شود!)",
+ "diagnosis_failed_for_category": "عیب یابی برای دسته '{category}' ناموفق بود: {error}",
+ "diagnosis_display_tip": "برای مشاهده مسائل پیدا شده ، می توانید به بخش تشخیص webadmin بروید یا از خط فرمان 'yunohost diagnosis show --issues --human-readable' را اجرا کنید.",
+ "diagnosis_package_installed_from_sury_details": "برخی از بسته ها ناخواسته از مخزن شخص ثالث به نام Sury نصب شده اند. تیم YunoHost استراتژی مدیریت این بسته ها را بهبود بخشیده ، اما انتظار می رود برخی از تنظیماتی که برنامه های PHP7.3 را در حالی که هنوز بر روی Stretch نصب شده اند نصب کرده اند ، ناسازگاری های باقی مانده ای داشته باشند. برای رفع این وضعیت ، باید دستور زیر را اجرا کنید: {file}
به صورت دستی اصلاح شده است.",
+ "diagnosis_regenconf_allgood": "همه فایلهای پیکربندی مطابق با تنظیمات توصیه شده است!",
+ "diagnosis_mail_queue_too_big": "تعداد زیادی ایمیل معلق در صف پست ({nb_pending} ایمیل)",
+ "diagnosis_mail_queue_unavailable_details": "خطا: {error}",
+ "diagnosis_mail_queue_unavailable": "نمی توان با تعدادی از ایمیل های معلق در صف مشورت کرد",
+ "diagnosis_mail_queue_ok": "{nb_pending} ایمیل های معلق در صف های ایمیل",
+ "diagnosis_mail_blacklist_website": "پس از شناسایی دلیل لیست شدن و رفع آن، با خیال راحت درخواست کنید IP یا دامنه شما حذف شود از {blacklist_website}",
+ "diagnosis_mail_blacklist_reason": "دلیل لیست سیاه: {reason}",
+ "diagnosis_mail_blacklist_listed_by": "IP یا دامنه شما {item}
در لیست سیاه {blacklist_name} قرار دارد",
+ "diagnosis_mail_blacklist_ok": "به نظر می رسد IP ها و دامنه های مورد استفاده این سرور در لیست سیاه قرار ندارند",
+ "diagnosis_mail_fcrdns_different_from_ehlo_domain_details": "DNS معکوس فعلی: {rdns_domain}
{ehlo_domain}
",
+ "diagnosis_mail_fcrdns_different_from_ehlo_domain": "DNS معکوس به درستی در IPv{ipversion} پیکربندی نشده است. ممکن است برخی از ایمیل ها تحویل داده نشوند یا به عنوان هرزنامه پرچم گذاری شوند.",
+ "diagnosis_mail_fcrdns_nok_alternatives_6": "برخی از ارائه دهندگان به شما اجازه نمی دهند DNS معکوس خود را پیکربندی کنید (یا ممکن است ویژگی آنها شکسته شود...). اگر DNS معکوس شما به درستی برای IPv4 پیکربندی شده است، با استفاده از آن می توانید هنگام ارسال ایمیل، استفاده از IPv6 را غیرفعال کنید. {ehlo_domain}
در رابط روتر اینترنت یا رابط ارائه دهنده میزبانی تان. (ممکن است برخی از ارائه دهندگان میزبانی از شما بخواهند که برای این کار تیکت پشتیبانی ارسال کنید).",
+ "diagnosis_mail_fcrdns_dns_missing": "در IPv{ipversion} هیچ DNS معکوسی تعریف نشده است. ممکن است برخی از ایمیل ها تحویل داده نشوند یا به عنوان هرزنامه پرچم گذاری شوند.",
+ "diagnosis_mail_fcrdns_ok": "DNS معکوس شما به درستی پیکربندی شده است!",
+ "diagnosis_mail_ehlo_could_not_diagnose_details": "خطا: {error}",
+ "diagnosis_mail_ehlo_could_not_diagnose": "نمی توان تشخیص داد که آیا سرور ایمیل postfix از خارج در IPv{ipversion} قابل دسترسی است یا خیر.",
+ "diagnosis_mail_ehlo_wrong_details": "EHLO دریافت شده توسط تشخیص دهنده از راه دور در IPv{ipversion} با دامنه سرور شما متفاوت است.{wrong_ehlo}
{right_ehlo}
{mountpoint}
(روی دستگاه {device}
) هنوز {free} فضا در دسترس دارد ({free_percent}%) فضای باقی مانده (از {total})!",
+ "diagnosis_http_nginx_conf_not_up_to_date": "به نظر می رسد که پیکربندی nginx این دامنه به صورت دستی تغییر کرده است و از تشخیص YunoHost در صورت دسترسی به HTTP جلوگیری می کند.",
+ "diagnosis_http_partially_unreachable": "به نظر می رسد که دامنه {domain} از طریق HTTP از خارج از شبکه محلی در IPv{failed} غیرقابل دسترسی است، اگرچه در IPv{passed} کار می کند.",
+ "diagnosis_http_unreachable": "به نظر می رسد دامنه {domain} از خارج از شبکه محلی از طریق HTTP قابل دسترسی نیست.",
+ "diagnosis_http_bad_status_code": "به نظر می رسد دستگاه دیگری (شاید روتر اینترنتی شما) به جای سرور شما پاسخ داده است./etc/resolv.conf
personnalisé.",
+ "diagnosis_ip_weird_resolvconf_details": "Le fichier /etc/resolv.conf
doit être un lien symbolique vers /etc/resolvconf/run/resolv.conf
lui-même pointant vers 127.0.0.1
(dnsmasq). Si vous souhaitez configurer manuellement les résolveurs DNS, veuillez modifier /etc/resolv.dnsmasq.conf
.",
+ "diagnosis_dns_missing_record": "Selon la configuration DNS recommandée, vous devez ajouter un enregistrement DNS{type}
{name}
{value}
",
+ "diagnosis_diskusage_ok": "L'espace de stockage {mountpoint}
(sur le périphérique {device}
) a encore {free} ({free_percent}%) espace restant (sur {total}) !",
+ "diagnosis_ram_ok": "Le système dispose encore de {available} ({available_percent}%) de RAM sur {total}.",
+ "diagnosis_regenconf_allgood": "Tous les fichiers de configuration sont conformes à la configuration recommandée !",
+ "diagnosis_security_vulnerable_to_meltdown": "Vous semblez vulnérable à la vulnérabilité de sécurité critique de Meltdown",
+ "diagnosis_basesystem_host": "Le serveur utilise Debian {debian_version}",
+ "diagnosis_basesystem_kernel": "Le serveur utilise le noyau Linux {kernel_version}",
+ "diagnosis_basesystem_ynh_single_version": "{package} version : {version} ({repo})",
+ "diagnosis_basesystem_ynh_main_version": "Le serveur utilise YunoHost {main_version} ({repo})",
+ "diagnosis_basesystem_ynh_inconsistent_versions": "Vous exécutez des versions incohérentes des packages YunoHost ... très probablement en raison d'une mise à niveau échouée ou partielle.",
+ "diagnosis_failed_for_category": "Échec du diagnostic pour la catégorie '{category}': {error}",
+ "diagnosis_cache_still_valid": "(Le cache est encore valide pour le diagnostic {category}. Il ne sera pas re-diagnostiqué pour le moment !)",
+ "diagnosis_ignored_issues": "(+ {nb_ignored} problème(s) ignoré(s))",
+ "diagnosis_found_warnings": "Trouvé {warnings} objet(s) pouvant être amélioré(s) pour {category}.",
+ "diagnosis_everything_ok": "Tout semble bien pour {category} !",
+ "diagnosis_failed": "Échec de la récupération du résultat du diagnostic pour la catégorie '{category}' : {error}",
+ "diagnosis_ip_connected_ipv4": "Le serveur est connecté à Internet en IPv4 !",
+ "diagnosis_ip_no_ipv4": "Le serveur ne dispose pas d'une adresse IPv4.",
+ "diagnosis_ip_connected_ipv6": "Le serveur est connecté à Internet en IPv6 !",
+ "diagnosis_ip_no_ipv6": "Le serveur ne dispose pas d'une adresse IPv6.",
+ "diagnosis_ip_dnsresolution_working": "La résolution de nom de domaine fonctionne !",
+ "diagnosis_ip_broken_dnsresolution": "La résolution du nom de domaine semble interrompue pour une raison quelconque ... Un pare-feu bloque-t-il les requêtes DNS ?",
+ "diagnosis_ip_broken_resolvconf": "La résolution du nom de domaine semble être cassée sur votre serveur, ce qui semble lié au fait que /etc/resolv.conf
ne pointe pas vers 127.0.0.1
.",
+ "diagnosis_dns_good_conf": "Les enregistrements DNS sont correctement configurés pour le domaine {domain} (catégorie {category})",
+ "diagnosis_dns_bad_conf": "Certains enregistrements DNS sont manquants ou incorrects pour le domaine {domain} (catégorie {category})",
+ "diagnosis_dns_discrepancy": "Cet enregistrement DNS ne semble pas correspondre à la configuration recommandée : {type}
{name}
{current}
{value}
",
+ "diagnosis_services_bad_status": "Le service {service} est {status} :-(",
+ "diagnosis_diskusage_verylow": "L'espace de stockage {mountpoint}
(sur l'appareil {device}
) ne dispose que de {free} ({free_percent}%) espace restant (sur {total}). Vous devriez vraiment envisager de nettoyer de l'espace !",
+ "diagnosis_diskusage_low": "L'espace de stockage {mountpoint}
(sur l'appareil {device}
) ne dispose que de {free} ({free_percent}%) espace restant (sur {total}). Faites attention.",
+ "diagnosis_ram_verylow": "Le système ne dispose plus que de {available} ({available_percent}%)! (sur {total})",
+ "diagnosis_ram_low": "Le système n'a plus de {available} ({available_percent}%) RAM sur {total}. Faites attention.",
+ "diagnosis_swap_none": "Le système n'a aucun espace de swap. Vous devriez envisager d'ajouter au moins {recommended} de swap pour éviter les situations où le système manque de mémoire.",
+ "diagnosis_swap_notsomuch": "Le système ne dispose que de {total} de swap. Vous devez envisager d'avoir au moins {recommended} pour éviter les situations où le système manque de mémoire.",
+ "diagnosis_swap_ok": "Le système dispose de {total} de swap !",
+ "diagnosis_regenconf_manually_modified": "Le fichier de configuration {file}
semble avoir été modifié manuellement.",
+ "diagnosis_regenconf_manually_modified_details": "C'est probablement OK si vous savez ce que vous faites ! YunoHost cessera de mettre à jour ce fichier automatiquement ... Mais attention, les mises à jour de YunoHost pourraient contenir d'importantes modifications recommandées. Si vous le souhaitez, vous pouvez inspecter les différences avec {ehlo_domain}
dans votre interface de routeur Internet ou votre interface d'hébergement. (Certains hébergeurs peuvent vous demander de leur envoyer un ticket de support pour cela).",
+ "diagnosis_mail_fcrdns_different_from_ehlo_domain": "Le DNS inverse n'est pas correctement configuré en IPv{ipversion}. Certains emails seront peut-être refusés ou considérés comme des spam.",
+ "diagnosis_mail_blacklist_ok": "Les adresses IP et les domaines utilisés par ce serveur ne semblent pas être sur liste noire",
+ "diagnosis_mail_blacklist_reason": "La raison de la liste noire est : {reason}",
+ "diagnosis_mail_blacklist_website": "Après avoir identifié la raison pour laquelle vous êtes répertorié et l'avoir corrigé, n'hésitez pas à demander le retrait de votre IP ou domaine sur {blacklist_website}",
+ "diagnosis_mail_queue_ok": "{nb_pending} emails en attente dans les files d'attente de messagerie",
+ "diagnosis_mail_queue_unavailable_details": "Erreur : {error}",
+ "diagnosis_mail_queue_too_big": "Trop d'emails en attente dans la file d'attente ({nb_pending} emails)",
+ "global_settings_setting_smtp_allow_ipv6": "Autoriser l'utilisation d'IPv6 pour recevoir et envoyer du courrier",
+ "diagnosis_display_tip": "Pour voir les problèmes détectés, vous pouvez accéder à la section Diagnostic du webadmin ou exécuter 'yunohost diagnosis show --issues --human-readable' à partir de la ligne de commande.",
+ "diagnosis_ip_global": "IP globale : {global}
",
+ "diagnosis_ip_local": "IP locale : {local}
",
+ "diagnosis_dns_point_to_doc": "Veuillez consulter la documentation sur https://yunohost.org/dns_config si vous avez besoin d'aide pour configurer les enregistrements DNS.",
+ "diagnosis_mail_outgoing_port_25_blocked_relay_vpn": "Certains fournisseurs ne vous laisseront pas débloquer le port sortant 25 parce qu'ils ne se soucient pas de la neutralité du Net. {wrong_ehlo}
{right_ehlo}
{rdns_domain}
{ehlo_domain}
",
+ "diagnosis_mail_blacklist_listed_by": "Votre IP ou domaine {item}
est sur liste noire sur {blacklist_name}",
+ "diagnosis_mail_queue_unavailable": "Impossible de consulter le nombre d'emails en attente dans la file d'attente",
+ "diagnosis_ports_partially_unreachable": "Le port {port} n'est pas accessible de l'extérieur en IPv{failed}.",
+ "diagnosis_http_hairpinning_issue": "Votre réseau local ne semble pas supporter l'hairpinning.",
+ "diagnosis_http_hairpinning_issue_details": "C'est probablement à cause de la box/routeur de votre fournisseur d'accès internet. Par conséquent, les personnes extérieures à votre réseau local pourront accéder à votre serveur comme prévu, mais pas les personnes internes au réseau local (comme vous, probablement ?) si elles utilisent le nom de domaine ou l'IP globale. Vous pourrez peut-être améliorer la situation en consultant https://yunohost.org/dns_local_network",
+ "diagnosis_http_partially_unreachable": "Le domaine {domain} semble inaccessible en HTTP depuis l'extérieur du réseau local en IPv{failed}, bien qu'il fonctionne en IPv{passed}.",
+ "diagnosis_http_nginx_conf_not_up_to_date": "La configuration Nginx de ce domaine semble avoir été modifiée manuellement et empêche YunoHost de diagnostiquer si elle est accessible en HTTP.",
+ "diagnosis_http_nginx_conf_not_up_to_date_details": "Pour corriger la situation, inspectez la différence avec la ligne de commande en utilisant les outils {type}
{name}
{current}
{value}
",
+ "diagnosis_dns_missing_record": "Facendo caso á configuración DNS recomendada, deberías engadir un rexistro DNS coa seguinte info.{type}
{name}
{value}
",
+ "diagnosis_dns_bad_conf": "Faltan algúns rexistros DNS ou están mal configurados para o dominio {domain} (categoría {category})",
+ "diagnosis_dns_good_conf": "Os rexistros DNS están correctamente configurados para o dominio {domain} (categoría {category})",
+ "diagnosis_ip_weird_resolvconf_details": "O ficheiro /etc/resolv.conf
debería ser unha ligazón simbólica a /etc/resolvconf/run/resolv.conf
apuntando el mesmo a 127.0.0.1
(dnsmasq). Se queres configurar manualmente a resolución DNS, por favor edita /etc/resolv.dnsmasq.conf
.",
+ "diagnosis_ip_weird_resolvconf": "A resolución DNS semella funcionar, mais parecese que estás a utilizar un /etc/resolv.conf
personalizado.",
+ "diagnosis_ip_broken_resolvconf": "A resolución de nomes de dominio semella non funcionar no teu servidor, que parece ter relación con que /etc/resolv.conf
non sinala a 127.0.0.1
.",
+ "diagnosis_ip_broken_dnsresolution": "A resolución de nomes de dominio semella que por algunha razón non funciona... Pode estar o cortalumes bloqueando as peticións DNS?",
+ "diagnosis_ip_dnsresolution_working": "A resolución de nomes de dominio está a funcionar!",
+ "diagnosis_ip_not_connected_at_all": "O servidor semella non ter ningún tipo de conexión a internet!?",
+ "diagnosis_ip_local": "IP local: {local}
",
+ "diagnosis_ip_global": "IP global: {global}
",
+ "diagnosis_ip_no_ipv6_tip": "Que o servidor teña conexión IPv6 non é obrigatorio para que funcione, pero é mellor para o funcionamento de Internet en conxunto. IPv6 debería estar configurado automáticamente no teu sistema ou provedor se está dispoñible. Doutro xeito, poderías ter que configurar manualmente algúns parámetros tal como se explica na documentación: https://yunohost.org/#/ipv6. Se non podes activar IPv6 ou é moi complicado para ti, podes ignorar tranquilamente esta mensaxe.",
+ "diagnosis_ip_no_ipv6": "O servidor non ten conexión IPv6.",
+ "diagnosis_ip_connected_ipv6": "O servidor está conectado a internet a través de IPv6!",
+ "diagnosis_ip_no_ipv4": "O servidor non ten conexión IPv4.",
+ "diagnosis_ip_connected_ipv4": "O servidor está conectado a internet a través de IPv4!",
+ "diagnosis_no_cache": "Aínda non hai datos na caché para '{category}'",
+ "diagnosis_failed": "Non se puido obter o resultado do diagnóstico para '{category}': {error}",
+ "diagnosis_everything_ok": "Semella todo correcto en {category}!",
+ "diagnosis_found_warnings": "Atoparonse {warnings} elemento(s) que poderían optimizarse en {category}.",
+ "diagnosis_services_bad_status": "O servizo {service} está {status} :(",
+ "diagnosis_services_conf_broken": "A configuración do {service} está estragada!",
+ "diagnosis_services_running": "O servizo {service} está en execución!",
+ "diagnosis_domain_expires_in": "{domain} caduca en {days} días.",
+ "diagnosis_domain_expiration_error": "Algúns dominios van caducan MOI PRONTO!",
+ "diagnosis_domain_expiration_warning": "Algúns dominios van caducar pronto!",
+ "diagnosis_domain_expiration_success": "Os teus dominios están rexistrados e non van caducar pronto.",
+ "diagnosis_domain_expiration_not_found_details": "A información WHOIS para o dominio {domain} non semella conter información acerca da data de caducidade?",
+ "diagnosis_domain_not_found_details": "O dominio {domain} non existe na base de datos de WHOIS ou está caducado!",
+ "diagnosis_domain_expiration_not_found": "Non se puido comprobar a data de caducidade para algúns dominios",
+ "diagnosis_dns_try_dyndns_update_force": "A xestión DNS deste dominio debería estar xestionada directamente por YunoHost. Se non fose o caso, podes intentar forzar unha actualización executando {mountpoint}
(no dispositivo {device}
) aínda ten {free} ({free_percent}%) de espazo restante (de {total})!",
+ "diagnosis_diskusage_low": "A almacenaxe {mountpoint}
(no dispositivo {device}
) só lle queda {free} ({free_percent}%) de espazo libre (de {total}). Ten coidado.",
+ "diagnosis_diskusage_verylow": "A almacenaxe {mountpoint}
(no dispositivo {device}
) só lle queda {free} ({free_percent}%) de espazo libre (de {total}). Deberías considerar liberar algún espazo!",
+ "diagnosis_services_bad_status_tip": "Podes intentar reiniciar o servizo, e se isto non funciona, mira os rexistros do servizo na webadmin (desde a liña de comandos con {ehlo_domain}
na interface do teu rúter de internet ou na interface do teu provedor de hospedaxe. (Algúns provedores de hospedaxe poderían pedirche que lle fagas unha solicitude por escrito para isto).",
+ "diagnosis_mail_fcrdns_dns_missing": "Non hai DNS inverso definido en IPv{ipversion}. Algúns emails poderían non ser entregrado ou ser marcados como spam.",
+ "diagnosis_mail_fcrdns_ok": "O DNS inverso está correctamente configurado!",
+ "diagnosis_mail_ehlo_could_not_diagnose_details": "Erro: {error}",
+ "diagnosis_mail_ehlo_could_not_diagnose": "Non se puido determinar se o servidor de email postfix é accesible desde o exterior en IPv{ipversion}.",
+ "diagnosis_mail_ehlo_wrong_details": "O EHLO recibido polo diagnosticador remoto en IPv{ipversion} é diferente ao dominio do teu servidor.{wrong_ehlo}
{right_ehlo}
{file}
semella que foi modificado manualmente.",
+ "diagnosis_regenconf_allgood": "Tódolos ficheiros de configuración seguen a configuración recomendada!",
+ "diagnosis_mail_queue_too_big": "Hai demasiados emails pendentes na cola de correo ({nb_pending} emails)",
+ "diagnosis_mail_queue_unavailable_details": "Erro: {error}",
+ "diagnosis_mail_queue_unavailable": "Non se pode consultar o número de emails pendentes na cola",
+ "diagnosis_mail_queue_ok": "{nb_pending} emails pendentes na cola de correo",
+ "diagnosis_mail_blacklist_website": "Tras ver a razón do bloqueo e arranxalo, considera solicitar que o teu dominio ou IP sexan eliminados de {blacklist_website}",
+ "diagnosis_mail_blacklist_reason": "A razón do bloqueo é: {reason}",
+ "diagnosis_mail_blacklist_listed_by": "O teu dominio ou IP {item}
está na lista de bloqueo {blacklist_name}",
+ "diagnosis_mail_blacklist_ok": "Os IPs e dominios utilizados neste servidor non parecen estar en listas de bloqueo",
+ "diagnosis_mail_fcrdns_different_from_ehlo_domain_details": "DNS inverso actual: {rdns_domain}
{ehlo_domain}
",
+ "diagnosis_mail_fcrdns_different_from_ehlo_domain": "O DNS inverso non está correctamente configurado para IPv{ipversion}. É posible que non se entreguen algúns emails ou sexan marcados como spam.",
+ "diagnosis_mail_fcrdns_nok_alternatives_6": "Algúns provedores non che permiten configurar DNS inverso (ou podería non funcionar...). Se o teu DNS inverso está correctamente configurado para IPv4, podes intentar desactivar o uso de IPv6 ao enviar os emails executando {type}
{name}
{current}
Expected value: {value}",
+ "diagnosis_dns_missing_record": "Stando alla configurazione DNS raccomandata, dovresti aggiungere un record DNS con le seguenti informazioni.
Type: {type}
Name: {name}
Value: {value}",
+ "diagnosis_dns_bad_conf": "Alcuni record DNS sono mancanti o incorretti per il dominio {domain} (categoria {category})",
+ "diagnosis_dns_good_conf": "I recordDNS sono configurati correttamente per il dominio {domain} (categoria {category})",
+ "diagnosis_ip_weird_resolvconf_details": "Il file /etc/resolv.conf
dovrebbe essere un symlink a /etc/resolvconf/run/resolv.conf
che punta a 127.0.0.1
(dnsmasq). Se vuoi configurare manualmente i DNS, modifica /etc/resolv.dnsmasq.conf
.",
+ "diagnosis_ip_weird_resolvconf": "La risoluzione dei nomi di rete sembra funzionare, ma mi pare che tu stia usando un /etc/resolv.conf
personalizzato.",
+ "diagnosis_ip_broken_resolvconf": "La risoluzione dei nomi di rete sembra non funzionare sul tuo server, e sembra collegato a /etc/resolv.conf
che non punta a 127.0.0.1
.",
+ "diagnosis_ip_broken_dnsresolution": "La risoluzione dei nomi di rete sembra non funzionare per qualche ragione... È presente un firewall che blocca le richieste DNS?",
+ "diagnosis_ip_dnsresolution_working": "Risoluzione dei nomi di rete funzionante!",
+ "diagnosis_ip_not_connected_at_all": "Sei sicuro che il server sia collegato ad Internet!?",
+ "diagnosis_ip_local": "IP locale: {local}
",
+ "diagnosis_ip_global": "IP globale: {global}
",
+ "diagnosis_ip_no_ipv6_tip": "Avere IPv6 funzionante non è obbligatorio per far funzionare il server, ma è un bene per Internet stesso. IPv6 dovrebbe essere configurato automaticamente dal sistema o dal tuo provider se è disponibile. Altrimenti, potresti aver bisogno di configurare alcune cose manualmente come è spiegato nella documentazione: https://yunohost.org/#/ipv6. Se non puoi abilitare IPv6 o se ti sembra troppo complicato per te, puoi tranquillamente ignorare questo avvertimento.",
+ "diagnosis_ip_no_ipv6": "Il server non ha IPv6 funzionante.",
+ "diagnosis_ip_connected_ipv6": "Il server è connesso ad Internet tramite IPv6!",
+ "diagnosis_ip_no_ipv4": "Il server non ha IPv4 funzionante.",
+ "diagnosis_ip_connected_ipv4": "Il server è connesso ad Internet tramite IPv4!",
+ "diagnosis_no_cache": "Nessuna diagnosi nella cache per la categoria '{category}'",
+ "diagnosis_found_warnings": "Trovato {warnings} oggetti che potrebbero essere migliorati per {category}.",
+ "diagnosis_failed": "Recupero dei risultati della diagnosi per la categoria '{category}' fallito: {error}",
+ "diagnosis_everything_ok": "Tutto ok per {category}!",
+ "diagnosis_found_errors_and_warnings": "Trovato {errors} problemi (e {warnings} alerts) significativi collegati a {category}!",
+ "diagnosis_found_errors": "Trovato {errors} problemi significativi collegati a {category}!",
+ "diagnosis_ignored_issues": "(+ {nb_ignored} problemi ignorati)",
+ "diagnosis_cant_run_because_of_dep": "Impossibile lanciare la diagnosi per {category} mentre ci sono problemi importanti collegati a {dep}.",
+ "diagnosis_cache_still_valid": "(La cache della diagnosi di {category} è ancora valida. Non la ricontrollo di nuovo per ora!)",
+ "diagnosis_failed_for_category": "Diagnosi fallita per la categoria '{category}:{error}",
+ "diagnosis_display_tip": "Per vedere i problemi rilevati, puoi andare alla sezione Diagnosi del amministratore, o eseguire 'yunohost diagnosis show --issues --human-readable' dalla riga di comando.",
+ "diagnosis_package_installed_from_sury_details": "Alcuni pacchetti sono stati inavvertitamente installati da un repository di terze parti chiamato Sury. Il team di YunoHost ha migliorato la gestione di tali pacchetti, ma ci si aspetta che alcuni setup di app PHP7.3 abbiano delle incompatibilità anche se sono ancora in Stretch. Per sistemare questa situazione, dovresti provare a lanciare il seguente comando: {cmd_to_fix} ",
+ "diagnosis_package_installed_from_sury": "Alcuni pacchetti di sistema dovrebbero fare il downgrade",
+ "diagnosis_mail_ehlo_bad_answer": "Un servizio diverso da SMTP ha risposto sulla porta 25 su IPv{ipversion}",
+ "diagnosis_mail_ehlo_unreachable_details": "Impossibile aprire una connessione sulla porta 25 sul tuo server su IPv{ipversion}. Sembra irraggiungibile.
1. La causa più probabile di questo problema è la porta 25 non correttamente inoltrata al tuo server.
2. Dovresti esser sicuro che il servizio postfix sia attivo.
3. Su setup complessi: assicuratu che nessun firewall o reverse-proxy stia interferendo.",
+ "diagnosis_mail_ehlo_unreachable": "Il server SMTP non è raggiungibile dall'esterno su IPv{ipversion}. Non potrà ricevere email.",
+ "diagnosis_mail_ehlo_ok": "Il server SMTP è raggiungibile dall'esterno e quindi può ricevere email!",
+ "diagnosis_mail_outgoing_port_25_blocked_relay_vpn": "Alcuni provider non ti permettono di aprire la porta 25 in uscita perché non gli importa della Net Neutrality.
- Alcuni mettono a disposizione un alternativa attraverso un mail server relay anche se implica che il relay ha la capacità di leggere il vostro traffico email.
- Un alternativa privacy-friendly è quella di usare una VPN *con un indirizzo IP pubblico dedicato* per bypassare questo tipo di limite. Vedi https://yunohost.org/#/vpn_advantage
- Puoi anche prendere in considerazione di cambiare per un provider pro Net Neutrality",
+ "diagnosis_mail_outgoing_port_25_blocked_details": "Come prima cosa dovresti sbloccare la porta 25 in uscita dall'interfaccia del tuo router internet o del tuo hosting provider. (Alcuni hosting provider potrebbero richiedere l'invio di un ticket di supporto per la richiesta).",
+ "diagnosis_mail_outgoing_port_25_blocked": "Il server SMTP non può inviare email ad altri server perché la porta 25 è bloccata in uscita su IPv{ipversion}.",
+ "diagnosis_mail_outgoing_port_25_ok": "Il server SMTP è abile all'invio delle email (porta 25 in uscita non bloccata).",
+ "diagnosis_swap_tip": "Attenzione. Sii consapevole che se il server ha lo swap su di una memoria SD o un disco SSD, potrebbe drasticamente ridurre la durata di vita del dispositivo.",
+ "diagnosis_swap_ok": "Il sistema ha {total} di memoria swap!",
+ "diagnosis_swap_notsomuch": "Il sistema ha solo {total} di swap. Dovresti considerare almeno di aggiungere {recommended} di memoria swap per evitare situazioni dove il sistema esaurisce la memoria.",
+ "diagnosis_swap_none": "Il sistema non ha lo swap. Dovresti considerare almeno di aggiungere {recommended} di memoria swap per evitare situazioni dove il sistema esaurisce la memoria.",
+ "diagnosis_ram_ok": "Il sistema ha ancora {available} ({available_percent}%) di RAM disponibile su {total}.",
+ "diagnosis_ram_low": "Il sistema ha solo {available} ({available_percent}%) di RAM disponibile (su {total}). Fa attenzione.",
+ "diagnosis_ram_verylow": "Il sistema ha solo {available} ({available_percent}%) di RAM disponibile (su {total})",
+ "diagnosis_diskusage_ok": "Lo storage {mountpoint}
(nel device {device}
ha solo {free} ({free_percent}%) di spazio libero rimanente (su {total})!",
+ "diagnosis_diskusage_low": "Lo storage {mountpoint}
(nel device {device}
ha solo {free} ({free_percent}%) di spazio libero rimanente (su {total}). Fa attenzione.",
+ "diagnosis_diskusage_verylow": "Lo storage {mountpoint}
(nel device {device}
ha solo {free} ({free_percent}%) di spazio libero rimanente (su {total}). Dovresti seriamente considerare di fare un po' di pulizia!",
+ "diagnosis_mail_fcrdns_nok_details": "Dovresti prima configurare il DNS inverso con {ehlo_domain}
nell'interfaccia del tuo router internet o del tuo hosting provider. (Alcuni hosting provider potrebbero richiedere l'invio di un ticket di supporto per la richiesta).",
+ "diagnosis_mail_fcrdns_dns_missing": "Nessun DNS inverso è configurato per IPv{ipversion}. Alcune email potrebbero non essere inviate o segnalate come spam.",
+ "diagnosis_mail_fcrdns_ok": "Il tuo DNS inverso è configurato correttamente!",
+ "diagnosis_mail_ehlo_could_not_diagnose_details": "Errore: {error}",
+ "diagnosis_mail_ehlo_could_not_diagnose": "Non è possibile verificare se il server mail postfix è raggiungibile dall'esterno su IPv{ipversion}.",
+ "diagnosis_mail_ehlo_wrong": "Un server mail SMTP diverso sta rispondendo su IPv{ipversion}. Probabilmente il tuo server non può ricevere email.",
+ "diagnosis_mail_ehlo_bad_answer_details": "Potrebbe essere un'altra macchina a rispondere al posto del tuo server.",
+ "diagnosis_mail_fcrdns_nok_alternatives_4": "Alcuni provider non ti permettono di configurare un DNS inverso (o la loro configurazione non funziona...). Se stai avendo problemi a causa di ciò, considera le seguenti soluzioni:
- Alcuni ISP mettono a disposizione un alternativa attraverso un mail server relay anche se implica che il relay ha la capacità di leggere il vostro traffico email.
- Un alternativa privacy-friendly è quella di usare una VPN *con un indirizzo IP pubblico dedicato* per bypassare questo tipo di limite. Vedi https://yunohost.org/#/vpn_advantage
- Puoi anche prendere in considerazione di cambiare internet provider",
+ "diagnosis_mail_ehlo_wrong_details": "L'EHLO ricevuto dalla diagnostica remota su IPv{ipversion} è differente dal dominio del tuo server.
EHLO ricevuto: {wrong_ehlo}
EHLO atteso: {right_ehlo}
La causa più comune di questo problema è la porta 25 non correttamente inoltrata al tuo server. Oppure assicurati che nessun firewall o reverse-proxy stia interferendo.",
+ "diagnosis_mail_blacklist_ok": "Gli IP e i domini utilizzati da questo server non sembrano essere nelle blacklist",
+ "diagnosis_mail_fcrdns_different_from_ehlo_domain_details": "DNS invero corrente: {rdns_domain}
Valore atteso: {ehlo_domain}
",
+ "diagnosis_mail_fcrdns_different_from_ehlo_domain": "Il DNS inverso non è correttamente configurato su IPv{ipversion}. Alcune email potrebbero non essere spedite o segnalate come SPAM.",
+ "diagnosis_mail_fcrdns_nok_alternatives_6": "Alcuni provider non permettono di configurare un DNS inverso (o non è configurato bene...). Se il tuo DNS inverso è correttamente configurato per IPv4, puoi provare a disabilitare l'utilizzo di IPv6 durante l'invio mail eseguendo yunohost settings set smtp.allow_ipv6 -v off . NB: se esegui il comando non sarà più possibile inviare o ricevere email da i pochi IPv6-only server mail esistenti.",
+ "yunohost_postinstall_end_tip": "La post-installazione è completata! Per rifinire il tuo setup, considera di:\n\t- aggiungere il primo utente nella sezione 'Utenti' del webadmin (o eseguendo da terminale 'yunohost user create ');\n\t- eseguire una diagnosi alla ricerca di problemi nella sezione 'Diagnosi' del webadmin (o eseguendo da terminale 'yunohost diagnosis run');\n\t- leggere 'Finalizing your setup' e 'Getting to know YunoHost' nella documentazione admin: https://yunohost.org/admindoc.",
+ "user_already_exists": "L'utente '{user}' esiste già",
+ "update_apt_cache_warning": "Qualcosa è andato storto mentre eseguivo l'aggiornamento della cache APT (package manager di Debian). Ecco il dump di sources.list, che potrebbe aiutare ad identificare le linee problematiche:\n{sourceslist}",
+ "update_apt_cache_failed": "Impossibile aggiornare la cache di APT (package manager di Debian). Ecco il dump di sources.list, che potrebbe aiutare ad identificare le linee problematiche:\n{sourceslist}",
+ "unknown_main_domain_path": "Percorso o dominio sconosciuto per '{app}'. Devi specificare un dominio e un percorso per poter specificare un URL per il permesso.",
+ "tools_upgrade_special_packages_completed": "Aggiornamento pacchetti YunoHost completato.\nPremi [Invio] per tornare al terminale",
+ "tools_upgrade_special_packages_explanation": "L'aggiornamento speciale continuerà in background. Per favore non iniziare nessun'altra azione sul tuo server per i prossimi ~10 minuti (dipende dalla velocità hardware). Dopo questo, dovrai ri-loggarti nel webadmin. Il registro di aggiornamento sarà disponibile in Strumenti → Log/Registri (nel webadmin) o dalla linea di comando eseguendo 'yunohost log list'.",
+ "tools_upgrade_special_packages": "Adesso aggiorno i pacchetti 'speciali' (correlati a yunohost)...",
+ "tools_upgrade_regular_packages_failed": "Impossibile aggiornare i pacchetti: {packages_list}",
+ "tools_upgrade_regular_packages": "Adesso aggiorno i pacchetti 'normali' (non correlati a yunohost)...",
+ "tools_upgrade_cant_unhold_critical_packages": "Impossibile annullare il blocco dei pacchetti critici/importanti...",
+ "tools_upgrade_cant_hold_critical_packages": "Impossibile bloccare i pacchetti critici/importanti...",
+ "tools_upgrade_cant_both": "Impossibile aggiornare sia il sistema e le app nello stesso momento",
+ "tools_upgrade_at_least_one": "Specifica 'apps', o 'system'",
+ "show_tile_cant_be_enabled_for_regex": "Non puoi abilitare 'show_tile' in questo momento, perché l'URL del permesso '{permission}' è una regex",
+ "show_tile_cant_be_enabled_for_url_not_defined": "Non puoi abilitare 'show_tile' in questo momento, devi prima definire un URL per il permesso '{permission}'",
+ "service_reloaded_or_restarted": "Il servizio '{service}' è stato ricaricato o riavviato",
+ "service_reload_or_restart_failed": "Impossibile ricaricare o riavviare il servizio '{service}'\n\nUltimi registri del servizio: {logs}",
+ "service_restarted": "Servizio '{service}' riavviato",
+ "service_restart_failed": "Impossibile riavviare il servizio '{service}'\n\nUltimi registri del servizio: {logs}",
+ "service_reloaded": "Servizio '{service}' ricaricato",
+ "service_reload_failed": "Impossibile ricaricare il servizio '{service}'\n\nUltimi registri del servizio: {logs}",
+ "service_regen_conf_is_deprecated": "'yunohost service regen-conf' è obsoleto! Per favore usa 'yunohost tools regen-conf' al suo posto.",
+ "service_description_yunohost-firewall": "Gestisce l'apertura e la chiusura delle porte ai servizi",
+ "service_description_yunohost-api": "Gestisce l'interazione tra l'interfaccia web YunoHost ed il sistema",
+ "service_description_ssh": "Ti consente di accedere da remoto al tuo server attraverso il terminale (protocollo SSH)",
+ "service_description_slapd": "Memorizza utenti, domini e info correlate",
+ "service_description_rspamd": "Filtra SPAM, e altre funzionalità legate alle mail",
+ "service_description_redis-server": "Un database specializzato usato per un veloce accesso ai dati, task queue, e comunicazioni tra programmi",
+ "service_description_postfix": "Usato per inviare e ricevere email",
+ "service_description_php7.3-fpm": "Esegue app scritte in PHP con NGINX",
+ "service_description_nginx": "Serve o permette l'accesso a tutti i siti pubblicati sul tuo server",
+ "service_description_mysql": "Memorizza i dati delle app (database SQL)",
+ "service_description_metronome": "Gestisce gli account di messaggistica instantanea XMPP",
+ "service_description_fail2ban": "Ti protegge dal brute-force e altri tipi di attacchi da Internet",
+ "service_description_dovecot": "Consente ai client mail di accedere/recuperare le email (via IMAP e POP3)",
+ "service_description_dnsmasq": "Gestisce la risoluzione dei domini (DNS)",
+ "server_reboot_confirm": "Il server si riavvierà immediatamente, sei sicuro? [{answers}]",
+ "server_reboot": "Il server si riavvierà",
+ "server_shutdown_confirm": "Il server si spegnerà immediatamente, sei sicuro? [{answers}]",
+ "server_shutdown": "Il server si spegnerà",
+ "root_password_replaced_by_admin_password": "La tua password di root è stata sostituita dalla tua password d'amministratore.",
+ "root_password_desynchronized": "La password d'amministratore è stata cambiata, ma YunoHost non ha potuto propagarla alla password di root!",
+ "restore_system_part_failed": "Impossibile ripristinare la sezione di sistema '{part}'",
+ "restore_removing_tmp_dir_failed": "Impossibile rimuovere una vecchia directory temporanea",
+ "restore_not_enough_disk_space": "Spazio libero insufficiente (spazio: {free_space}B, necessario: {needed_space}B, margine di sicurezza: {margin}B)",
+ "restore_may_be_not_enough_disk_space": "Il tuo sistema non sembra avere abbastanza spazio (libero: {free_space}B, necessario: {needed_space}B, margine di sicurezza: {margin}B)",
+ "restore_extracting": "Sto estraendo i file necessari dall'archivio...",
+ "restore_already_installed_apps": "Le seguenti app non possono essere ripristinate perché sono già installate: {apps}",
+ "regex_with_only_domain": "Non puoi usare una regex per il dominio, solo per i percorsi",
+ "regex_incompatible_with_tile": "/!\\ Packagers! Il permesso '{permission}' ha show_tile impostato su 'true' e perciò non è possibile definire un URL regex per l'URL principale",
+ "regenconf_need_to_explicitly_specify_ssh": "La configurazione ssh è stata modificata manualmente, ma devi specificare la categoria 'ssh' con --force per applicare le modifiche.",
+ "regenconf_pending_applying": "Applico le configurazioni in attesa per la categoria '{category}'...",
+ "regenconf_failed": "Impossibile rigenerare la configurazione per le categorie: {categories}",
+ "regenconf_dry_pending_applying": "Controllo configurazioni in attesa che potrebbero essere applicate alla categoria '{category}'...",
+ "regenconf_would_be_updated": "La configurazione sarebbe stata aggiornata per la categoria '{category}'",
+ "regenconf_updated": "Configurazione aggiornata per '{category}'",
+ "regenconf_up_to_date": "Il file di configurazione è già aggiornato per la categoria '{category}'",
+ "regenconf_now_managed_by_yunohost": "Il file di configurazione '{conf}' da adesso è gestito da YunoHost (categoria {category}).",
+ "regenconf_file_updated": "File di configurazione '{conf}' aggiornato",
+ "regenconf_file_removed": "File di configurazione '{conf}' rimosso",
+ "regenconf_file_remove_failed": "Impossibile rimuovere il file di configurazione '{conf}'",
+ "regenconf_file_manually_removed": "Il file di configurazione '{conf}' è stato rimosso manualmente, e non sarà generato",
+ "regenconf_file_manually_modified": "Il file di configurazione '{conf}' è stato modificato manualmente e non sarà aggiornato",
+ "regenconf_file_kept_back": "Il file di configurazione '{conf}' dovrebbe esser stato cancellato da regen-conf (categoria {category}), ma non è così.",
+ "regenconf_file_copy_failed": "Impossibile copiare il nuovo file di configurazione da '{new}' a '{conf}'",
+ "regenconf_file_backed_up": "File di configurazione '{conf}' salvato in '{backup}'",
+ "permission_require_account": "Il permesso {permission} ha senso solo per gli utenti con un account, quindi non può essere attivato per i visitatori.",
+ "permission_protected": "Il permesso {permission} è protetto. Non puoi aggiungere o rimuovere il gruppo visitatori dal permesso.",
+ "permission_updated": "Permesso '{permission}' aggiornato",
+ "permission_update_failed": "Impossibile aggiornare il permesso '{permission}': {error}",
+ "permission_not_found": "Permesso '{permission}' non trovato",
+ "permission_deletion_failed": "Impossibile cancellare il permesso '{permission}': {error}",
+ "permission_deleted": "Permesso '{permission}' cancellato",
+ "permission_currently_allowed_for_all_users": "Il permesso è attualmente garantito a tutti gli utenti oltre gli altri gruppi. Probabilmente vuoi o rimuovere il permesso 'all_user' o rimuovere gli altri gruppi per cui è garantito attualmente.",
+ "permission_creation_failed": "Impossibile creare i permesso '{permission}': {error}",
+ "permission_created": "Permesso '{permission}' creato",
+ "permission_cannot_remove_main": "Non è possibile rimuovere un permesso principale",
+ "permission_already_up_to_date": "Il permesso non è stato aggiornato perché la richiesta di aggiunta/rimozione è già coerente con lo stato attuale.",
+ "permission_already_exist": "Permesso '{permission}' esiste già",
+ "permission_already_disallowed": "Il gruppo '{group}' ha già il permesso '{permission}' disabilitato",
+ "permission_already_allowed": "Il gruppo '{group}' ha già il permesso '{permission}' abilitato",
+ "pattern_password_app": "Mi spiace, le password non possono contenere i seguenti caratteri: {forbidden_chars}",
+ "pattern_email_forward": "Dev'essere un indirizzo mail valido, simbolo '+' accettato (es: tizio+tag@example.com)",
+ "operation_interrupted": "L'operazione è stata interrotta manualmente?",
+ "invalid_number": "Dev'essere un numero",
+ "migrations_to_be_ran_manually": "Migrazione {id} dev'essere eseguita manualmente. Vai in Strumenti → Migrazioni nella pagina webadmin, o esegui `yunohost tools migrations run`.",
+ "migrations_success_forward": "Migrazione {id} completata",
+ "migrations_skip_migration": "Salto migrazione {id}...",
+ "migrations_running_forward": "Eseguo migrazione {id}...",
+ "migrations_pending_cant_rerun": "Queste migrazioni sono ancora in attesa, quindi non possono essere eseguite nuovamente: {ids}",
+ "migrations_not_pending_cant_skip": "Queste migrazioni non sono in attesa, quindi non possono essere saltate: {ids}",
+ "migrations_no_such_migration": "Non esiste una migrazione chiamata '{id}'",
+ "migrations_no_migrations_to_run": "Nessuna migrazione da eseguire",
+ "migrations_need_to_accept_disclaimer": "Per eseguire la migrazione {id}, devi accettare il disclaimer seguente:\n---\n{disclaimer}\n---\nSe accetti di eseguire la migrazione, per favore reinserisci il comando con l'opzione '--accept-disclaimer'.",
+ "migrations_must_provide_explicit_targets": "Devi specificare i target quando utilizzi '--skip' o '--force-rerun'",
+ "migrations_migration_has_failed": "Migrazione {id} non completata, annullamento. Errore: {exception}",
+ "migrations_loading_migration": "Caricamento migrazione {id}...",
+ "migrations_list_conflict_pending_done": "Non puoi usare sia '--previous' e '--done' allo stesso tempo.",
+ "migrations_exclusive_options": "'--auto', '--skip', e '--force-rerun' sono opzioni che si escludono a vicenda.",
+ "migrations_failed_to_load_migration": "Impossibile caricare la migrazione {id}: {error}",
+ "migrations_dependencies_not_satisfied": "Esegui queste migrazioni: '{dependencies_id}', prima di {id}.",
+ "migrations_cant_reach_migration_file": "Impossibile accedere ai file di migrazione nel path '%s'",
+ "migrations_already_ran": "Migrazioni già effettuate: {ids}",
+ "migration_0019_slapd_config_will_be_overwritten": "Sembra che tu abbia modificato manualmente la configurazione slapd. Per questa importante migrazione, YunoHost deve forzare l'aggiornamento della configurazione slapd. I file originali verranno back-uppati in {conf_backup_folder}.",
+ "migration_0019_add_new_attributes_in_ldap": "Aggiungi nuovi attributi ai permessi nel database LDAP",
+ "migration_0018_failed_to_reset_legacy_rules": "Impossibile resettare le regole iptables legacy: {error}",
+ "migration_0018_failed_to_migrate_iptables_rules": "Migrazione fallita delle iptables legacy a nftables: {error}",
+ "migration_0017_not_enough_space": "Libera abbastanza spazio in {path} per eseguire la migrazione.",
+ "migration_0017_postgresql_11_not_installed": "PostgreSQL 9.6 è installato, ma non PostgreSQL 11 ?! Qualcosa di strano potrebbe esser successo al tuo sistema :'( ...",
+ "migration_0017_postgresql_96_not_installed": "PostgreSQL non è stato installato sul tuo sistema. Nulla da fare.",
+ "migration_0015_weak_certs": "I seguenti certificati utilizzano ancora un algoritmo di firma debole e dovrebbero essere aggiornati per essere compatibili con la prossima versione di nginx: {certs}",
+ "migration_0015_cleaning_up": "Sto pulendo la cache e i pacchetti non più utili...",
+ "migration_0015_specific_upgrade": "Inizio l'aggiornamento dei pacchetti di sistema che necessitano di essere aggiornati da soli...",
+ "migration_0015_modified_files": "Attenzioni, i seguenti file sembrano esser stati modificati manualmente, e potrebbero essere sovrascritti dopo l'aggiornamento: {manually_modified_files}",
+ "migration_0015_problematic_apps_warning": "Alcune applicazioni potenzialmente problematiche sono state rilevate nel sistema. Sembra che non siano state installate attraverso il catalogo app YunoHost, o non erano flaggate come 'working'/'funzionanti'. Di conseguenza, non è possibile garantire che funzioneranno ancora dopo l'aggiornamento: {problematic_apps}",
+ "migration_0015_general_warning": "Attenzione, sappi che questa migrazione è un'operazione delicata. Il team YunoHost ha fatto del suo meglio nel controllarla e testarla, ma le probabilità che il sistema e/o qualche app si danneggi non sono nulle.\n\nPerciò, ti raccomandiamo di:\n\t- Effettuare un backup di tutti i dati e app importanti. Maggiori informazioni su https://yunohost.org/backup;\n\t- Sii paziente dopo aver lanciato l'operazione: in base alla tua connessione internet e al tuo hardware, potrebbero volerci alcune ore per aggiornare tutto.",
+ "migration_0015_system_not_fully_up_to_date": "Il tuo sistema non è completamente aggiornato. Esegui un aggiornamento classico prima di lanciare la migrazione a Buster.",
+ "migration_0015_not_enough_free_space": "Poco spazio libero disponibile in /var/! Dovresti avere almeno 1GB libero per effettuare questa migrazione.",
+ "migration_0015_not_stretch": "La distribuzione Debian corrente non è Stretch!",
+ "migration_0015_yunohost_upgrade": "Inizio l'aggiornamento del core di YunoHost...",
+ "migration_0015_still_on_stretch_after_main_upgrade": "Qualcosa è andato storto durante l'aggiornamento principale, il sistema sembra essere ancora su Debian Stretch",
+ "migration_0015_main_upgrade": "Inizio l'aggiornamento principale...",
+ "migration_0015_patching_sources_list": "Applico le patch a sources.lists...",
+ "migration_0015_start": "Inizio migrazione a Buster",
+ "migration_description_0019_extend_permissions_features": "Estendi il sistema di gestione dei permessi app",
+ "migration_description_0018_xtable_to_nftable": "Migra le vecchie regole di traffico network sul nuovo sistema nftable",
+ "migration_description_0017_postgresql_9p6_to_11": "Migra i database da PostgreSQL 9.6 a 11",
+ "migration_description_0016_php70_to_php73_pools": "MIgra i file di configurazione 'pool' di php7.0-fpm su php7.3",
+ "migration_description_0015_migrate_to_buster": "Aggiorna il sistema a Debian Buster e YunoHost 4.X",
+ "migrating_legacy_permission_settings": "Impostando le impostazioni legacy dei permessi..",
+ "mailbox_disabled": "E-mail disabilitate per l'utente {user}",
+ "log_user_permission_reset": "Resetta il permesso '{}'",
+ "log_user_permission_update": "Aggiorna gli accessi del permesso '{}'",
+ "log_user_group_update": "Aggiorna il gruppo '{}'",
+ "log_user_group_delete": "Cancella il gruppo '{}'",
+ "log_user_group_create": "Crea il gruppo '{}'",
+ "log_permission_url": "Aggiorna l'URL collegato al permesso '{}'",
+ "log_permission_delete": "Cancella permesso '{}'",
+ "log_permission_create": "Crea permesso '{}'",
+ "log_app_action_run": "Esegui l'azione dell'app '{}'",
+ "log_operation_unit_unclosed_properly": "Operazion unit non è stata chiusa correttamente",
+ "invalid_regex": "Regex invalida:'{regex}'",
+ "hook_list_by_invalid": "Questa proprietà non può essere usata per listare gli hooks",
+ "hook_json_return_error": "Impossibile leggere la risposta del hook {path}. Errore: {msg}. Contenuto raw: {raw_content}",
+ "group_user_not_in_group": "L'utente {user} non è nel gruppo {group}",
+ "group_user_already_in_group": "L'utente {user} è già nel gruppo {group}",
+ "group_update_failed": "Impossibile aggiornare il gruppo '{group}': {error}",
+ "group_updated": "Gruppo '{group}' aggiornato",
+ "group_unknown": "Gruppo '{group}' sconosciuto",
+ "group_deletion_failed": "Impossibile cancellare il gruppo '{group}': {error}",
+ "group_deleted": "Gruppo '{group}' cancellato",
+ "group_cannot_be_deleted": "Il gruppo {group} non può essere eliminato manualmente.",
+ "group_cannot_edit_primary_group": "Il gruppo '{group}' non può essere modificato manualmente. È il gruppo principale con lo scopo di contenere solamente uno specifico utente.",
+ "group_cannot_edit_visitors": "Il gruppo 'visitatori' non può essere modificato manualmente. È un gruppo speciale che rappresenta i visitatori anonimi",
+ "group_cannot_edit_all_users": "Il gruppo 'all_users' non può essere modificato manualmente. È un gruppo speciale che contiene tutti gli utenti registrati in YunoHost",
+ "group_creation_failed": "Impossibile creare il gruppo '{group}': {error}",
+ "group_created": "Gruppo '{group}' creato",
+ "group_already_exist_on_system_but_removing_it": "Il gruppo {group} esiste già tra i gruppi di sistema, ma YunoHost lo cancellerà...",
+ "group_already_exist_on_system": "Il gruppo {group} esiste già tra i gruppi di sistema",
+ "group_already_exist": "Il gruppo {group} esiste già",
+ "global_settings_setting_backup_compress_tar_archives": "Quando creo nuovi backup, usa un archivio (.tar.gz) al posto di un archivio non compresso (.tar). NB: abilitare quest'opzione significa create backup più leggeri, ma la procedura durerà di più e il carico CPU sarà maggiore.",
+ "global_settings_setting_smtp_relay_password": "Password del relay SMTP",
+ "global_settings_setting_smtp_relay_user": "User account del relay SMTP",
+ "global_settings_setting_smtp_relay_port": "Porta del relay SMTP",
+ "global_settings_setting_smtp_relay_host": "Utilizza SMTP relay per inviare mail al posto di questa instanza yunohost. Utile se sei in una di queste situazioni: la tua porta 25 è bloccata dal tuo provider ISP o VPS; hai un IP residenziale listato su DUHL; non sei puoi configurare il DNS inverso; oppure questo server non è direttamente esposto a Internet e vuoi usarne un'altro per spedire email.",
+ "global_settings_setting_smtp_allow_ipv6": "Permetti l'utilizzo di IPv6 per ricevere e inviare mail",
+ "global_settings_setting_pop3_enabled": "Abilita il protocollo POP3 per il server mail",
+ "dyndns_provider_unreachable": "Incapace di raggiungere il provider DynDNS {provider}: o il tuo YunoHost non è connesso ad internet o il server dynette è down.",
+ "dpkg_lock_not_available": "Impossibile eseguire il comando in questo momento perché un altro programma sta bloccando dpkg (il package manager di sistema)",
+ "domain_name_unknown": "Dominio '{domain}' sconosciuto",
+ "domain_cannot_remove_main_add_new_one": "Non puoi rimuovere '{domain}' visto che è il dominio principale nonché il tuo unico dominio, devi prima aggiungere un altro dominio eseguendo 'yunohost domain add ', impostarlo come dominio principale con 'yunohost domain main-domain n ', e solo allora potrai rimuovere il dominio '{domain}' eseguendo 'yunohost domain remove {domain}'.'",
+ "domain_cannot_add_xmpp_upload": "Non puoi aggiungere domini che iniziano per 'xmpp-upload.'. Questo tipo di nome è riservato per la funzionalità di upload XMPP integrata in YunoHost.",
+ "diagnosis_processes_killed_by_oom_reaper": "Alcuni processi sono stati terminati dal sistema che era a corto di memoria. Questo è un sintomo di insufficienza di memoria nel sistema o di un processo che richiede troppa memoria. Lista dei processi terminati:\n{kills_summary}",
+ "diagnosis_never_ran_yet": "Sembra che questo server sia stato impostato recentemente e non è presente nessun report di diagnostica. Dovresti partire eseguendo una diagnostica completa, da webadmin o da terminale con il comando 'yunohost diagnosis run'.",
+ "diagnosis_unknown_categories": "Le seguenti categorie sono sconosciute: {categories}",
+ "diagnosis_http_nginx_conf_not_up_to_date_details": "Per sistemare, ispeziona le differenze nel terminale eseguendo yunohost tools regen-conf nginx --dry-run --with-diff e se ti va bene, applica le modifiche con yunohost tools regen-conf ngix --force .",
+ "diagnosis_http_nginx_conf_not_up_to_date": "La configurazione nginx di questo dominio sembra esser stato modificato manualmente, e impedisce a YunoHost di controlalre se è raggiungibile su HTTP.",
+ "diagnosis_http_partially_unreachable": "Il dominio {domain} sembra irraggiungibile attraverso HTTP dall'esterno della tua LAN su IPv{failed}, anche se funziona su IPv{passed}.",
+ "diagnosis_http_unreachable": "Il dominio {domain} sembra irraggiungibile attraverso HTTP dall'esterno della tua LAN.",
+ "diagnosis_http_bad_status_code": "Sembra che un altro dispositivo (forse il tuo router internet) abbia risposto al posto del tuo server
1. La causa più comune è la porta 80 (e 443) non correttamente inoltrata al tuo server.
2. Su setup più complessi: assicurati che nessun firewall o reverse-proxy stia interferendo.",
+ "diagnosis_http_connection_error": "Errore connessione: impossibile connettersi al dominio richiesto, probabilmente è irraggiungibile.",
+ "diagnosis_http_timeout": "Andato in time-out cercando di contattare il server dall'esterno. Sembra essere irraggiungibile.
1. La causa più comune è la porta 80 (e 443) non correttamente inoltrata al tuo server.
2. Dovresti accertarti che il servizio nginx sia attivo.
3. Su setup più complessi: assicurati che nessun firewall o reverse-proxy stia interferendo.",
+ "diagnosis_http_ok": "Il dominio {domain} è raggiungibile attraverso HTTP al di fuori della tua LAN.",
+ "diagnosis_http_could_not_diagnose_details": "Errore: {error}",
+ "diagnosis_http_could_not_diagnose": "Non posso controllare se i domini sono raggiungibili dall'esterno su IPv{ipversion}.",
+ "diagnosis_http_hairpinning_issue_details": "Questo probabilmente è causato dal tuo ISP router. Come conseguenza, persone al di fuori della tua LAN saranno in grado di accedere al tuo server come atteso, ma non le persone all'interno della LAN (tipo te, immagino) utilizzando il dominio internet o l'IP globale. Dovresti essere in grado di migliorare la situazione visitando https://yunohost.org/dns_local_network",
+ "diagnosis_http_hairpinning_issue": "La tua rete locale sembra non avere \"hairpinning\" abilitato.",
+ "diagnosis_ports_forwarding_tip": "Per sistemare questo problema, probabilmente dovresti configurare l'inoltro della porta sul tuo router internet come descritto qui https://yunohost.org/isp_box_config",
+ "diagnosis_ports_needed_by": "Esporre questa porta è necessario per le feature di {category} (servizio {service})",
+ "diagnosis_ports_ok": "La porta {port} è raggiungibile dall'esterno.",
+ "diagnosis_ports_partially_unreachable": "La porta {port} non è raggiungibile dall'esterno su IPv{failed}.",
+ "diagnosis_ports_unreachable": "La porta {port} non è raggiungibile dall'esterno.",
+ "diagnosis_ports_could_not_diagnose_details": "Errore: {error}",
+ "diagnosis_ports_could_not_diagnose": "Impossibile diagnosticare se le porte sono raggiungibili dall'esterno su IPv{ipversion}.",
+ "diagnosis_description_regenconf": "Configurazioni sistema",
+ "diagnosis_description_mail": "Email",
+ "diagnosis_description_web": "Web",
+ "diagnosis_description_ports": "Esposizione porte",
+ "diagnosis_description_systemresources": "Risorse di sistema",
+ "diagnosis_description_services": "Check stato servizi",
+ "diagnosis_description_dnsrecords": "Record DNS",
+ "diagnosis_description_ip": "Connettività internet",
+ "diagnosis_description_basesystem": "Sistema base",
+ "diagnosis_security_vulnerable_to_meltdown_details": "Per sistemare, dovresti aggiornare il tuo sistema e fare il reboot per caricare il nuovo kernel linux (o contatta il tuo server provider se non funziona). Visita https://meltdownattack.com/ per maggiori info.",
+ "diagnosis_security_vulnerable_to_meltdown": "Sembra che tu sia vulnerabile alla vulnerabilità di sicurezza critica \"Meltdown\"",
+ "diagnosis_regenconf_manually_modified_details": "Questo è probabilmente OK se sai cosa stai facendo! YunoHost smetterà di aggiornare automaticamente questo file... Ma sappi che gli aggiornamenti di YunoHost potrebbero contenere importanti cambiamenti. Se vuoi, puoi controllare le differente con yunohost tools regen-conf {category} --dry-run --with-diff e forzare il reset della configurazione raccomandata con yunohost tools regen-conf {category} --force ",
+ "diagnosis_regenconf_manually_modified": "Il file di configurazione {file}
sembra esser stato modificato manualmente.",
+ "diagnosis_regenconf_allgood": "Tutti i file di configurazione sono allineati con le configurazioni raccomandate!",
+ "diagnosis_mail_queue_too_big": "Troppe email in attesa nella coda ({nb_pending} emails)",
+ "diagnosis_mail_queue_unavailable_details": "Errore: {error}",
+ "diagnosis_mail_queue_unavailable": "Impossibile consultare il numero di email in attesa",
+ "diagnosis_mail_queue_ok": "{nb_pending} emails in attesa nelle code",
+ "diagnosis_mail_blacklist_website": "Dopo aver identificato il motivo e averlo risolto, sentiti libero di chiedere di rimuovere il tuo IP o dominio da {blacklist_website}",
+ "diagnosis_mail_blacklist_reason": "Il motivo della blacklist è: {reason}",
+ "diagnosis_mail_blacklist_listed_by": "Il tuo IP o dominio {item}
è nella blacklist {blacklist_name}",
+ "diagnosis_backports_in_sources_list": "Sembra che apt (il package manager) sia configurato per utilizzare le backport del repository. A meno che tu non sappia quello che stai facendo, scoraggiamo fortemente di installare pacchetti tramite esse, perché ci sono alte probabilità di creare conflitti con il tuo sistema.",
+ "diagnosis_basesystem_hardware_model": "Modello server: {model}",
+ "postinstall_low_rootfsspace": "La radice del filesystem ha uno spazio totale inferiore ai 10 GB, ed è piuttosto preoccupante! Consumerai tutta la memoria molto velocemente! Raccomandiamo di avere almeno 16 GB per la radice del filesystem. Se vuoi installare YunoHost ignorando questo avviso, esegui nuovamente il postinstall con l'argomento --force-diskspace",
+ "domain_remove_confirm_apps_removal": "Rimuovere questo dominio rimuoverà anche le seguenti applicazioni:\n{apps}\n\nSei sicuro di voler continuare? [{answers}]",
+ "diagnosis_rootfstotalspace_critical": "La radice del filesystem ha un totale di solo {space}, ed è piuttosto preoccupante! Probabilmente consumerai tutta la memoria molto velocemente! Raccomandiamo di avere almeno 16 GB per la radice del filesystem.",
+ "diagnosis_rootfstotalspace_warning": "La radice del filesystem ha un totale di solo {space}. Potrebbe non essere un problema, ma stai attento perché potresti consumare tutta la memoria velocemente... Raccomandiamo di avere almeno 16 GB per la radice del filesystem.",
+ "restore_backup_too_old": "Questo archivio backup non può essere ripristinato perché è stato generato da una versione troppo vecchia di YunoHost.",
+ "permission_cant_add_to_all_users": "Il permesso {permission} non può essere aggiunto a tutto gli utenti.",
+ "migration_update_LDAP_schema": "Aggiorno lo schema LDAP...",
+ "migration_ldap_rollback_success": "Sistema ripristinato allo stato precedente.",
+ "migration_ldap_migration_failed_trying_to_rollback": "Impossibile migrare... provo a ripristinare il sistema.",
+ "migration_ldap_can_not_backup_before_migration": "Il backup del sistema non è stato completato prima che la migrazione fallisse. Errore: {error}",
+ "migration_ldap_backup_before_migration": "Sto generando il backup del database LDAP e delle impostazioni delle app prima di effettuare la migrazione.",
+ "migration_description_0020_ssh_sftp_permissions": "Aggiungi il supporto ai permessi SSH e SFTP",
+ "log_backup_create": "Crea un archivio backup",
+ "global_settings_setting_ssowat_panel_overlay_enabled": "Abilita il pannello sovrapposto SSOwat",
+ "global_settings_setting_security_ssh_port": "Porta SSH",
+ "diagnosis_sshd_config_inconsistent_details": "Esegui yunohost settings set security.ssh.port -v PORTA_SSH per definire la porta SSH, e controlla con yunohost tools regen-conf ssh --dry-run --with-diff , poi yunohost tools regen-conf ssh --force per resettare la tua configurazione con le raccomandazioni YunoHost.",
+ "diagnosis_sshd_config_inconsistent": "Sembra che la porta SSH sia stata modificata manualmente in /etc/ssh/sshd_config: A partire da YunoHost 4.2, una nuova configurazione globale 'security.ssh.port' è disponibile per evitare di modificare manualmente la configurazione.",
+ "diagnosis_sshd_config_insecure": "Sembra che la configurazione SSH sia stata modificata manualmente, ed non è sicuro dato che non contiene le direttive 'AllowGroups' o 'Allowusers' che limitano l'accesso agli utenti autorizzati.",
+ "backup_create_size_estimation": "L'archivio conterrà circa {size} di dati.",
+ "app_restore_script_failed": "C'è stato un errore all'interno dello script di recupero",
+ "global_settings_setting_security_webadmin_allowlist": "Indirizzi IP con il permesso di accedere al webadmin, separati da virgola.",
+ "global_settings_setting_security_webadmin_allowlist_enabled": "Permetti solo ad alcuni IP di accedere al webadmin.",
+ "disk_space_not_sufficient_update": "Non c'è abbastanza spazio libero per aggiornare questa applicazione",
+ "disk_space_not_sufficient_install": "Non c'è abbastanza spazio libero per installare questa applicazione"
+}
\ No newline at end of file
diff --git a/locales/mk.json b/locales/mk.json
new file mode 100644
index 000000000..9e26dfeeb
--- /dev/null
+++ b/locales/mk.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/locales/nb_NO.json b/locales/nb_NO.json
index 0967ef424..dc217d74e 100644
--- a/locales/nb_NO.json
+++ b/locales/nb_NO.json
@@ -1 +1,120 @@
-{}
+{
+ "aborting": "Avbryter…",
+ "admin_password": "Administrasjonspassord",
+ "admin_password_change_failed": "Kan ikke endre passord",
+ "admin_password_changed": "Administrasjonspassord endret",
+ "admin_password_too_long": "Velg et passord kortere enn 127 tegn",
+ "app_already_installed": "{app} er allerede installert",
+ "app_already_up_to_date": "{app} er allerede oppdatert",
+ "app_argument_invalid": "Velg en gydlig verdi for argumentet '{name}': {error}",
+ "app_argument_required": "Argumentet '{name}' er påkrevd",
+ "app_id_invalid": "Ugyldig program-ID",
+ "dyndns_key_not_found": "Fant ikke DNS-nøkkel for domenet",
+ "app_not_correctly_installed": "{app} ser ikke ut til å ha blitt installert på riktig måte",
+ "dyndns_provider_unreachable": "Kunne ikke nå DynDNS-tilbyder {provider}: Enten har du ikke satt opp din YunoHost rett, dynette-tjeneren er nede, eller du mangler nett.",
+ "app_not_properly_removed": "{app} har ikke blitt fjernet på riktig måte",
+ "app_removed": "{app} fjernet",
+ "app_requirements_checking": "Sjekker påkrevde pakker for {app}…",
+ "app_start_install": "Installerer programmet '{app}'…",
+ "action_invalid": "Ugyldig handling '{action}'",
+ "app_start_restore": "Gjenoppretter programmet '{app}'…",
+ "backup_created": "Sikkerhetskopi opprettet",
+ "backup_archive_name_exists": "En sikkerhetskopi med dette navnet finnes allerede.",
+ "backup_archive_name_unknown": "Ukjent lokalt sikkerhetskopiarkiv ved navn '{name}'",
+ "already_up_to_date": "Ingenting å gjøre. Alt er oppdatert.",
+ "backup_method_copy_finished": "Sikkerhetskopi fullført",
+ "backup_method_tar_finished": "TAR-sikkerhetskopiarkiv opprettet",
+ "app_action_cannot_be_ran_because_required_services_down": "Dette programmet krever noen tjenester som ikke kjører. Før du fortsetter, du bør prøve å starte følgende tjenester på ny (og antagelig undersøke hvorfor de er nede): {services}",
+ "app_already_installed_cant_change_url": "Dette programmet er allerede installert. Nettadressen kan ikke endres kun med denne funksjonen. Ta en titt på `app changeurl` hvis den er tilgjengelig.",
+ "domain_exists": "Domenet finnes allerede",
+ "domains_available": "Tilgjengelige domener:",
+ "done": "Ferdig",
+ "downloading": "Laster ned…",
+ "dyndns_could_not_check_provide": "Kunne ikke sjekke om {provider} kan tilby {domain}.",
+ "dyndns_could_not_check_available": "Kunne ikke sjekke om {domain} er tilgjengelig på {provider}.",
+ "mail_domain_unknown": "Ukjent e-postadresse for domenet '{domain}'",
+ "log_remove_on_failed_restore": "Fjern '{}' etter mislykket gjenoppretting fra sikkerhetskopiarkiv",
+ "log_letsencrypt_cert_install": "Installer et Let's Encrypt-sertifikat på '{}'-domenet",
+ "log_letsencrypt_cert_renew": "Forny '{}'-Let's Encrypt-sertifikat",
+ "log_user_update": "Oppdater brukerinfo for '{}'",
+ "mail_alias_remove_failed": "Kunne ikke fjerne e-postaliaset '{mail}'",
+ "app_action_broke_system": "Denne handlingen ser ut til å ha knekt disse viktige tjenestene: {services}",
+ "app_argument_choice_invalid": "Bruk én av disse valgene '{choices}' for argumentet '{name}'",
+ "app_extraction_failed": "Kunne ikke pakke ut installasjonsfilene",
+ "app_install_files_invalid": "Disse filene kan ikke installeres",
+ "backup_abstract_method": "Denne sikkerhetskopimetoden er ikke implementert enda",
+ "backup_actually_backuping": "Oppretter sikkerhetskopiarkiv fra innsamlede filer…",
+ "backup_app_failed": "Kunne ikke sikkerhetskopiere programmet '{app}'",
+ "backup_applying_method_tar": "Lager TAR-sikkerhetskopiarkiv…",
+ "backup_archive_app_not_found": "Fant ikke programmet '{app}' i sikkerhetskopiarkivet",
+ "backup_archive_open_failed": "Kunne ikke åpne sikkerhetskopiarkivet",
+ "app_start_remove": "Fjerner programmet '{app}'…",
+ "app_start_backup": "Samler inn filer for sikkerhetskopiering for {app}…",
+ "backup_applying_method_copy": "Kopier alle filer til sikkerhetskopi…",
+ "backup_creation_failed": "Kunne ikke opprette sikkerhetskopiarkiv",
+ "backup_couldnt_bind": "Kunne ikke binde {src} til {dest}.",
+ "backup_csv_addition_failed": "Kunne ikke legge til filer for sikkerhetskopi inn i CSV-filen",
+ "backup_deleted": "Sikkerhetskopi slettet",
+ "backup_no_uncompress_archive_dir": "Det finnes ingen slik utpakket arkivmappe",
+ "backup_delete_error": "Kunne ikke slette '{path}'",
+ "certmanager_cert_signing_failed": "Kunne ikke signere det nye sertifikatet",
+ "extracting": "Pakker ut…",
+ "log_domain_add": "Legg til '{}'-domenet i systemoppsett",
+ "log_domain_remove": "Fjern '{}'-domenet fra systemoppsett",
+ "log_dyndns_subscribe": "Abonner på YunoHost-underdomenet '{}'",
+ "log_dyndns_update": "Oppdater IP-adressen tilknyttet ditt YunoHost-underdomene '{}'",
+ "backup_nothings_done": "Ingenting å lagre",
+ "field_invalid": "Ugyldig felt '{}'",
+ "firewall_reloaded": "Brannmur gjeninnlastet",
+ "log_app_change_url": "Endre nettadresse for '{}'-programmet",
+ "log_app_install": "Installer '{}'-programmet",
+ "log_app_remove": "Fjern '{}'-programmet",
+ "log_app_upgrade": "Oppgrader '{}'-programmet",
+ "log_app_makedefault": "Gjør '{}' til forvalgt program",
+ "log_available_on_yunopaste": "Denne loggen er nå tilgjengelig via {url}",
+ "log_tools_shutdown": "Slå av tjeneren din",
+ "log_tools_reboot": "Utfør omstart av tjeneren din",
+ "apps_already_up_to_date": "Alle programmer allerede oppdatert",
+ "backup_mount_archive_for_restore": "Forbereder arkiv for gjenopprettelse…",
+ "backup_copying_to_organize_the_archive": "Kopierer {size} MB for å organisere arkivet",
+ "domain_cannot_remove_main": "Kan ikke fjerne hoveddomene. Sett et først",
+ "domain_cert_gen_failed": "Kunne ikke opprette sertifikat",
+ "domain_created": "Domene opprettet",
+ "domain_creation_failed": "Kunne ikke opprette domene",
+ "domain_dyndns_root_unknown": "Ukjent DynDNS-rotdomene",
+ "dyndns_ip_update_failed": "Kunne ikke oppdatere IP-adresse til DynDNS",
+ "dyndns_ip_updated": "Oppdaterte din IP på DynDNS",
+ "dyndns_key_generating": "Oppretter DNS-nøkkel… Dette kan ta en stund.",
+ "dyndns_no_domain_registered": "Inget domene registrert med DynDNS",
+ "dyndns_registered": "DynDNS-domene registrert",
+ "global_settings_setting_security_password_admin_strength": "Admin-passordets styrke",
+ "dyndns_registration_failed": "Kunne ikke registrere DynDNS-domene: {error}",
+ "global_settings_setting_security_password_user_strength": "Brukerpassordets styrke",
+ "log_backup_restore_app": "Gjenopprett '{}' fra sikkerhetskopiarkiv",
+ "log_remove_on_failed_install": "Fjern '{}' etter mislykket installasjon",
+ "log_selfsigned_cert_install": "Installer selvsignert sertifikat på '{}'-domenet",
+ "log_user_delete": "Slett '{}' bruker",
+ "log_user_group_delete": "Slett '{}' gruppe",
+ "log_user_group_update": "Oppdater '{}' gruppe",
+ "app_unknown": "Ukjent program",
+ "app_upgrade_app_name": "Oppgraderer {app}…",
+ "app_upgrade_failed": "Kunne ikke oppgradere {app}",
+ "app_upgrade_some_app_failed": "Noen programmer kunne ikke oppgraderes",
+ "app_upgraded": "{app} oppgradert",
+ "ask_firstname": "Fornavn",
+ "ask_lastname": "Etternavn",
+ "ask_main_domain": "Hoveddomene",
+ "ask_new_admin_password": "Nytt administrasjonspassord",
+ "app_upgrade_several_apps": "Følgende programmer vil oppgraderes: {apps}",
+ "ask_new_domain": "Nytt domene",
+ "ask_new_path": "Ny sti",
+ "ask_password": "Passord",
+ "domain_deleted": "Domene slettet",
+ "domain_deletion_failed": "Kunne ikke slette domene",
+ "domain_dyndns_already_subscribed": "Du har allerede abonnement på et DynDNS-domene",
+ "log_link_to_log": "Full logg for denne operasjonen: '{desc}'",
+ "log_help_to_get_log": "For å vise loggen for operasjonen '{desc}', bruk kommandoen 'yunohost log show {name}'",
+ "log_user_create": "Legg til '{}' bruker",
+ "app_change_url_success": "{app} nettadressen er nå {domain}{path}",
+ "app_install_failed": "Kunne ikke installere {app}: {error}"
+}
\ No newline at end of file
diff --git a/locales/ne.json b/locales/ne.json
new file mode 100644
index 000000000..9bc5c0bfa
--- /dev/null
+++ b/locales/ne.json
@@ -0,0 +1,3 @@
+{
+ "password_too_simple_1": "पासवर्ड कम्तिमा characters अक्षर लामो हुनु आवश्यक छ"
+}
\ No newline at end of file
diff --git a/locales/nl.json b/locales/nl.json
index 166df89ff..5e612fc77 100644
--- a/locales/nl.json
+++ b/locales/nl.json
@@ -1,99 +1,63 @@
{
- "action_invalid": "Ongeldige actie '{action:s}'",
+ "action_invalid": "Ongeldige actie '{action}'",
"admin_password": "Administrator wachtwoord",
- "admin_password_changed": "Het administratie wachtwoord is gewijzigd",
- "app_already_installed": "{app:s} is al geïnstalleerd",
- "app_argument_invalid": "'{name:s}' bevat ongeldige waarde: {error:s}",
- "app_argument_required": "Het '{name:s}' moet ingevuld worden",
+ "admin_password_changed": "Het administratie wachtwoord werd gewijzigd",
+ "app_already_installed": "{app} is al geïnstalleerd",
+ "app_argument_invalid": "Kies een geldige waarde voor '{name}': {error}",
+ "app_argument_required": "Het '{name}' moet ingevuld worden",
"app_extraction_failed": "Kan installatiebestanden niet uitpakken",
"app_id_invalid": "Ongeldige app-id",
- "app_install_files_invalid": "Ongeldige installatiebestanden",
- "app_location_already_used": "Er is al een app geïnstalleerd op deze locatie",
- "app_location_install_failed": "Kan app niet installeren op deze locatie",
+ "app_install_files_invalid": "Deze bestanden kunnen niet worden geïnstalleerd",
"app_manifest_invalid": "Ongeldig app-manifest",
- "app_no_upgrade": "Geen apps op te upgraden",
- "app_not_installed": "{app:s} is niet geïnstalleerd",
- "app_recent_version_required": "{:s} vereist een nieuwere versie van moulinette",
- "app_removed": "{app:s} succesvol verwijderd",
- "app_sources_fetch_failed": "Kan bronbestanden niet ophalen",
+ "app_not_installed": "{app} is niet geïnstalleerd",
+ "app_removed": "{app} succesvol verwijderd",
+ "app_sources_fetch_failed": "Kan bronbestanden niet ophalen, klopt de URL?",
"app_unknown": "Onbekende app",
- "app_upgrade_failed": "Kan app {app:s} niet updaten",
- "app_upgraded": "{app:s} succesvol geüpgraded",
- "appslist_fetched": "App-lijst {appslist:s} succesvol opgehaald",
- "appslist_removed": "App-lijst {appslist:s} succesvol verwijderd",
- "appslist_unknown": "App-lijst {appslist:s} is onbekend.",
- "ask_current_admin_password": "Huidig administratorwachtwoord",
- "ask_email": "Email-adres",
+ "app_upgrade_failed": "Kan app {app} niet updaten",
+ "app_upgraded": "{app} succesvol geüpgraded",
"ask_firstname": "Voornaam",
"ask_lastname": "Achternaam",
"ask_new_admin_password": "Nieuw administratorwachtwoord",
"ask_password": "Wachtwoord",
"backup_archive_name_exists": "Een backuparchief met dezelfde naam bestaat al",
"backup_cleaning_failed": "Kan tijdelijke backup map niet leeg maken",
- "backup_creating_archive": "Backup wordt gestart...",
- "backup_invalid_archive": "Ongeldig backup archief",
"backup_output_directory_not_empty": "Doelmap is niet leeg",
- "backup_running_app_script": "Backup script voor app '{app:s}' is gestart...",
- "custom_app_url_required": "U moet een URL opgeven om uw aangepaste app {app:s} bij te werken",
- "custom_appslist_name_required": "U moet een naam opgeven voor uw aangepaste app-lijst",
- "dnsmasq_isnt_installed": "dnsmasq lijkt niet geïnstalleerd te zijn, voer alstublieft het volgende commando uit: 'apt-get remove bind9 && apt-get install dnsmasq'",
+ "custom_app_url_required": "U moet een URL opgeven om uw aangepaste app {app} bij te werken",
"domain_cert_gen_failed": "Kan certificaat niet genereren",
"domain_created": "Domein succesvol aangemaakt",
"domain_creation_failed": "Kan domein niet aanmaken",
"domain_deleted": "Domein succesvol verwijderd",
"domain_deletion_failed": "Kan domein niet verwijderen",
"domain_dyndns_already_subscribed": "U heeft reeds een domein bij DynDNS geregistreerd",
- "domain_dyndns_invalid": "Het domein is ongeldig voor DynDNS",
"domain_dyndns_root_unknown": "Onbekend DynDNS root domein",
"domain_exists": "Domein bestaat al",
"domain_uninstall_app_first": "Een of meerdere apps zijn geïnstalleerd op dit domein, verwijder deze voordat u het domein verwijdert",
- "domain_unknown": "Onbekend domein",
- "domain_zone_exists": "DNS zone bestand bestaat al",
- "domain_zone_not_found": "DNS zone bestand niet gevonden voor domein: {:s}",
"done": "Voltooid",
"downloading": "Downloaden...",
- "dyndns_cron_remove_failed": "De cron-job voor DynDNS kon niet worden verwijderd",
"dyndns_ip_update_failed": "Kan het IP adres niet updaten bij DynDNS",
"dyndns_ip_updated": "IP adres is aangepast bij DynDNS",
"dyndns_key_generating": "DNS sleutel word aangemaakt, wacht een moment...",
"dyndns_unavailable": "DynDNS subdomein is niet beschikbaar",
- "executing_script": "Script uitvoeren...",
"extracting": "Uitpakken...",
"installation_complete": "Installatie voltooid",
- "installation_failed": "Installatie gefaald",
- "ldap_initialized": "LDAP is klaar voor gebruik",
- "license_undefined": "Niet gedefinieerd",
- "mail_alias_remove_failed": "Kan mail-alias '{mail:s}' niet verwijderen",
- "monitor_stats_no_update": "Er zijn geen recente monitoringstatistieken bij te werken",
- "mysql_db_creation_failed": "Aanmaken MySQL database gefaald",
- "mysql_db_init_failed": "Initialiseren MySQL database gefaald",
- "mysql_db_initialized": "MySQL database is succesvol geïnitialiseerd",
- "network_check_smtp_ko": "Uitgaande mail (SMPT port 25) wordt blijkbaar geblokkeerd door uw het netwerk",
- "no_appslist_found": "Geen app-lijst gevonden",
- "no_internet_connection": "Server is niet verbonden met het internet",
- "no_ipv6_connectivity": "IPv6-stack is onbeschikbaar",
- "path_removal_failed": "Kan pad niet verwijderen {:s}",
+ "mail_alias_remove_failed": "Kan mail-alias '{mail}' niet verwijderen",
"pattern_email": "Moet een geldig emailadres bevatten (bv. abc@example.org)",
- "pattern_listname": "Slechts cijfers, letters en '_' zijn toegelaten",
"pattern_mailbox_quota": "Mailbox quota moet een waarde bevatten met b/k/M/G/T erachter of 0 om geen quota in te stellen",
"pattern_password": "Wachtwoord moet tenminste 3 karakters lang zijn",
- "port_already_closed": "Poort {port:d} is al gesloten voor {ip_version:s} verbindingen",
- "port_already_opened": "Poort {port:d} is al open voor {ip_version:s} verbindingen",
- "port_available": "Poort {port:d} is beschikbaar",
- "port_unavailable": "Poort {port:d} is niet beschikbaar",
- "restore_app_failed": "De app '{app:s}' kon niet worden terug gezet",
- "restore_hook_unavailable": "De herstel-hook '{hook:s}' is niet beschikbaar op dit systeem",
- "service_add_failed": "Kan service '{service:s}' niet toevoegen",
- "service_already_started": "Service '{service:s}' draait al",
- "service_cmd_exec_failed": "Kan '{command:s}' niet uitvoeren",
- "service_disabled": "Service '{service:s}' is uitgeschakeld",
- "service_remove_failed": "Kan service '{service:s}' niet verwijderen",
+ "port_already_closed": "Poort {port} is al gesloten voor {ip_version} verbindingen",
+ "port_already_opened": "Poort {port} is al open voor {ip_version} verbindingen",
+ "app_restore_failed": "De app '{app}' kon niet worden terug gezet: {error}",
+ "restore_hook_unavailable": "De herstel-hook '{part}' is niet beschikbaar op dit systeem",
+ "service_add_failed": "Kan service '{service}' niet toevoegen",
+ "service_already_started": "Service '{service}' draait al",
+ "service_cmd_exec_failed": "Kan '{command}' niet uitvoeren",
+ "service_disabled": "Service '{service}' is uitgeschakeld",
+ "service_remove_failed": "Kan service '{service}' niet verwijderen",
"service_removed": "Service werd verwijderd",
- "service_stop_failed": "Kan service '{service:s}' niet stoppen",
- "service_unknown": "De service '{service:s}' bestaat niet",
- "show_diff": "Let op de volgende verschillen zijn:\n{diff:s}",
+ "service_stop_failed": "Kan service '{service}' niet stoppen",
+ "service_unknown": "De service '{service}' bestaat niet",
"unexpected_error": "Er is een onbekende fout opgetreden",
- "unrestore_app": "App '{app:s}' wordt niet teruggezet",
+ "unrestore_app": "App '{app}' wordt niet teruggezet",
"updating_apt_cache": "Lijst van beschikbare pakketten wordt bijgewerkt...",
"upgrade_complete": "Upgrade voltooid",
"upgrading_packages": "Pakketten worden geüpdate...",
@@ -103,40 +67,50 @@
"upnp_port_open_failed": "Kan UPnP poorten niet openen",
"user_deleted": "Gebruiker werd verwijderd",
"user_home_creation_failed": "Kan de map voor deze gebruiker niet aanmaken",
- "user_unknown": "Gebruikersnaam {user:s} is onbekend",
+ "user_unknown": "Gebruikersnaam {user} is onbekend",
"user_update_failed": "Kan gebruiker niet bijwerken",
"yunohost_configured": "YunoHost configuratie is OK",
"admin_password_change_failed": "Wachtwoord kan niet veranderd worden",
- "app_argument_choice_invalid": "Ongeldige keuze voor argument '{name:s}'. Het moet een van de volgende keuzes zijn {choices:s}",
- "app_incompatible": "Deze applicatie is incompatibel met uw YunoHost versie",
- "app_not_correctly_installed": "{app:s} schijnt niet juist geïnstalleerd te zijn",
- "app_not_properly_removed": "{app:s} werd niet volledig verwijderd",
- "app_package_need_update": "Het is noodzakelijk om het app pakket te updaten, in navolging van veranderingen aan YunoHost",
- "app_requirements_checking": "Controleer noodzakelijke pakketten...",
- "app_requirements_failed": "Er wordt niet aan de aanvorderingen voldaan: {error}",
+ "app_argument_choice_invalid": "Ongeldige keuze voor argument '{name}'. Het moet een van de volgende keuzes zijn {choices}",
+ "app_not_correctly_installed": "{app} schijnt niet juist geïnstalleerd te zijn",
+ "app_not_properly_removed": "{app} werd niet volledig verwijderd",
+ "app_requirements_checking": "Noodzakelijke pakketten voor {app} aan het controleren...",
"app_requirements_unmeet": "Er wordt niet aan de aanvorderingen voldaan, het pakket {pkgname} ({version}) moet {spec} zijn",
"app_unsupported_remote_type": "Niet ondersteund besturings type voor de app",
- "appslist_retrieve_error": "Niet mogelijk om de externe applicatie lijst op te halen {appslist:s}: {error:s}",
- "appslist_retrieve_bad_format": "Opgehaald bestand voor applicatie lijst {appslist:s} is geen geldige applicatie lijst",
- "appslist_name_already_tracked": "Er is reeds een geregistreerde applicatie lijst met de naam {name:s}.",
- "appslist_url_already_tracked": "Er is reeds een geregistreerde applicatie lijst met de url {url:s}.",
- "appslist_migrating": "Migreer applicatielijst {appslist:s} ...",
- "appslist_could_not_migrate": "Kon applicatielijst {appslist:s} niet migreren! Niet in staat om de url te verwerken... De oude cron job is opgeslagen onder {bkp_file:s}.",
- "appslist_corrupted_json": "Kon de applicatielijst niet laden. Het schijnt, dat {filename:s} beschadigd is.",
- "ask_list_to_remove": "Te verwijderen lijst",
"ask_main_domain": "Hoofd-domein",
- "backup_action_required": "U moet iets om op te slaan uitkiezen",
- "backup_app_failed": "Kon geen backup voor app '{app:s}' aanmaken",
- "backup_archive_app_not_found": "App '{app:s}' kon niet in het backup archief gevonden worden",
- "backup_archive_broken_link": "Het backup archief kon niet geopend worden (Ongeldig verwijs naar {path:s})",
- "backup_archive_hook_not_exec": "Hook '{hook:s}' kon voor deze backup niet uitgevoerd worden",
- "backup_archive_name_unknown": "Onbekend lokaal backup archief namens '{name:s}' gevonden",
+ "backup_app_failed": "Kon geen backup voor app '{app}' aanmaken",
+ "backup_archive_app_not_found": "App '{app}' kon niet in het backup archief gevonden worden",
+ "backup_archive_broken_link": "Het backup archief kon niet geopend worden (Ongeldig verwijs naar {path})",
+ "backup_archive_name_unknown": "Onbekend lokaal backup archief namens '{name}' gevonden",
"backup_archive_open_failed": "Kan het backup archief niet openen",
"backup_created": "Backup aangemaakt",
"backup_creation_failed": "Aanmaken van backup mislukt",
- "backup_delete_error": "Kon pad '{path:s}' niet verwijderen",
+ "backup_delete_error": "Kon pad '{path}' niet verwijderen",
"backup_deleted": "Backup werd verwijderd",
- "backup_extracting_archive": "Backup archief uitpakken...",
- "backup_hook_unknown": "backup hook '{hook:s}' onbekend",
- "backup_nothings_done": "Niets om op te slaan"
-}
+ "backup_hook_unknown": "backup hook '{hook}' onbekend",
+ "backup_nothings_done": "Niets om op te slaan",
+ "password_too_simple_1": "Het wachtwoord moet minimaal 8 tekens lang zijn",
+ "already_up_to_date": "Er is niets te doen, alles is al up-to-date.",
+ "admin_password_too_long": "Gelieve een wachtwoord te kiezen met minder dan 127 karakters",
+ "app_action_cannot_be_ran_because_required_services_down": "De volgende diensten moeten actief zijn om deze actie uit te voeren: {services}. Probeer om deze te herstarten om verder te gaan (en om eventueel te onderzoeken waarom ze niet werken).",
+ "aborting": "Annulatie.",
+ "app_upgrade_app_name": "Bezig {app} te upgraden...",
+ "app_make_default_location_already_used": "Kan '{app}' niet de standaardapp maken op het domein, '{domain}' wordt al gebruikt door '{other_app}'",
+ "app_install_failed": "Kan {app} niet installeren: {error}",
+ "app_remove_after_failed_install": "Bezig de app te verwijderen na gefaalde installatie...",
+ "app_manifest_install_ask_domain": "Kies het domein waar deze app op geïnstalleerd moet worden",
+ "app_manifest_install_ask_path": "Kies het pad waar deze app geïnstalleerd moet worden",
+ "app_manifest_install_ask_admin": "Kies een administrator voor deze app",
+ "app_change_url_success": "{app} URL is nu {domain}{path}",
+ "app_full_domain_unavailable": "Sorry, deze app moet op haar eigen domein geïnstalleerd worden, maar andere apps zijn al geïnstalleerd op het domein '{domain}'. U kunt wel een subdomein aan deze app toewijden.",
+ "app_install_script_failed": "Er is een fout opgetreden in het installatiescript van de app",
+ "app_location_unavailable": "Deze URL is niet beschikbaar of is in conflict met de al geïnstalleerde app(s):\n{apps}",
+ "app_manifest_install_ask_password": "Kies een administratiewachtwoord voor deze app",
+    "app_manifest_install_ask_is_public": "Moet deze app zichtbaar zijn voor anonieme bezoekers?",
+ "app_not_upgraded": "De app '{failed_app}' kon niet upgraden en daardoor zijn de upgrades van de volgende apps geannuleerd: {apps}",
+ "app_start_install": "{app} installeren...",
+ "app_start_remove": "{app} verwijderen...",
+ "app_start_backup": "Bestanden aan het verzamelen voor de backup van {app}...",
+ "app_start_restore": "{app} herstellen...",
+ "app_upgrade_several_apps": "De volgende apps zullen worden geüpgraded: {apps}"
+}
\ No newline at end of file
diff --git a/locales/oc.json b/locales/oc.json
index fc6f6946c..a2a5bfe31 100644
--- a/locales/oc.json
+++ b/locales/oc.json
@@ -2,110 +2,77 @@
"admin_password": "Senhal d’administracion",
"admin_password_change_failed": "Impossible de cambiar lo senhal",
"admin_password_changed": "Lo senhal d’administracion es ben estat cambiat",
- "app_already_installed": "{app:s} es ja installat",
- "app_already_up_to_date": "{app:s} es ja a jorn",
+ "app_already_installed": "{app} es ja installat",
+ "app_already_up_to_date": "{app} es ja a jorn",
"installation_complete": "Installacion acabada",
- "app_id_invalid": "Id d’aplicacion incorrècte",
- "app_install_files_invalid": "Fichièrs d’installacion incorrèctes",
- "app_no_upgrade": "Pas cap d’aplicacion de metre a jorn",
- "app_not_correctly_installed": "{app:s} sembla pas ben installat",
- "app_not_installed": "{app:s} es pas installat",
- "app_not_properly_removed": "{app:s} es pas estat corrèctament suprimit",
- "app_removed": "{app:s} es estat suprimit",
+ "app_id_invalid": "ID d’aplicacion incorrècte",
+ "app_install_files_invalid": "Installacion impossibla d’aquestes fichièrs",
+ "app_not_correctly_installed": "{app} sembla pas ben installat",
+ "app_not_installed": "Impossible de trobar l’aplicacion {app} dins la lista de las aplicacions installadas : {all_apps}",
+ "app_not_properly_removed": "{app} es pas estat corrèctament suprimit",
+ "app_removed": "{app} es estada suprimida",
"app_unknown": "Aplicacion desconeguda",
- "app_upgrade_app_name": "Mesa a jorn de l’aplicacion {app}…",
- "app_upgrade_failed": "Impossible de metre a jorn {app:s}",
- "app_upgrade_some_app_failed": "D’aplicacions se pòdon pas metre a jorn",
- "app_upgraded": "{app:s} es estat mes a jorn",
- "appslist_fetched": "Recuperacion de la lista d’aplicacions {appslist:s} corrèctament realizada",
- "appslist_migrating": "Migracion de la lista d’aplicacion{appslist:s}…",
- "appslist_name_already_tracked": "I a ja una lista d’aplicacion enregistrada amb lo nom {name:s}.",
- "appslist_removed": "Supression de la lista d’aplicacions {appslist:s} corrèctament realizada",
- "appslist_retrieve_bad_format": "Lo fichièr recuperat per la lista d’aplicacions {appslist:s} es pas valid",
- "appslist_unknown": "La lista d’aplicacions {appslist:s} es desconeguda.",
- "appslist_url_already_tracked": "I a ja una lista d’aplicacions enregistrada amb l’URL {url:s}.",
- "ask_current_admin_password": "Senhal administrator actual",
- "ask_email": "Adreça de corrièl",
+ "app_upgrade_app_name": "Actualizacion de l’aplicacion {app}...",
+ "app_upgrade_failed": "Impossible d’actualizar {app} : {error}",
+ "app_upgrade_some_app_failed": "D’aplicacions se pòdon pas actualizar",
+ "app_upgraded": "{app} es estada actualizada",
"ask_firstname": "Prenom",
"ask_lastname": "Nom",
- "ask_list_to_remove": "Lista de suprimir",
"ask_main_domain": "Domeni màger",
"ask_new_admin_password": "Nòu senhal administrator",
"ask_password": "Senhal",
- "ask_path": "Camin",
- "backup_action_required": "Devètz precisar çò que cal salvagardar",
- "backup_app_failed": "Impossible de salvagardar l’aplicacion « {app:s} »",
- "backup_applying_method_copy": "Còpia de totes los fichièrs dins la salvagarda…",
- "backup_applying_method_tar": "Creacion de l’archiu tar de la salvagarda…",
- "backup_archive_name_exists": "Un archiu de salvagarda amb aquesta nom existís ja",
- "backup_archive_name_unknown": "L’archiu local de salvagarda apelat « {name:s} » es desconegut",
- "action_invalid": "Accion « {action:s} » incorrècta",
- "app_argument_choice_invalid": "Causida invalida pel paramètre « {name:s} », cal que siá un de {choices:s}",
- "app_argument_invalid": "Valor invalida pel paramètre « {name:s} » : {error:s}",
- "app_argument_required": "Lo paramètre « {name:s} » es requesit",
- "app_change_url_failed_nginx_reload": "La reaviada de nginx a fracassat. Vaquí la sortida de « nginx -t » :\n{nginx_errors:s}",
- "app_change_url_identical_domains": "L’ancian e lo novèl coble domeni/camin son identics per {domain:s}{path:s}, pas res a far.",
- "app_change_url_success": "L’URL de l’aplicacion {app:s} a cambiat per {domain:s}{path:s}",
- "app_checkurl_is_deprecated": "Packagers /!\\ ’app checkurl’ es obsolèt ! Utilizatz ’app register-url’ a la plaça !",
+ "backup_app_failed": "Impossible de salvagardar l’aplicacion « {app} »",
+ "backup_applying_method_copy": "Còpia de totes los fichièrs dins la salvagarda...",
+ "backup_applying_method_tar": "Creacion de l’archiu TAR de la salvagarda...",
+ "backup_archive_name_exists": "Un archiu de salvagarda amb aquesta nom existís ja.",
+ "backup_archive_name_unknown": "L’archiu local de salvagarda apelat « {name} » es desconegut",
+ "action_invalid": "Accion « {action} » incorrècta",
+ "app_argument_choice_invalid": "Utilizatz una de las opcions « {choices} » per l’argument « {name} »",
+ "app_argument_invalid": "Causissètz una valor invalida pel paramètre « {name} » : {error}",
+ "app_argument_required": "Lo paramètre « {name} » es requesit",
+ "app_change_url_identical_domains": "L’ancian e lo novèl coble domeni/camin son identics per {domain}{path}, pas res a far.",
+ "app_change_url_success": "L’URL de l’aplicacion {app} es ara {domain}{path}",
"app_extraction_failed": "Extraccion dels fichièrs d’installacion impossibla",
- "app_incompatible": "L’aplicacion {app} es pas compatibla amb vòstra version de YunoHost",
- "app_location_already_used": "L’aplicacion « {app} » es ja installada a aqueste emplaçament ({path})",
- "app_manifest_invalid": "Manifest d’aplicacion incorrècte : {error}",
- "app_package_need_update": "Lo paquet de l’aplicacion {app} deu èsser mes a jorn per seguir los cambiaments de YunoHost",
- "app_requirements_checking": "Verificacion dels paquets requesits per {app}…",
+ "app_manifest_invalid": "I a quicòm que truca amb lo manifest de l’aplicacion : {error}",
+ "app_requirements_checking": "Verificacion dels paquets requesits per {app}...",
"app_sources_fetch_failed": "Recuperacion dels fichièrs fonts impossibla, l’URL es corrècta ?",
"app_unsupported_remote_type": "Lo tipe alonhat utilizat per l’aplicacion es pas suportat",
- "appslist_retrieve_error": "Impossible de recuperar la lista d’aplicacions alonhadas {appslist:s} : {error:s}",
- "backup_archive_app_not_found": "L’aplicacion « {app:s} » es pas estada trobada dins l’archiu de la salvagarda",
- "backup_archive_broken_link": "Impossible d‘accedir a l’archiu de salvagarda (ligam invalid cap a {path:s})",
- "backup_archive_mount_failed": "Lo montatge de l’archiu de salvagarda a fracassat",
+ "backup_archive_app_not_found": "L’aplicacion « {app} » es pas estada trobada dins l’archiu de la salvagarda",
+ "backup_archive_broken_link": "Impossible d’accedir a l’archiu de salvagarda (ligam invalid cap a {path})",
"backup_archive_open_failed": "Impossible de dobrir l’archiu de salvagarda",
- "backup_archive_system_part_not_available": "La part « {part:s} » del sistèma es pas disponibla dins aquesta salvagarda",
+ "backup_archive_system_part_not_available": "La part « {part} » del sistèma es pas disponibla dins aquesta salvagarda",
"backup_cleaning_failed": "Impossible de netejar lo repertòri temporari de salvagarda",
- "backup_copying_to_organize_the_archive": "Còpia de {size:s} Mio per organizar l’archiu",
+ "backup_copying_to_organize_the_archive": "Còpia de {size} Mio per organizar l’archiu",
"backup_created": "Salvagarda acabada",
- "backup_creating_archive": "Creacion de l’archiu de salvagarda…",
- "backup_creation_failed": "Impossible de crear la salvagarda",
+ "backup_creation_failed": "Creacion impossibla de l’archiu de salvagarda",
"app_already_installed_cant_change_url": "Aquesta aplicacion es ja installada. Aquesta foncion pòt pas simplament cambiar l’URL. Agachatz « app changeurl » s’es disponible.",
- "app_change_no_change_url_script": "L’aplicacion {app_name:s} pren pas en compte lo cambiament d’URL, poiretz aver de la metre a jorn.",
- "app_change_url_no_script": "L’aplicacion {app_name:s} pren pas en compte lo cambiament d’URL, benlèu que vos cal la metre a jorn.",
+ "app_change_url_no_script": "L’aplicacion {app_name} pren pas en compte lo cambiament d’URL, benlèu que vos cal l’actualizar.",
"app_make_default_location_already_used": "Impossible de configurar l’aplicacion « {app} » per defaut pel domeni {domain} perque es ja utilizat per l’aplicacion {other_app}",
- "app_location_install_failed": "Impossible d’installar l’aplicacion a aqueste emplaçament per causa de conflicte amb l’aplicacion {other_app} qu’es ja installada sus {other_path}",
- "app_location_unavailable": "Aquesta URL es pas disponibla o en conflicte amb una aplicacion existenta :\n{apps:s}",
- "appslist_corrupted_json": "Cargament impossible de la lista d’aplicacion. Sembla que {filename:s} siá gastat.",
- "backup_delete_error": "Impossible de suprimir « {path:s} »",
+ "app_location_unavailable": "Aquesta URL es pas disponibla o en conflicte amb una aplicacion existenta :\n{apps}",
+ "backup_delete_error": "Supression impossibla de « {path} »",
"backup_deleted": "La salvagarda es estada suprimida",
- "backup_hook_unknown": "Script de salvagarda « {hook:s} » desconegut",
- "backup_invalid_archive": "Archiu de salvagarda incorrècte",
- "backup_method_borg_finished": "La salvagarda dins Borg es acabada",
+ "backup_hook_unknown": "Script de salvagarda « {hook} » desconegut",
"backup_method_copy_finished": "La còpia de salvagarda es acabada",
- "backup_method_tar_finished": "L’archiu tar de la salvagarda es estat creat",
- "backup_output_directory_not_empty": "Lo dorsièr de sortida es pas void",
+ "backup_method_tar_finished": "L’archiu TAR de la salvagarda es estat creat",
+ "backup_output_directory_not_empty": "Devètz causir un dorsièr de sortida void",
"backup_output_directory_required": "Vos cal especificar un dorsièr de sortida per la salvagarda",
- "backup_running_app_script": "Lançament de l’escript de salvagarda de l’aplicacion « {app:s} »...",
- "backup_running_hooks": "Execucion dels scripts de salvagarda…",
- "backup_system_part_failed": "Impossible de salvagardar la part « {part:s} » del sistèma",
- "app_requirements_failed": "Impossible de complir las condicions requesidas per {app} : {error}",
+ "backup_running_hooks": "Execucion dels scripts de salvagarda...",
+ "backup_system_part_failed": "Impossible de salvagardar la part « {part} » del sistèma",
"app_requirements_unmeet": "Las condicions requesidas per {app} son pas complidas, lo paquet {pkgname} ({version}) deu èsser {spec}",
- "appslist_could_not_migrate": "Migracion de la lista impossibla {appslist:s} ! Impossible d’analizar l’URL… L’anciana tasca cron es estada servada dins {bkp_file:s}.",
"backup_abstract_method": "Aqueste metòde de salvagarda es pas encara implementat",
- "backup_applying_method_custom": "Crida lo metòde de salvagarda personalizat « {method:s} »…",
- "backup_borg_not_implemented": "Lo metòde de salvagarda Bord es pas encara implementat",
- "backup_couldnt_bind": "Impossible de ligar {src:s} amb {dest:s}.",
+ "backup_applying_method_custom": "Crida del metòde de salvagarda personalizat « {method} »...",
+ "backup_couldnt_bind": "Impossible de ligar {src} amb {dest}.",
"backup_csv_addition_failed": "Impossible d’ajustar de fichièrs a la salvagarda dins lo fichièr CSV",
"backup_custom_backup_error": "Fracàs del metòde de salvagarda personalizat a l’etapa « backup »",
"backup_custom_mount_error": "Fracàs del metòde de salvagarda personalizat a l’etapa « mount »",
- "backup_custom_need_mount_error": "Fracàs del metòde de salvagarda personalizat a l’etapa « need_mount »",
- "backup_method_custom_finished": "Lo metòde de salvagarda personalizat « {method:s} » es acabat",
+ "backup_method_custom_finished": "Lo metòde de salvagarda personalizat « {method} » es acabat",
"backup_nothings_done": "I a pas res de salvagardar",
"backup_unable_to_organize_files": "Impossible d’organizar los fichièrs dins l’archiu amb lo metòde rapid",
- "service_status_failed": "Impossible de determinar l’estat del servici « {service:s} »",
- "service_stopped": "Lo servici « {service:s} » es estat arrestat",
- "service_unknown": "Servici « {service:s} » desconegut",
- "unbackup_app": "L’aplicacion « {app:s} » serà pas salvagardada",
- "unit_unknown": "Unitat « {unit:s} » desconeguda",
+ "service_stopped": "Lo servici « {service} » es estat arrestat",
+ "service_unknown": "Servici « {service} » desconegut",
+ "unbackup_app": "L’aplicacion « {app} » serà pas salvagardada",
"unlimit": "Cap de quòta",
- "unrestore_app": "L’aplicacion « {app:s} » serà pas restaurada",
+ "unrestore_app": "L’aplicacion « {app} » serà pas restaurada",
"upnp_dev_not_found": "Cap de periferic compatible UPnP pas trobat",
"upnp_disabled": "UPnP es desactivat",
"upnp_enabled": "UPnP es activat",
@@ -113,177 +80,101 @@
"yunohost_already_installed": "YunoHost es ja installat",
"yunohost_configured": "YunoHost es estat configurat",
"yunohost_installing": "Installacion de YunoHost…",
- "backup_applying_method_borg": "Mandadís de totes los fichièrs a la salvagarda dins lo repertòri borg-backup…",
"backup_csv_creation_failed": "Creacion impossibla del fichièr CSV necessari a las operacions futuras de restauracion",
- "backup_extracting_archive": "Extraccion de l’archiu de salvagarda…",
- "backup_output_symlink_dir_broken": "Avètz un ligam simbolic copat allòc de vòstre repertòri d’archiu « {path:s} ». Poiriatz aver una configuracion personalizada per salvagardar vòstras donadas sus un autre sistèma de fichièrs, en aquel cas, saique oblidèretz de montar o de connectar lo disc o la clau USB.",
- "backup_with_no_backup_script_for_app": "L’aplicacion {app:s} a pas cap de script de salvagarda. I fasèm pas cas.",
- "backup_with_no_restore_script_for_app": "L’aplicacion {app:s} a pas cap de script de restauracion, poiretz pas restaurar automaticament la salvagarda d’aquesta aplicacion.",
- "certmanager_acme_not_configured_for_domain": "Lo certificat del domeni {domain:s} sembla pas corrèctament installat. Mercés de lançar d’en primièr cert-install per aqueste domeni.",
- "certmanager_attempt_to_renew_nonLE_cert": "Lo certificat pel domeni {domain:s} es pas provesit per Let’s Encrypt. Impossible de lo renovar automaticament !",
- "certmanager_attempt_to_renew_valid_cert": "Lo certificat pel domeni {domain:s} es a man d’expirar ! (Podètz utilizar --force se sabètz çò que fasètz)",
- "certmanager_cannot_read_cert": "Quicòm a trucat en ensajar de dobrir lo certificat actual pel domeni {domain:s} (fichièr : {file:s}), rason : {reason:s}",
- "certmanager_cert_install_success": "Installacion capitada del certificat Let’s Encrypt pel domeni {domain:s} !",
- "certmanager_cert_install_success_selfsigned": "Installacion capitada del certificat auto-signat pel domeni {domain:s} !",
- "certmanager_cert_signing_failed": "Fracàs de la signatura del nòu certificat",
- "certmanager_domain_cert_not_selfsigned": "Lo certificat del domeni {domain:s} es pas auto-signat. Volètz vertadièrament lo remplaçar ? (Utiliatz --force)",
- "certmanager_domain_dns_ip_differs_from_public_ip": "L’enregistrament DNS « A » del domeni {domain:s} es diferent de l’adreça IP d’aqueste servidor. Se fa pauc qu’avètz modificat l’enregistrament « A », mercés d’esperar l’espandiment (qualques verificadors d’espandiment son disponibles en linha). (Se sabètz çò que fasèm, utilizatz --no-checks per desactivar aqueles contraròtles)",
- "certmanager_domain_http_not_working": "Sembla que lo domeni {domain:s} es pas accessible via HTTP. Mercés de verificar que las configuracions DNS e nginx son corrèctas",
- "certmanager_domain_unknown": "Domeni desconegut {domain:s}",
- "certmanager_no_cert_file": "Lectura impossibla del fichièr del certificat pel domeni {domain:s} (fichièr : {file:s})",
- "certmanager_self_ca_conf_file_not_found": "Lo fichièr de configuracion per l’autoritat del certificat auto-signat es introbabla (fichièr : {file:s})",
- "certmanager_unable_to_parse_self_CA_name": "Analisi impossible lo nom de l’autoritat del certificat auto-signat (fichièr : {file:s})",
- "custom_app_url_required": "Cal que donetz una URL per actualizar vòstra aplicacion personalizada {app:s}",
- "custom_appslist_name_required": "Cal que nomenetz vòstra lista d’aplicacions personalizadas",
- "diagnosis_debian_version_error": "Impossible de determinar la version de Debian : {error}",
- "diagnosis_kernel_version_error": "Impossible de recuperar la version del nuclèu : {error}",
- "diagnosis_no_apps": "Pas cap d’aplicacion installada",
- "dnsmasq_isnt_installed": "dnsmasq sembla pas èsser installat, mercés de lançar « apt-get remove bind9 && apt-get install dnsmasq »",
+    "backup_output_symlink_dir_broken": "Vòstre repertòri d’archiu « {path} » es un ligam simbolic copat. Saique oblidèretz de re/montar o de connectar lo supòrt.",
+ "backup_with_no_backup_script_for_app": "L’aplicacion {app} a pas cap de script de salvagarda. I fasèm pas cas.",
+ "backup_with_no_restore_script_for_app": "{app} a pas cap de script de restauracion, poiretz pas restaurar automaticament la salvagarda d’aquesta aplicacion.",
+ "certmanager_acme_not_configured_for_domain": "Lo certificat pel domeni {domain} sembla pas corrèctament installat. Mercés de lançar d’en primièr « cert-install » per aqueste domeni.",
+ "certmanager_attempt_to_renew_nonLE_cert": "Lo certificat pel domeni {domain} es pas provesit per Let’s Encrypt. Impossible de lo renovar automaticament !",
+ "certmanager_attempt_to_renew_valid_cert": "Lo certificat pel domeni {domain} es a man d’expirar ! (Podètz utilizar --force se sabètz çò que fasètz)",
+ "certmanager_cannot_read_cert": "Quicòm a trucat en ensajar de dobrir lo certificat actual pel domeni {domain} (fichièr : {file}), rason : {reason}",
+ "certmanager_cert_install_success": "Lo certificat Let’s Encrypt es ara installat pel domeni « {domain} »",
+ "certmanager_cert_install_success_selfsigned": "Lo certificat auto-signat es ara installat pel domeni « {domain} »",
+ "certmanager_cert_signing_failed": "Signatura impossibla del nòu certificat",
+ "certmanager_domain_cert_not_selfsigned": "Lo certificat pel domeni {domain} es pas auto-signat. Volètz vertadièrament lo remplaçar ? (Utilizatz « --force » per o far)",
+ "certmanager_domain_dns_ip_differs_from_public_ip": "L’enregistrament DNS « A » pel domeni {domain} es diferent de l’adreça IP d’aqueste servidor. Se fa pauc qu’avètz modificat l’enregistrament « A », mercés d’esperar l’espandiment (qualques verificadors d’espandiment son disponibles en linha). (Se sabètz çò que fasèm, utilizatz --no-checks per desactivar aqueles contraròtles)",
+    "certmanager_domain_http_not_working": "Sembla que lo domeni {domain} es pas accessible via HTTP. Mercés de verificar que las configuracions DNS e NGINX son corrèctas",
+ "certmanager_no_cert_file": "Lectura impossibla del fichièr del certificat pel domeni {domain} (fichièr : {file})",
+ "certmanager_self_ca_conf_file_not_found": "Impossible de trobar lo fichièr de configuracion per l’autoritat del certificat auto-signat (fichièr : {file})",
+ "certmanager_unable_to_parse_self_CA_name": "Analisi impossibla del nom de l’autoritat del certificat auto-signat (fichièr : {file})",
+ "custom_app_url_required": "Cal que donetz una URL per actualizar vòstra aplicacion personalizada {app}",
"domain_cannot_remove_main": "Impossible de levar lo domeni màger. Definissètz un novèl domeni màger d’en primièr",
"domain_cert_gen_failed": "Generacion del certificat impossibla",
- "domain_created": "Lo domeni es creat",
- "domain_creation_failed": "Creacion del certificat impossibla",
- "domain_deleted": "Lo domeni es suprimit",
- "domain_deletion_failed": "Supression impossibla del domeni",
- "domain_dyndns_invalid": "Domeni incorrècte per una utilizacion amb DynDNS",
+ "domain_created": "Domeni creat",
+ "domain_creation_failed": "Creacion del domeni {domain}: impossibla",
+ "domain_deleted": "Domeni suprimit",
+ "domain_deletion_failed": "Supression impossibla del domeni {domain}: {error}",
"domain_dyndns_root_unknown": "Domeni DynDNS màger desconegut",
"domain_exists": "Lo domeni existís ja",
- "domain_hostname_failed": "Fracàs de la creacion d’un nòu nom d’òst",
- "domain_unknown": "Domeni desconegut",
- "domain_zone_exists": "Lo fichièr zòna DNS existís ja",
- "domain_zone_not_found": "Fichèr de zòna DNS introbable pel domeni {:s}",
+ "domain_hostname_failed": "Fracàs de la creacion d’un nòu nom d’òst. Aquò poirà provocar de problèmas mai tard (mas es pas segur… benlèu que coparà pas res).",
"domains_available": "Domenis disponibles :",
"done": "Acabat",
"downloading": "Telecargament…",
- "dyndns_could_not_check_provide": "Impossible de verificar se {provider:s} pòt provesir {domain:s}.",
- "dyndns_cron_installed": "La tasca cron pel domeni DynDNS es installada",
- "dyndns_cron_remove_failed": "Impossible de levar la tasca cron pel domeni DynDNS",
- "dyndns_cron_removed": "La tasca cron pel domeni DynDNS es levada",
+ "dyndns_could_not_check_provide": "Impossible de verificar se {provider} pòt provesir {domain}.",
"dyndns_ip_update_failed": "Impossible d’actualizar l’adreça IP sul domeni DynDNS",
- "dyndns_ip_updated": "Vòstra adreça IP es estada actualizada pel domeni DynDNS",
- "dyndns_key_generating": "La clau DNS es a se generar, pòt trigar una estona…",
+ "dyndns_ip_updated": "Vòstra adreça IP actualizada pel domeni DynDNS",
+ "dyndns_key_generating": "La clau DNS es a se generar… pòt trigar una estona.",
"dyndns_key_not_found": "Clau DNS introbabla pel domeni",
"dyndns_no_domain_registered": "Cap de domeni pas enregistrat amb DynDNS",
- "dyndns_registered": "Lo domeni DynDNS es enregistrat",
- "dyndns_registration_failed": "Enregistrament del domeni DynDNS impossibla : {error:s}",
- "dyndns_domain_not_provided": "Lo provesidor DynDNS {provider:s} pòt pas fornir lo domeni {domain:s}.",
- "dyndns_unavailable": "Lo domeni {domain:s} es pas disponible.",
+ "dyndns_registered": "Domeni DynDNS enregistrat",
+ "dyndns_registration_failed": "Enregistrament del domeni DynDNS impossible : {error}",
+ "dyndns_domain_not_provided": "Lo provesidor DynDNS {provider} pòt pas fornir lo domeni {domain}.",
+ "dyndns_unavailable": "Lo domeni {domain} es pas disponible.",
"extracting": "Extraccion…",
- "field_invalid": "Camp incorrècte : « {:s} »",
- "format_datetime_short": "%d/%m/%Y %H:%M",
- "global_settings_cant_open_settings": "Fracàs de la dobertura del fichièr de configuracion, rason : {reason:s}",
- "global_settings_key_doesnt_exists": "La clau « {settings_key:s} » existís pas dins las configuracions globalas, podètz veire totas las claus disponiblas en picant « yunohost settings list »",
- "global_settings_reset_success": "Capitada ! Vòstra configuracion precedenta es estada salvagarda dins {path:s}",
- "global_settings_setting_example_bool": "Exemple d’opcion booleana",
- "global_settings_unknown_setting_from_settings_file": "Clau desconeguda dins los paramètres : {setting_key:s}, apartada e salvagardada dins /etc/yunohost/settings-unknown.json",
- "installation_failed": "Fracàs de l’installacion",
- "invalid_url_format": "Format d’URL pas valid",
- "ldap_initialized": "L’annuari LDAP es inicializat",
- "license_undefined": "indefinida",
- "maindomain_change_failed": "Modificacion impossibla del domeni màger",
- "maindomain_changed": "Lo domeni màger es estat modificat",
- "migrate_tsig_end": "La migracion cap a hmac-sha512 es acabada",
- "migrate_tsig_wait_2": "2 minutas…",
- "migrate_tsig_wait_3": "1 minuta…",
- "migrate_tsig_wait_4": "30 segondas…",
- "migration_description_0002_migrate_to_tsig_sha256": "Melhora la seguretat de DynDNS TSIG en utilizar SHA512 allòc de MD5",
- "migration_description_0003_migrate_to_stretch": "Mesa a nivèl del sistèma cap a Debian Stretch e YunoHost 3.0",
- "migration_0003_backward_impossible": "La migracion Stretch es pas reversibla.",
- "migration_0003_start": "Aviada de la migracion cap a Stretech. Los jornals seràn disponibles dins {logfile}.",
- "migration_0003_patching_sources_list": "Petaçatge de sources.lists…",
- "migration_0003_main_upgrade": "Aviada de la mesa a nivèl màger…",
- "migration_0003_fail2ban_upgrade": "Aviada de la mesa a nivèl de fail2ban…",
- "migration_0003_not_jessie": "La distribucion Debian actuala es pas Jessie !",
+ "field_invalid": "Camp incorrècte : « {} »",
+ "global_settings_cant_open_settings": "Fracàs de la dobertura del fichièr de configuracion, rason : {reason}",
+ "global_settings_key_doesnt_exists": "La clau « {settings_key} » existís pas dins las configuracions globalas, podètz veire totas las claus disponiblas en executant « yunohost settings list »",
+ "global_settings_reset_success": "Configuracion precedenta ara salvagarda dins {path}",
+ "global_settings_unknown_setting_from_settings_file": "Clau desconeguda dins los paramètres : {setting_key}, apartada e salvagardada dins /etc/yunohost/settings-unknown.json",
+ "main_domain_change_failed": "Modificacion impossibla del domeni màger",
+ "main_domain_changed": "Lo domeni màger es estat modificat",
"migrations_cant_reach_migration_file": "Impossible d’accedir als fichièrs de migracion amb lo camin %s",
- "migrations_current_target": "La cibla de migracion est {}",
- "migrations_error_failed_to_load_migration": "ERROR : fracàs del cargament de la migracion {number} {name}",
"migrations_list_conflict_pending_done": "Podètz pas utilizar --previous e --done a l’encòp.",
- "migrations_loading_migration": "Cargament de la migracion{number} {name}…",
+ "migrations_loading_migration": "Cargament de la migracion {id}…",
"migrations_no_migrations_to_run": "Cap de migracion de lançar",
- "migrations_show_currently_running_migration": "Realizacion de la migracion {number} {name}…",
- "migrations_show_last_migration": "La darrièra migracion realizada es {}",
- "monitor_glances_con_failed": "Connexion impossibla al servidor Glances",
- "monitor_not_enabled": "Lo seguiment de l’estat del servidor es pas activat",
- "monitor_stats_no_update": "Cap de donadas d’estat del servidor d’actualizar",
- "mountpoint_unknown": "Ponch de montatge desconegut",
- "mysql_db_creation_failed": "Creacion de la basa de donadas MySQL impossibla",
- "no_appslist_found": "Cap de lista d’aplicacions pas trobada",
- "no_internet_connection": "Lo servidor es pas connectat a Internet",
- "package_not_installed": "Lo paquet « {pkgname} » es pas installat",
- "package_unknown": "Paquet « {pkgname} » desconegut",
- "packages_no_upgrade": "I a pas cap de paquet d’actualizar",
"packages_upgrade_failed": "Actualizacion de totes los paquets impossibla",
- "path_removal_failed": "Impossible de suprimir lo camin {:s}",
"pattern_domain": "Deu èsser un nom de domeni valid (ex : mon-domeni.org)",
"pattern_email": "Deu èsser una adreça electronica valida (ex : escais@domeni.org)",
"pattern_firstname": "Deu èsser un pichon nom valid",
"pattern_lastname": "Deu èsser un nom valid",
"pattern_password": "Deu conténer almens 3 caractèrs",
- "pattern_port": "Deu èsser un numèro de pòrt valid (ex : 0-65535)",
"pattern_port_or_range": "Deu èsser un numèro de pòrt valid (ex : 0-65535) o un interval de pòrt (ex : 100:200)",
- "pattern_positive_number": "Deu èsser un nombre positiu",
- "port_already_closed": "Lo pòrt {port:d} es ja tampat per las connexions {ip_version:s}",
- "port_already_opened": "Lo pòrt {port:d} es ja dubèrt per las connexions {ip_version:s}",
- "port_available": "Lo pòrt {port:d} es disponible",
- "port_unavailable": "Lo pòrt {port:d} es pas disponible",
- "restore_already_installed_app": "Una aplicacion es ja installada amb l’id « {app:s} »",
- "restore_app_failed": "Impossible de restaurar l’aplicacion « {app:s} »",
- "backup_ask_for_copying_if_needed": "D’unes fichièrs an pas pogut èsser preparatz per la salvagarda en utilizar lo metòde qu’evita de gastar d’espaci sul sistèma de manièra temporària. Per lançar la salvagarda, cal utilizar temporàriament {size:s} Mo. Acceptatz ?",
+ "port_already_closed": "Lo pòrt {port} es ja tampat per las connexions {ip_version}",
+ "port_already_opened": "Lo pòrt {port} es ja dubèrt per las connexions {ip_version}",
+ "restore_already_installed_app": "Una aplicacion es ja installada amb l’id « {app} »",
+ "app_restore_failed": "Impossible de restaurar l’aplicacion « {app} »: {error}",
+ "backup_ask_for_copying_if_needed": "Volètz far una salvagarda en utilizant {size} Mo temporàriament ? (Aqueste biais de far es emplegat perque unes fichièrs an pas pogut èsser preparats amb un metòde mai eficaç.)",
"yunohost_not_installed": "YunoHost es pas installat o corrèctament installat. Mercés d’executar « yunohost tools postinstall »",
- "backup_output_directory_forbidden": "Repertòri de destinacion defendut. Las salvagardas pòdon pas se realizar dins los repertòris bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var ou /home/yunohost.backup/archives",
- "certmanager_attempt_to_replace_valid_cert": "Sètz a remplaçar un certificat corrècte e valid pel domeni {domain:s} ! (Utilizatz --force per cortcircuitar)",
- "certmanager_cert_renew_success": "Renovèlament capitat d’un certificat Let’s Encrypt pel domeni {domain:s} !",
- "certmanager_certificate_fetching_or_enabling_failed": "Sembla d’aver fracassat l’activacion d’un nòu certificat per {domain:s}…",
- "certmanager_conflicting_nginx_file": "Impossible de preparar lo domeni pel desfís ACME : lo fichièr de configuracion nginx {filepath:s} es en conflicte e deu èsser levat d’en primièr",
- "certmanager_couldnt_fetch_intermediate_cert": "Expiracion del relambi pendent l’ensag de recuperacion del certificat intermediari dins de Let’s Encrypt. L’installacion / lo renovèlament es estat interromput - tornatz ensajar mai tard.",
- "certmanager_domain_not_resolved_locally": "Lo domeni {domain:s} pòt pas èsser determinat dins de vòstre servidor YunoHost. Pòt arribar s’avètz recentament modificat vòstre enregistrament DNS. Dins aqueste cas, mercés d’esperar unas oras per l’espandiment. Se lo problèma dura, consideratz ajustar {domain:s} a /etc/hosts. (Se sabètz çò que fasètz, utilizatz --no-checks per desactivar las verificacions.)",
- "certmanager_error_no_A_record": "Cap d’enregistrament DNS « A » pas trobat per {domain:s}. Vos cal indicar que lo nom de domeni mene a vòstra maquina per poder installar un certificat Let’S Encrypt ! (Se sabètz çò que fasètz, utilizatz --no-checks per desactivar las verificacions.)",
- "certmanager_hit_rate_limit": "Tròp de certificats son ja estats demandats recentament per aqueste ensem de domeni {domain:s}. Mercés de tornar ensajar mai tard. Legissètz https://letsencrypt.org/docs/rate-limits/ per mai detalhs",
- "certmanager_http_check_timeout": "Expiracion del relambi d’ensag del servidor de se contactar via HTTP amb son adreça IP publica {domain:s} amb l’adreça {ip:s}. Coneissètz benlèu de problèmas d’hairpinning o lo parafuòc/router amont de vòstre servidor es mal configurat.",
+ "backup_output_directory_forbidden": "Causissètz un repertòri de destinacion deferent. Las salvagardas pòdon pas se realizar dins los repertòris bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var ou /home/yunohost.backup/archives",
+ "certmanager_attempt_to_replace_valid_cert": "Sètz a remplaçar un certificat corrècte e valid pel domeni {domain} ! (Utilizatz --force per cortcircuitar)",
+ "certmanager_cert_renew_success": "Renovèlament capitat d’un certificat Let’s Encrypt pel domeni « {domain} »",
+ "certmanager_certificate_fetching_or_enabling_failed": "Sembla qu’utilizar lo nòu certificat per {domain} fonciona pas...",
+ "certmanager_hit_rate_limit": "Tròp de certificats son ja estats demandats recentament per aqueste ensem de domeni {domain}. Mercés de tornar ensajar mai tard. Legissètz https://letsencrypt.org/docs/rate-limits/ per mai detalhs",
"domain_dns_conf_is_just_a_recommendation": "Aqueste pagina mòstra la configuracion *recomandada*. Non configura *pas* lo DNS per vos. Sètz responsable de la configuracion de vòstra zòna DNS en çò de vòstre registrar DNS amb aquesta recomandacion.",
"domain_dyndns_already_subscribed": "Avètz ja soscrich a un domeni DynDNS",
- "domain_dyndns_dynette_is_unreachable": "Impossible de contactar la dynette YunoHost, siá YunoHost pas es pas corrèctament connectat a Internet, siá lo servidor de la dynett es arrestat. Error : {error}",
"domain_uninstall_app_first": "Una o mantuna aplicacions son installadas sus aqueste domeni. Mercés de las desinstallar d’en primièr abans de suprimir aqueste domeni",
"firewall_reload_failed": "Impossible de recargar lo parafuòc",
- "firewall_reloaded": "Lo parafuòc es estat recargat",
+ "firewall_reloaded": "Parafuòc recargat",
"firewall_rules_cmd_failed": "Unas règlas del parafuòc an fracassat. Per mai informacions, consultatz lo jornal.",
- "global_settings_bad_choice_for_enum": "La valor del paramètre {setting:s} es incorrècta. Recebut : {received_type:s}, mas las opcions esperadas son : {expected_type:s}",
- "global_settings_bad_type_for_setting": "Lo tipe del paramètre {setting:s} es incorrècte. Recebut : {received_type:s}, esperat {expected_type:s}",
- "global_settings_cant_write_settings": "Fracàs de l’escritura del fichièr de configuracion, rason : {reason:s}",
- "global_settings_setting_example_enum": "Exemple d’opcion de tipe enumeracion",
- "global_settings_setting_example_int": "Exemple d’opcion de tipe entièr",
- "global_settings_setting_example_string": "Exemple d’opcion de tipe cadena",
- "global_settings_unknown_type": "Situacion inesperada, la configuracion {setting:s} sembla d’aver lo tipe {unknown_type:s} mas es pas un tipe pres en carga pel sistèma.",
- "hook_exec_failed": "Fracàs de l’execucion del script « {path:s} »",
- "hook_exec_not_terminated": "L’execucion del escript « {path:s} » es pas acabada",
+ "global_settings_bad_choice_for_enum": "La valor del paramètre {setting} es incorrècta. Recebut : {choice}, mas las opcions esperadas son : {available_choices}",
+ "global_settings_bad_type_for_setting": "Lo tipe del paramètre {setting} es incorrècte, recebut : {received_type}, esperat {expected_type}",
+ "global_settings_cant_write_settings": "Fracàs de l’escritura del fichièr de configuracion, rason : {reason}",
+ "global_settings_unknown_type": "Situacion inesperada, la configuracion {setting} sembla d’aver lo tipe {unknown_type} mas es pas un tipe pres en carga pel sistèma.",
+ "hook_exec_failed": "Fracàs de l’execucion del script : « {path} »",
+ "hook_exec_not_terminated": "Lo escript « {path} » a pas acabat corrèctament",
"hook_list_by_invalid": "La proprietat de tria de las accions es invalida",
- "hook_name_unknown": "Nom de script « {name:s} » desconegut",
- "ldap_init_failed_to_create_admin": "L’inicializacion de LDAP a pas pogut crear l’utilizaire admin",
- "mail_domain_unknown": "Lo domeni de corrièl « {domain:s} » es desconegut",
+ "hook_name_unknown": "Nom de script « {name} » desconegut",
+ "mail_domain_unknown": "Lo domeni de corrièl « {domain} » es desconegut",
"mailbox_used_space_dovecot_down": "Lo servici corrièl Dovecot deu èsser aviat, se volètz conéisser l’espaci ocupat per la messatjariá",
- "migrate_tsig_failed": "La migracion del domeni dyndns {domain} cap a hmac-sha512 a pas capitat, anullacion de las modificacions. Error : {error_code} - {error}",
- "migrate_tsig_wait": "Esperem 3 minutas que lo servidor dyndns prenga en compte la novèla clau…",
- "migrate_tsig_not_needed": "Sembla qu’utilizatz pas un domeni dyndns, donc cap de migracion es pas necessària !",
- "migration_0003_yunohost_upgrade": "Aviada de la mesa a nivèl del paquet YunoHost… La migracion acabarà, mas la mesa a jorn reala se realizarà tot bèl aprèp. Un còp acabada, poiretz vos reconnectar a l’administracion web.",
- "migration_0003_system_not_fully_up_to_date": "Lo sistèma es pas complètament a jorn. Mercés de lançar una mesa a jorn classica abans de començar la migracion per Stretch.",
- "migration_0003_modified_files": "Mercés de notar que los fichièrs seguents son estats detectats coma modificats manualament e poiràn èsser escafats a la fin de la mesa a nivèl : {manually_modified_files}",
- "monitor_period_invalid": "Lo periòde de temps es incorrècte",
- "monitor_stats_file_not_found": "Lo fichièr d’estatisticas es introbable",
- "monitor_stats_period_unavailable": "Cap d’estatisticas son pas disponiblas pel periòde",
- "mysql_db_init_failed": "Impossible d’inicializar la basa de donadas MySQL",
- "service_disable_failed": "Impossible de desactivar lo servici « {service:s} »↵\n↵\nJornals recents : {logs:s}",
- "service_disabled": "Lo servici « {service:s} » es desactivat",
- "service_enable_failed": "Impossible d’activar lo servici « {service:s} »↵\n↵\nJornals recents : {logs:s}",
- "service_enabled": "Lo servici « {service:s} » es activat",
- "service_no_log": "Cap de jornal de far veire pel servici « {service:s} »",
- "service_regenconf_dry_pending_applying": "Verificacion de las configuracions en espèra que poirián èsser aplicadas pel servici « {service} »…",
- "service_regenconf_failed": "Regeneracion impossibla de la configuracion pels servicis : {services}",
- "service_regenconf_pending_applying": "Aplicacion de las configuracions en espèra pel servici « {service} »…",
- "service_remove_failed": "Impossible de levar lo servici « {service:s} »",
- "service_removed": "Lo servici « {service:s} » es estat levat",
- "service_start_failed": "Impossible d’aviar lo servici « {service:s} »↵\n↵\nJornals recents : {logs:s}",
- "service_started": "Lo servici « {service:s} » es aviat",
- "service_stop_failed": "Impossible d’arrestar lo servici « {service:s} »↵\n\nJornals recents : {logs:s}",
+ "service_disable_failed": "Impossible de desactivar lo servici « {service} »↵\n↵\nJornals recents : {logs}",
+ "service_disabled": "Lo servici « {service} » es desactivat",
+ "service_enable_failed": "Impossible d’activar lo servici « {service} »↵\n↵\nJornals recents : {logs}",
+ "service_enabled": "Lo servici « {service} » es activat",
+ "service_remove_failed": "Impossible de levar lo servici « {service} »",
+ "service_removed": "Lo servici « {service} » es estat levat",
+ "service_start_failed": "Impossible d’aviar lo servici « {service} »↵\n↵\nJornals recents : {logs}",
+ "service_started": "Lo servici « {service} » es aviat",
+ "service_stop_failed": "Impossible d’arrestar lo servici « {service} »↵\n\nJornals recents : {logs}",
"ssowat_conf_generated": "La configuracion SSowat es generada",
"ssowat_conf_updated": "La configuracion SSOwat es estada actualizada",
"system_upgraded": "Lo sistèma es estat actualizat",
@@ -296,132 +187,73 @@
"user_deleted": "L’utilizaire es suprimit",
"user_deletion_failed": "Supression impossibla de l’utilizaire",
"user_home_creation_failed": "Creacion impossibla del repertòri personal a l’utilizaire",
- "user_info_failed": "Recuperacion impossibla de las informacions tocant l’utilizaire",
- "user_unknown": "Utilizaire « {user:s} » desconegut",
+ "user_unknown": "Utilizaire « {user} » desconegut",
"user_update_failed": "Modificacion impossibla de l’utilizaire",
"user_updated": "L’utilizaire es estat modificat",
- "yunohost_ca_creation_failed": "Creacion impossibla de l’autoritat de certificacion",
- "yunohost_ca_creation_success": "L’autoritat de certificacion locala es creada.",
- "service_conf_file_kept_back": "Lo fichièr de configuracion « {conf} » deuriá èsser suprimit pel servici {service} mas es estat servat.",
- "service_conf_file_manually_modified": "Lo fichièr de configuracion « {conf} » es estat modificat manualament e serà pas actualizat",
- "service_conf_file_manually_removed": "Lo fichièr de configuracion « {conf} » es suprimit manualament e serà pas creat",
- "service_conf_file_remove_failed": "Supression impossibla del fichièr de configuracion « {conf} »",
- "service_conf_file_removed": "Lo fichièr de configuracion « {conf} » es suprimit",
- "service_conf_file_updated": "Lo fichièr de configuracion « {conf} » es actualizat",
- "service_conf_new_managed_file": "Lo servici {service} gerís ara lo fichièr de configuracion « {conf} ».",
- "service_conf_up_to_date": "La configuracion del servici « {service} » es ja actualizada",
- "service_conf_would_be_updated": "La configuracion del servici « {service} » seriá estada actualizada",
- "service_description_avahi-daemon": "permet d’aténher vòstre servidor via yunohost.local sus vòstre ret local",
"service_description_dnsmasq": "gerís la resolucion dels noms de domeni (DNS)",
"updating_apt_cache": "Actualizacion de la lista dels paquets disponibles…",
- "service_conf_file_backed_up": "Lo fichièr de configuracion « {conf} » es salvagardat dins « {backup} »",
- "service_conf_file_copy_failed": "Còpia impossibla del nòu fichièr de configuracion « {new} » cap a « {conf} »",
- "server_reboot_confirm": "Lo servidor es per reaviar sul pic, o volètz vertadièrament ? {answers:s}",
- "service_add_failed": "Apondon impossible del servici « {service:s} »",
- "service_added": "Lo servici « {service:s} » es ajustat",
- "service_already_started": "Lo servici « {service:s} » es ja aviat",
- "service_already_stopped": "Lo servici « {service:s} » es ja arrestat",
+ "server_reboot_confirm": "Lo servidor es per reaviar sul pic, o volètz vertadièrament ? {answers}",
+ "service_add_failed": "Apondon impossible del servici « {service} »",
+ "service_added": "Lo servici « {service} » es ajustat",
+ "service_already_started": "Lo servici « {service} » es ja aviat",
+ "service_already_stopped": "Lo servici « {service} » es ja arrestat",
"restore_cleaning_failed": "Impossible de netejar lo repertòri temporari de restauracion",
"restore_complete": "Restauracion acabada",
- "restore_confirm_yunohost_installed": "Volètz vertadièrament restaurar un sistèma ja installat ? {answers:s}",
+ "restore_confirm_yunohost_installed": "Volètz vertadièrament restaurar un sistèma ja installat ? {answers}",
"restore_extracting": "Extraccions dels fichièrs necessaris dins de l’archiu…",
"restore_failed": "Impossible de restaurar lo sistèma",
- "restore_hook_unavailable": "Lo script de restauracion « {part:s} » es pas disponible sus vòstre sistèma e es pas tanpauc dins l’archiu",
- "restore_may_be_not_enough_disk_space": "Lo sistèma sembla d’aver pas pro d’espaci disponible (liure : {free_space:d} octets, necessari : {needed_space:d} octets, marge de seguretat : {margin:d} octets)",
- "restore_mounting_archive": "Montatge de l’archiu dins « {path:s} »",
- "restore_not_enough_disk_space": "Espaci disponible insufisent (liure : {free_space:d} octets, necessari : {needed_space:d} octets, marge de seguretat : {margin:d} octets)",
+ "restore_hook_unavailable": "Lo script de restauracion « {part} » es pas disponible sus vòstre sistèma e es pas tanpauc dins l’archiu",
+ "restore_may_be_not_enough_disk_space": "Lo sistèma sembla d’aver pas pro d’espaci disponible (liure : {free_space} octets, necessari : {needed_space} octets, marge de seguretat : {margin} octets)",
+ "restore_not_enough_disk_space": "Espaci disponible insufisent (liure : {free_space} octets, necessari : {needed_space} octets, marge de seguretat : {margin} octets)",
"restore_nothings_done": "Res es pas estat restaurat",
"restore_removing_tmp_dir_failed": "Impossible de levar u ancian repertòri temporari",
- "restore_running_app_script": "Lançament del script de restauracion per l’aplicacion « {app:s} »…",
+ "restore_running_app_script": "Lançament del script de restauracion per l’aplicacion « {app} »…",
"restore_running_hooks": "Execucion dels scripts de restauracion…",
- "restore_system_part_failed": "Restauracion impossibla de la part « {part:s} » del sistèma",
+ "restore_system_part_failed": "Restauracion impossibla de la part « {part} » del sistèma",
"server_shutdown": "Lo servidor serà atudat",
- "server_shutdown_confirm": "Lo servidor es per s’atudar sul pic, o volètz vertadièrament ? {answers:s}",
+ "server_shutdown_confirm": "Lo servidor es per s’atudar sul pic, o volètz vertadièrament ? {answers}",
"server_reboot": "Lo servidor es per reaviar",
- "network_check_mx_ko": "L’enregistrament DNS MX es pas especificat",
- "new_domain_required": "Vos cal especificar lo domeni màger",
- "no_ipv6_connectivity": "La connectivitat IPv6 es pas disponibla",
- "not_enough_disk_space": "Espaci disc insufisent sus « {path:s} »",
- "package_unexpected_error": "Una error inesperada es apareguda amb lo paquet « {pkgname} »",
- "packages_upgrade_critical_later": "Los paquets critics {packages:s} seràn actualizats mai tard",
- "restore_action_required": "Devètz precisar çò que cal restaurar",
- "service_cmd_exec_failed": "Impossible d’executar la comanda « {command:s} »",
- "service_conf_updated": "La configuracion es estada actualizada pel servici « {service} »",
+ "not_enough_disk_space": "Espaci disc insufisent sus « {path} »",
+ "service_cmd_exec_failed": "Impossible d’executar la comanda « {command} »",
"service_description_mysql": "garda las donadas de las aplicacions (base de donadas SQL)",
- "service_description_php5-fpm": "executa d’aplicacions escrichas en PHP amb nginx",
"service_description_postfix": "emplegat per enviar e recebre de corrièls",
- "service_description_rmilter": "verifica mantun paramètres dels corrièls",
"service_description_slapd": "garda los utilizaires, domenis e lors informacions ligadas",
"service_description_ssh": "vos permet de vos connectar a distància a vòstre servidor via un teminal (protocòl SSH)",
"service_description_yunohost-api": "permet las interaccions entre l’interfàcia web de YunoHost e le sistèma",
"service_description_yunohost-firewall": "gerís los pòrts de connexion dobèrts e tampats als servicis",
- "ssowat_persistent_conf_read_error": "Error en legir la configuracion duradissa de SSOwat : {error:s}. Modificatz lo fichièr /etc/ssowat/conf.json.persistent per reparar la sintaxi JSON",
- "ssowat_persistent_conf_write_error": "Error en salvagardar la configuracion duradissa de SSOwat : {error:s}. Modificatz lo fichièr /etc/ssowat/conf.json.persistent per reparar la sintaxi JSON",
- "certmanager_old_letsencrypt_app_detected": "\nYunohost a detectat que l’aplicacion ’letsencrypt’ es installada, aquò es en conflicte amb las novèlas foncionalitats integradas de gestion dels certificats de Yunohost. Se volètz utilizar aquelas foncionalitats integradas, mercés de lançar las comandas seguentas per migrar vòstra installacion :\n\n yunohost app remove letsencrypt\n yunohost domain cert-install\n\nN.B. : aquò provarà de tornar installar los certificats de totes los domenis amb un certificat Let’s Encrypt o las auto-signats",
- "diagnosis_monitor_disk_error": "Impossible de supervisar los disques : {error}",
- "diagnosis_monitor_network_error": "Impossible de supervisar la ret : {error}",
- "diagnosis_monitor_system_error": "Impossible de supervisar lo sistèma : {error}",
- "executing_command": "Execucion de la comanda « {command:s} »…",
- "executing_script": "Execucion del script « {script:s} »…",
- "global_settings_cant_serialize_settings": "Fracàs de la serializacion de las donadas de parametratge, rason : {reason:s}",
+ "global_settings_cant_serialize_settings": "Fracàs de la serializacion de las donadas de parametratge, rason : {reason}",
"ip6tables_unavailable": "Podètz pas jogar amb ip6tables aquí. Siá sèts dins un contenedor, siá vòstre nuclèu es pas compatible amb aquela opcion",
"iptables_unavailable": "Podètz pas jogar amb iptables aquí. Siá sèts dins un contenedor, siá vòstre nuclèu es pas compatible amb aquela opcion",
- "update_cache_failed": "Impossible d’actualizar lo cache de l’APT",
- "mail_alias_remove_failed": "Supression impossibla de l’alias de corrièl « {mail:s} »",
- "mail_forward_remove_failed": "Supression impossibla del corrièl de transferiment « {mail:s} »",
- "migrate_tsig_start": "L’algorisme de generacion de claus es pas pro securizat per la signatura TSIG del domeni « {domain} », lançament de la migracion cap a hmac-sha512 que’s mai securizat",
- "migration_description_0001_change_cert_group_to_sslcert": "Càmbia las permissions de grop dels certificats de « metronome » per « ssl-cert »",
- "migration_0003_restoring_origin_nginx_conf": "Vòstre fichièr /etc/nginx/nginx.conf es estat modificat manualament. La migracion reïnicializarà d’en primièr son estat origina… Lo fichièr precedent serà disponible coma {backup_dest}.",
- "migration_0003_still_on_jessie_after_main_upgrade": "Quicòm a trucat pendent la mesa a nivèl màger : lo sistèma es encara jos Jessie ?!? Per trobar lo problèma, agachatz {log}…",
- "migration_0003_general_warning": "Notatz qu’aquesta migracion es una operacion delicata. Encara que la còla YunoHost aguèsse fach çò melhor per la tornar legir e provar, la migracion poiriá copar de parts del sistèma o de las aplicacions.\n\nEn consequéncia, vos recomandam :\n· · · · - de lançar una salvagarda de vòstras donadas o aplicacions criticas. Mai d’informacions a https://yunohost.org/backup ;\n· · · · - d’èsser pacient aprèp aver lançat la migracion : segon vòstra connexion Internet e material, pòt trigar qualques oras per que tot siá mes al nivèl.\n\nEn mai, lo pòrt per SMTP, utilizat pels clients de corrièls extèrns (coma Thunderbird o K9-Mail per exemple) foguèt cambiat de 465 (SSL/TLS) per 587 (STARTTLS). L’ancian pòrt 465 serà automaticament tampat e lo nòu pòrt 587 serà dobèrt dins lo parafuòc. Vosautres e vòstres utilizaires *auretz* d’adaptar la configuracion de vòstre client de corrièl segon aqueles cambiaments !",
- "migration_0003_problematic_apps_warning": "Notatz que las aplicacions seguentas, saique problematicas, son estadas desactivadas. Semblan d’aver estadas installadas d’una lista d’aplicacions o que son pas marcadas coma «working ». En consequéncia, podèm pas assegurar que tendràn de foncionar aprèp la mesa a nivèl : {problematic_apps}",
- "migrations_bad_value_for_target": "Nombre invalid pel paramètre « target », los numèros de migracion son 0 o {}",
- "migrations_migration_has_failed": "La migracion {number} {name} a pas capitat amb l’excepcion {exception}, anullacion",
- "migrations_skip_migration": "Passatge de la migracion {number} {name}…",
- "migrations_to_be_ran_manually": "La migracion {number} {name} deu èsser lançada manualament. Mercés d’anar a Aisinas > Migracion dins l’interfàcia admin, o lançar « yunohost tools migrations migrate ».",
- "migrations_need_to_accept_disclaimer": "Per lançar la migracion {number} {name} , avètz d’acceptar aquesta clausa de non-responsabilitat :\n---\n{disclaimer}\n---\nS’acceptatz de lançar la migracion, mercés de tornar executar la comanda amb l’opcion accept-disclaimer.",
- "monitor_disabled": "La supervision del servidor es desactivada",
- "monitor_enabled": "La supervision del servidor es activada",
- "mysql_db_initialized": "La basa de donadas MySQL es estada inicializada",
- "no_restore_script": "Lo script de salvagarda es pas estat trobat per l’aplicacion « {app:s} »",
+ "mail_alias_remove_failed": "Supression impossibla de l’alias de corrièl « {mail} »",
+ "mail_forward_remove_failed": "Supression impossibla del corrièl de transferiment « {mail} »",
+ "migrations_migration_has_failed": "La migracion {id} a pas capitat, abandon. Error : {exception}",
+ "migrations_skip_migration": "Passatge de la migracion {id}…",
+ "migrations_to_be_ran_manually": "La migracion {id} deu èsser lançada manualament. Mercés d’anar a Aisinas > Migracion dins l’interfàcia admin, o lançar « yunohost tools migrations run ».",
+ "migrations_need_to_accept_disclaimer": "Per lançar la migracion {id} , avètz d’acceptar aquesta clausa de non-responsabilitat :\n---\n{disclaimer}\n---\nS’acceptatz de lançar la migracion, mercés de tornar executar la comanda amb l’opcion accept-disclaimer.",
"pattern_backup_archive_name": "Deu èsser un nom de fichièr valid compausat de 30 caractèrs alfanumerics al maximum e « -_. »",
- "pattern_listname": "Deu èsser compausat solament de caractèrs alfanumerics e de tirets basses",
"service_description_dovecot": "permet als clients de messatjariá d’accedir/recuperar los corrièls (via IMAP e POP3)",
"service_description_fail2ban": "protegís contra los atacs brute-force e d’autres atacs venents d’Internet",
- "service_description_glances": "susvelha las informacions sistèma de vòstre servidor",
"service_description_metronome": "gerís los comptes de messatjariás instantanèas XMPP",
"service_description_nginx": "fornís o permet l’accès a totes los sites web albergats sus vòstre servidor",
- "service_description_nslcd": "gerís la connexion en linha de comanda dels utilizaires YunoHost",
"service_description_redis-server": "una basa de donadas especializada per un accès rapid a las donadas, las filas d’espèra e la comunicacion entre programas",
"service_description_rspamd": "filtra lo corrièl pas desirat e mai foncionalitats ligadas al corrièl",
- "migrations_backward": "Migracion en darrièr.",
- "migrations_forward": "Migracion en avant",
- "network_check_smtp_ko": "Lo trafic de corrièl sortent (pòrt 25 SMTP) sembla blocat per vòstra ret",
- "network_check_smtp_ok": "Lo trafic de corrièl sortent (pòrt 25 SMTP) es pas blocat",
"pattern_mailbox_quota": "Deu èsser una talha amb lo sufixe b/k/M/G/T o 0 per desactivar la quòta",
- "backup_archive_writing_error": "Impossible d’ajustar los fichièrs a la salvagarda dins l’archiu comprimit",
+ "backup_archive_writing_error": "Impossible d’ajustar los fichièrs « {source} » a la salvagarda (nomenats dins l’archiu « {dest} »)dins l’archiu comprimit « {archive} »",
"backup_cant_mount_uncompress_archive": "Impossible de montar en lectura sola lo repertòri de l’archiu descomprimit",
"backup_no_uncompress_archive_dir": "Lo repertòri de l’archiu descomprimit existís pas",
"pattern_username": "Deu èsser compausat solament de caractèrs alfanumerics en letras minusculas e de tirets basses",
"experimental_feature": "Atencion : aquesta foncionalitat es experimentala e deu pas èsser considerada coma establa, deuriatz pas l’utilizar levat que sapiatz çò que fasètz.",
- "log_corrupted_md_file": "Lo fichièr yaml de metadonada amb los jornals d’audit es corromput : « {md_file} »",
- "log_category_404": "La categoria de jornals d’audit « {category} » existís pas",
+ "log_corrupted_md_file": "Lo fichièr YAML de metadonadas ligat als jornals d’audit es damatjat : « {md_file} »\nError : {error}",
"log_link_to_log": "Jornal complèt d’aquesta operacion : {desc}",
- "log_help_to_get_log": "Per veire lo jornal d’aquesta operacion « {desc} », utilizatz la comanda « yunohost log display {name} »",
- "backup_php5_to_php7_migration_may_fail": "Impossible de convertir vòstre archiu per prendre en carga PHP 7, la restauracion de vòstras aplicacions PHP pòt reüssir pas (rason : {error:s})",
+ "log_help_to_get_log": "Per veire lo jornal d’aquesta operacion « {desc} », utilizatz la comanda « yunohost log show {name} »",
"log_link_to_failed_log": "L’operacion « {desc} » a pas capitat ! Per obténer d’ajuda, mercés de fornir lo jornal complèt de l’operacion",
- "log_help_to_get_failed_log": "L’operacion « {desc} » a pas reüssit ! Per obténer d’ajuda, mercés de partejar lo jornal d’audit complèt d’aquesta operacion en utilizant la comanda « yunohost log display {name} --share »",
+ "log_help_to_get_failed_log": "L’operacion « {desc} » a pas reüssit ! Per obténer d’ajuda, mercés de partejar lo jornal d’audit complèt d’aquesta operacion en utilizant la comanda « yunohost log share {name} »",
"log_does_exists": "I a pas cap de jornal d’audit per l’operacion amb lo nom « {log} », utilizatz « yunohost log list » per veire totes los jornals d’operacion disponibles",
"log_operation_unit_unclosed_properly": "L’operacion a pas acabat corrèctament",
- "log_app_addaccess": "Ajustar l’accès a « {} »",
- "log_app_removeaccess": "Tirar l’accès a « {} »",
- "log_app_clearaccess": "Tirar totes los accèsses a « {} »",
- "log_app_fetchlist": "Ajustar una lista d’aplicacions",
- "log_app_removelist": "Levar una lista d’aplicacions",
"log_app_change_url": "Cambiar l’URL de l’aplicacion « {} »",
"log_app_install": "Installar l’aplicacion « {} »",
"log_app_remove": "Levar l’aplicacion « {} »",
- "log_app_upgrade": "Metre a jorn l’aplicacion « {} »",
+ "log_app_upgrade": "Actualizar l’aplicacion « {} »",
"log_app_makedefault": "Far venir « {} » l’aplicacion per defaut",
"log_available_on_yunopaste": "Lo jornal es ara disponible via {url}",
"log_backup_restore_system": "Restaurar lo sistèma a partir d’una salvagarda",
@@ -431,36 +263,22 @@
"log_domain_add": "Ajustar lo domeni « {} » dins la configuracion sistèma",
"log_domain_remove": "Tirar lo domeni « {} » d’a la configuracion sistèma",
"log_dyndns_subscribe": "S’abonar al subdomeni YunoHost « {} »",
- "log_dyndns_update": "Metre a jorn l’adreça IP ligada a vòstre jos-domeni YunoHost « {} »",
- "log_letsencrypt_cert_install": "Installar lo certificat Let's encrypt sul domeni « {} »",
+ "log_dyndns_update": "Actualizar l’adreça IP ligada a vòstre jos-domeni YunoHost « {} »",
+ "log_letsencrypt_cert_install": "Installar un certificat Let's Encrypt sul domeni « {} »",
"log_selfsigned_cert_install": "Installar lo certificat auto-signat sul domeni « {} »",
- "log_letsencrypt_cert_renew": "Renovar lo certificat Let's encrypt de « {} »",
- "log_service_enable": "Activar lo servici « {} »",
- "log_service_regen_conf": "Regenerar la configuracion sistèma de « {} »",
+ "log_letsencrypt_cert_renew": "Renovar lo certificat Let's Encrypt de « {} »",
"log_user_create": "Ajustar l’utilizaire « {} »",
"log_user_delete": "Levar l’utilizaire « {} »",
- "log_user_update": "Metre a jorn las informacions a l’utilizaire « {} »",
- "log_tools_maindomain": "Far venir « {} » lo domeni màger",
- "log_tools_migrations_migrate_forward": "Migrar",
- "log_tools_migrations_migrate_backward": "Tornar en arrièr",
+ "log_user_update": "Actualizar las informacions de l’utilizaire « {} »",
+ "log_domain_main_domain": "Far venir « {} » lo domeni màger",
+ "log_tools_migrations_migrate_forward": "Executar las migracions",
"log_tools_postinstall": "Realizar la post installacion del servidor YunoHost",
- "log_tools_upgrade": "Mesa a jorn dels paquets sistèma",
+ "log_tools_upgrade": "Actualizacion dels paquets sistèma",
"log_tools_shutdown": "Atudar lo servidor",
"log_tools_reboot": "Reaviar lo servidor",
"mail_unavailable": "Aquesta adreça electronica es reservada e deu èsser automaticament atribuida al tot bèl just primièr utilizaire",
- "migration_description_0004_php5_to_php7_pools": "Tornar configurar lo pools PHP per utilizar PHP 7 allòc del 5",
- "migration_description_0005_postgresql_9p4_to_9p6": "Migracion de las basas de donadas de postgresql 9.4 cap a 9.6",
- "migration_0005_postgresql_94_not_installed": "Postgresql es pas installat sul sistèma. Pas res de far !",
- "migration_0005_postgresql_96_not_installed": "Avèm trobat que Postgresql 9.4 es installat, mas cap de version de Postgresql 9.6 pas trobada !? Quicòm d’estranh a degut arribar a vòstre sistèma :( …",
- "migration_0005_not_enough_space": "I a pas pro d’espaci disponible sus {path} per lançar la migracion d’aquela passa :(.",
- "recommend_to_add_first_user": "La post installacion es acabada, mas YunoHost fa besonh d’almens un utilizaire per foncionar coma cal. Vos cal n’ajustar un en utilizant la comanda « yunohost user create $username » o ben l’interfàcia d’administracion.",
- "service_description_php7.0-fpm": "executa d’aplicacions escrichas en PHP amb nginx",
- "users_available": "Lista dels utilizaires disponibles :",
- "good_practices_about_admin_password": "Sètz per definir un nòu senhal per l’administracion. Lo senhal deu almens conténer 8 caractèrs - encara que siá de bon far d’utilizar un senhal mai long qu’aquò (ex. una passafrasa) e/o d’utilizar mantun tipes de caractèrs (majuscula, minuscula, nombre e caractèrs especials).",
- "good_practices_about_user_password": "Sètz a mand de definir un nòu senhal d’utilizaire. Lo nòu senhal deu conténer almens 8 caractèrs, es de bon far d’utilizar un senhal mai long (es a dire una frasa de senhal) e/o utilizar mantuns tipes de caractèrs (majusculas, minusculas, nombres e caractèrs especials).",
- "migration_description_0006_sync_admin_and_root_passwords": "Sincronizar los senhals admin e root",
- "migration_0006_disclaimer": "Ara YunoHost s’espèra que los senhals admin e root sián sincronizats. En lançant aquesta migracion, vòstre senhal root serà remplaçat pel senhal admin.",
- "migration_0006_done": "Lo senhal root es estat remplaçat pel senhal admin.",
+ "good_practices_about_admin_password": "Sètz per definir un nòu senhal per l’administracion. Lo senhal deu almens conténer 8 caractèrs - encara que siá de bon far d’utilizar un senhal mai long qu’aquò (ex. una passafrasa) e/o d’utilizar mantun tipe de caractèrs (majuscula, minuscula, nombre e caractèrs especials).",
+ "good_practices_about_user_password": "Sètz a mand de definir un nòu senhal d’utilizaire. Lo nòu senhal deu conténer almens 8 caractèrs, es de bon far d’utilizar un senhal mai long (es a dire una frasa de senhal) e/o utilizar mantun tipe de caractèrs (majusculas, minusculas, nombres e caractèrs especials).",
"password_listed": "Aqueste senhal es un dels mai utilizats al monde. Se vos plai utilizatz-ne un mai unic.",
"password_too_simple_1": "Lo senhal deu conténer almens 8 caractèrs",
"password_too_simple_2": "Lo senhal deu conténer almens 8 caractèrs e nombres, majusculas e minusculas",
@@ -468,47 +286,37 @@
"password_too_simple_4": "Lo senhal deu conténer almens 12 caractèrs, de nombre, majusculas, minusculas e caractèrs especials",
"root_password_desynchronized": "Lo senhal de l’administrator es estat cambiat, mas YunoHost a pas pogut l’espandir al senhal root !",
"aborting": "Interrupcion.",
- "app_not_upgraded": "Las aplicacions seguentas son pas estadas actualizadas : {apps}",
- "app_start_install": "Installacion de l’aplicacion {app}…",
- "app_start_remove": "Supression de l’aplicacion {app}…",
- "app_start_backup": "Recuperacion dels fichièrs de salvagardar per {app}…",
- "app_start_restore": "Restauracion de l’aplicacion {app}…",
- "app_upgrade_several_apps": "Las aplicacions seguentas seràn mesas a jorn : {apps}",
+ "app_not_upgraded": "L’aplicacion « {failed_app} » a pas reüssit a s’actualizar e coma consequéncia las mesas a jorn de las aplicacions seguentas son estadas anulladas : {apps}",
+ "app_start_install": "Installacion de l’aplicacion {app}...",
+ "app_start_remove": "Supression de l’aplicacion {app}...",
+ "app_start_backup": "Recuperacion dels fichièrs de salvagardar per {app}...",
+ "app_start_restore": "Restauracion de l’aplicacion {app}...",
+ "app_upgrade_several_apps": "Las aplicacions seguentas seràn actualizadas : {apps}",
"ask_new_domain": "Nòu domeni",
"ask_new_path": "Nòu camin",
- "backup_actually_backuping": "Creacion d’un archiu de seguretat a partir dels fichièrs recuperats…",
- "backup_mount_archive_for_restore": "Preparacion de l’archiu per restauracion…",
- "dyndns_could_not_check_available": "Verificacion impossibla de la disponibilitat de {domain:s} sus {provider:s}.",
- "file_does_not_exist": "Lo camin {path:s} existís pas.",
+ "backup_actually_backuping": "Creacion d’un archiu de seguretat a partir dels fichièrs recuperats...",
+ "backup_mount_archive_for_restore": "Preparacion de l’archiu per restauracion...",
+ "dyndns_could_not_check_available": "Verificacion impossibla de la disponibilitat de {domain} sus {provider}.",
+ "file_does_not_exist": "Lo camin {path} existís pas.",
"global_settings_setting_security_password_admin_strength": "Fòrça del senhal administrator",
"global_settings_setting_security_password_user_strength": "Fòrça del senhal utilizaire",
- "migration_description_0007_ssh_conf_managed_by_yunohost_step1": "La configuracion SSH serà gerada per YunoHost (etapa 1, automatica)",
- "migration_description_0008_ssh_conf_managed_by_yunohost_step2": "Daissar YunoHost gerir la configuracion SSH (etapa 2, manuala)",
- "migration_0007_cancelled": "YunoHost a pas reüssit a melhorar lo biais de gerir la configuracion SSH.",
"root_password_replaced_by_admin_password": "Lo senhal root es estat remplaçat pel senhal administrator.",
- "service_restarted": "Lo servici '{service:s}' es estat reaviat",
+ "service_restarted": "Lo servici '{service}' es estat reaviat",
"admin_password_too_long": "Causissètz un senhal d’almens 127 caractèrs",
- "migration_0007_cannot_restart": "SSH pòt pas èsser reavit aprèp aver ensajat d’anullar la migracion numèro 6.",
- "migrations_success": "Migracion {number} {name} reüssida !",
- "service_conf_now_managed_by_yunohost": "Lo fichièr de configuracion « {conf} » es ara gerit per YunoHost.",
- "service_reloaded": "Lo servici « {servici:s} » es estat tornat cargar",
+ "service_reloaded": "Lo servici « {service} » es estat tornat cargar",
"already_up_to_date": "I a pas res a far ! Tot es ja a jorn !",
- "app_action_cannot_be_ran_because_required_services_down": "Aquesta aplicacion necessita unes servicis que son actualament encalats. Abans de contunhar deuriatz ensajar de reaviar los servicis seguents (e tanben cercar perque son tombats en pana) : {services}",
- "confirm_app_install_warning": "Atencion : aquesta aplicacion fonciona mas non es pas ben integrada amb YunoHost. Unas foncionalitats coma l’autentificacion unica e la còpia de seguretat/restauracion pòdon èsser indisponiblas. volètz l’installar de totas manièras ? [{answers:s}] ",
- "confirm_app_install_danger": "ATENCION ! Aquesta aplicacion es encara experimentala (autrament dich, fonciona pas) e es possible que còpe lo sistèma ! Deuriatz PAS l’installar se non sabètz çò que fasètz. Volètz vertadièrament córrer aqueste risc ? [{answers:s}] ",
- "confirm_app_install_thirdparty": "ATENCION ! L’installacion d’aplicacions tèrças pòt comprometre l’integralitat e la seguretat del sistèma. Deuriatz PAS l’installar se non sabètz pas çò que fasètz. Volètz vertadièrament córrer aqueste risc ? [{answers:s}] ",
+ "app_action_cannot_be_ran_because_required_services_down": "Aquestes servicis necessitan d’èsser lançats per poder executar aquesta accion : {services}. Abans de contunhar deuriatz ensajar de reaviar los servicis seguents (e tanben cercar perque son tombats en pana) : {services}",
+ "confirm_app_install_warning": "Atencion : aquesta aplicacion fonciona mas non es pas ben integrada amb YunoHost. Unas foncionalitats coma l’autentificacion unica e la còpia de seguretat/restauracion pòdon èsser indisponiblas. volètz l’installar de totas manièras ? [{answers}] ",
+ "confirm_app_install_danger": "PERILH ! Aquesta aplicacion es encara experimentala (autrament dich, fonciona pas) e es possible que còpe lo sistèma ! Deuriatz PAS l’installar se non sabètz çò que fasètz. Volètz vertadièrament córrer aqueste risc ? [{answers}]",
+ "confirm_app_install_thirdparty": "ATENCION ! L’installacion d’aplicacions tèrças pòt comprometre l’integralitat e la seguretat del sistèma. Deuriatz PAS l’installar se non sabètz pas çò que fasètz. Volètz vertadièrament córrer aqueste risc ? [{answers}] ",
"dpkg_lock_not_available": "Aquesta comanda pòt pas s’executar pel moment perque un autre programa sembla utilizar lo varrolh de dpkg (lo gestionari de paquets del sistèma)",
"log_regen_conf": "Regenerar las configuracions del sistèma « {} »",
- "service_reloaded_or_restarted": "Lo servici « {service:s} » es estat recargat o reaviat",
+ "service_reloaded_or_restarted": "Lo servici « {service} » es estat recargat o reaviat",
"tools_upgrade_regular_packages_failed": "Actualizacion impossibla dels paquets seguents : {packages_list}",
"tools_upgrade_special_packages_completed": "L’actualizacion dels paquets de YunoHost es acabada !\nQuichatz [Entrada] per tornar a la linha de comanda",
- "updating_app_lists": "Recuperacion de las mesas a jorn disponiblas per las aplicacions…",
- "dpkg_is_broken": "Podètz pas far aquò pel moment perque dpkg/apt (los gestionaris de paquets del sistèma) sembla èsser mal configurat... Podètz ensajar de solucionar aquò en vos connectar via SSH e en executar « sudo dpkg --configure -a ».",
+ "dpkg_is_broken": "Podètz pas far aquò pel moment perque dpkg/APT (los gestionaris de paquets del sistèma) sembla èsser mal configurat… Podètz ensajar de solucionar aquò en vos connectar via SSH e en executar « sudo dpkg --configure -a ».",
"global_settings_setting_service_ssh_allow_deprecated_dsa_hostkey": "Autorizar l’utilizacion de la clau òst DSA (obsolèta) per la configuracion del servici SSH",
- "migration_0008_general_disclaimer": "Per melhorar la seguretat del servidor, es recomandat de daissar YunoHost gerir la configuracion SSH. Vòstra configuracion actuala es diferenta de la configuracion recomandada. Se daissatz YunoHost la reconfigurar, lo biais de vos connectar al servidor via SSH cambiarà coma aquò :",
- "hook_json_return_error": "Fracàs de la lectura del retorn de l’script {path:s}. Error : {msg:s}. Contengut brut : {raw_content}",
- "migration_0008_port": " - vos cal vos connectar en utilizar lo pòrt 22 allòc de vòstre pòrt SSH actual personalizat. Esitetz pas a lo reconfigurar ;",
- "migration_0009_not_needed": "Sembla qu’i aguèt ja una migracion. Passem.",
+ "hook_json_return_error": "Fracàs de la lectura del retorn de l’script {path}. Error : {msg}. Contengut brut : {raw_content}",
"pattern_password_app": "O planhèm, los senhals devon pas conténer los caractèrs seguents : {forbidden_chars}",
"regenconf_file_backed_up": "Lo fichièr de configuracion « {conf} » es estat salvagardat dins « {backup} »",
"regenconf_file_copy_failed": "Còpia impossibla del nòu fichièr de configuracion « {new} » cap a « {conf} »",
@@ -526,19 +334,13 @@
"regenconf_pending_applying": "Aplicacion de la configuracion en espèra per la categoria « {category} »…",
"tools_upgrade_cant_both": "Actualizacion impossibla del sistèma e de las aplicacions a l’encòp",
"tools_upgrade_cant_hold_critical_packages": "Manteniment impossible dels paquets critiques…",
- "global_settings_setting_security_nginx_compatibility": "Solucion de compromés entre compatibilitat e seguretat pel servidor web nginx. Afècta los criptografs (e d’autres aspèctes ligats amb la seguretat)",
+ "global_settings_setting_security_nginx_compatibility": "Solucion de compromés entre compatibilitat e seguretat pel servidor web NGINX. Afècta los criptografs (e d’autres aspèctes ligats amb la seguretat)",
"global_settings_setting_security_ssh_compatibility": "Solucion de compromés entre compatibilitat e seguretat pel servidor SSH. Afècta los criptografs (e d’autres aspèctes ligats amb la seguretat)",
"global_settings_setting_security_postfix_compatibility": "Solucion de compromés entre compatibilitat e seguretat pel servidor Postfix. Afècta los criptografs (e d’autres aspèctes ligats amb la seguretat)",
- "migration_description_0010_migrate_to_apps_json": "Levar las appslists despreciadas e utilizar la nòva lista unificada « apps.json » allòc",
- "migration_0008_root": " - vos poiretz pas vos connectar coma root via SSH. Allòc auretz d’utilizar l’utilizaire admin;",
- "migration_0008_warning": "Se comprenètz aquestes avertiments e qu’acceptatz de daissar YunoHost remplaçar la configuracion actuala, començatz la migracion. Autrament podètz tanben passar la migracion, encara que non siá pas recomandat.",
"service_regen_conf_is_deprecated": "« yunohost service regen-conf » es despreciat ! Utilizatz « yunohost tools regen-conf » allòc.",
- "service_reload_failed": "Impossible de recargar lo servici « {service:s} »\n\nJornal d’audit recent : {logs:s}",
- "service_restart_failed": "Impossible de reaviar lo servici « {service:s} »\n\nJornal d’audit recent : {logs:s}",
- "service_reload_or_restart_failed": "Impossible de recargar o reaviar lo servici « {service:s} »\n\nJornal d’audit recent : {logs:s}",
- "migration_description_0009_decouple_regenconf_from_services": "Desassociar lo mecanisme de regen-conf dels servicis",
- "migration_0008_dsa": " - la clau DSA serà desactivada. En consequéncia, poiriatz aver d’invalidar un messatge espaurugant del client SSH, e tornar verificar l’emprunta del servidor;",
- "migration_0008_no_warning": "Cap de risc important es estat detectat per remplaçar e la configuracion SSH, mas podèm pas n’èsser totalament segur ;) Se acceptatz que YunoHost remplace la configuracion actuala, començatz la migracion. Autrament, podètz passar la migracion, tot ben que non siá pas recomandat.",
+ "service_reload_failed": "Impossible de recargar lo servici « {service} »\n\nJornal d’audit recent : {logs}",
+ "service_restart_failed": "Impossible de reaviar lo servici « {service} »\n\nJornal d’audit recent : {logs}",
+ "service_reload_or_restart_failed": "Impossible de recargar o reaviar lo servici « {service} »\n\nJornal d’audit recent : {logs}",
"regenconf_file_kept_back": "S’espèra que lo fichièr de configuracion « {conf} » siá suprimit per regen-conf (categoria {category}) mas es estat mantengut.",
"this_action_broke_dpkg": "Aquesta accion a copat dpkg/apt (los gestionaris de paquets del sistèma)… Podètz ensajar de resòlver aqueste problèma en vos connectant amb SSH e executant « sudo dpkg --configure -a ».",
"tools_upgrade_at_least_one": "Especificatz --apps O --system",
@@ -547,5 +349,168 @@
"tools_upgrade_special_packages": "Actualizacion dels paquets « especials » (ligats a YunoHost)…",
"tools_upgrade_special_packages_explanation": "Aquesta accion s’acabarà mas l’actualizacion especiala actuala contunharà en rèire-plan. Comencetz pas cap d’autra accion sul servidor dins las ~ 10 minutas que venon (depend de la velocitat de la maquina). Un còp acabat, benlèu que vos calrà vos tornar connectar a l’interfàcia d’administracion. Los jornals d’audit de l’actualizacion seràn disponibles a Aisinas > Jornals d’audit (dins l’interfàcia d’administracion) o amb « yunohost log list » (en linha de comanda).",
"update_apt_cache_failed": "I a agut d’errors en actualizar la memòria cache d’APT (lo gestionari de paquets de Debian). Aquí avètz las linhas de sources.list que pòdon vos ajudar a identificar las linhas problematicas : \n{sourceslist}",
- "update_apt_cache_warning": "I a agut d’errors en actualizar la memòria cache d’APT (lo gestionari de paquets de Debian). Aquí avètz las linhas de sources.list que pòdon vos ajudar a identificar las linhas problematicas : \n{sourceslist}"
-}
+ "update_apt_cache_warning": "I a agut d’errors en actualizar la memòria cache d’APT (lo gestionari de paquets de Debian). Aquí avètz las linhas de sources.list que pòdon vos ajudar a identificar las linhas problematicas : \n{sourceslist}",
+ "backup_permission": "Autorizacion de salvagarda per l’aplicacion {app}",
+ "group_created": "Grop « {group} » creat",
+ "group_creation_failed": "Fracàs de la creacion del grop « {group} » : {error}",
+ "group_deleted": "Lo grop « {group} » es estat suprimit",
+ "group_deletion_failed": "Fracàs de la supression del grop « {group} » : {error}",
+ "group_unknown": "Lo grop « {group} » es desconegut",
+ "log_user_group_delete": "Suprimir lo grop « {} »",
+ "group_updated": "Lo grop « {group} » es estat actualizat",
+ "group_update_failed": "Actualizacion impossibla del grop « {group} » : {error}",
+ "log_user_group_update": "Actualizar lo grop « {} »",
+ "permission_already_exist": "La permission « {permission} » existís ja",
+ "permission_created": "Permission « {permission} » creada",
+ "permission_creation_failed": "Creacion impossibla de la permission",
+ "permission_deleted": "Permission « {permission} » suprimida",
+ "permission_deletion_failed": "Fracàs de la supression de la permission « {permission} »",
+ "permission_not_found": "Permission « {permission} » pas trobada",
+ "permission_update_failed": "Fracàs de l’actualizacion de la permission",
+ "permission_updated": "La permission « {permission} » es estada actualizada",
+ "mailbox_disabled": "La bóstia de las letras es desactivada per l’utilizaire {user}",
+ "migrations_success_forward": "Migracion {id} corrèctament realizada !",
+ "migrations_running_forward": "Execucion de la migracion {id}…",
+ "migrations_must_provide_explicit_targets": "Devètz fornir una cibla explicita quand utilizatz using --skip o --force-rerun",
+ "migrations_exclusive_options": "--auto, --skip, e --force-rerun son las opcions exclusivas.",
+ "migrations_failed_to_load_migration": "Cargament impossible de la migracion {id} : {error}",
+ "migrations_already_ran": "Aquelas migracions s’executèron ja : {ids}",
+ "diagnosis_basesystem_ynh_main_version": "Lo servidor fonciona amb YunoHost {main_version} ({repo})",
+ "migrations_dependencies_not_satisfied": "Executatz aquestas migracions : « {dependencies_id} », abans la migracion {id}.",
+ "migrations_no_such_migration": "I a pas cap de migracion apelada « {id} »",
+ "migrations_not_pending_cant_skip": "Aquestas migracions son pas en espèra, las podètz pas doncas ignorar : {ids}",
+ "app_action_broke_system": "Aquesta accion sembla aver copat de servicis importants : {services}",
+ "diagnosis_ip_no_ipv6": "Lo servidor a pas d’adreça IPv6 activa.",
+ "diagnosis_ip_not_connected_at_all": "Lo servidor sembla pas connectat a Internet ?!",
+ "diagnosis_description_regenconf": "Configuracion sistèma",
+ "diagnosis_http_ok": "Lo domeni {domain} es accessible de l’exterior.",
+ "app_full_domain_unavailable": "Aquesta aplicacion a d’èsser installada sul seu pròpri domeni, mas i a d’autras aplicacions installadas sus aqueste domeni « {domain} ». Podètz utilizar allòc un josdomeni dedicat a aquesta aplicacion.",
+ "diagnosis_dns_bad_conf": "Configuracion DNS incorrècta o inexistenta pel domeni {domain} (categoria {category})",
+ "diagnosis_ram_verylow": "Lo sistèma a solament {available} ({available_percent}%) de memòria RAM disponibla ! (d’un total de {total})",
+ "diagnosis_ram_ok": "Lo sistèma a encara {available} ({available_percent}%) de memòria RAM disponibla (d’un total de {total}).",
+ "permission_already_allowed": "Lo grop « {group} » a ja la permission « {permission} » activada",
+ "permission_already_disallowed": "Lo grop « {group} » a ja la permission « {permission} » desactivada",
+ "permission_cannot_remove_main": "La supression d’una permission màger es pas autorizada",
+ "log_permission_url": "Actualizacion de l’URL ligada a la permission « {} »",
+ "app_install_failed": "Installacion impossibla de {app} : {error}",
+ "app_install_script_failed": "Una error s’es producha en installar lo script de l’aplicacion",
+ "apps_already_up_to_date": "Totas las aplicacions son ja al jorn",
+ "app_remove_after_failed_install": "Supression de l’aplicacion aprèp fracàs de l’installacion...",
+ "group_already_exist": "Lo grop {group} existís ja",
+ "group_already_exist_on_system": "Lo grop {group} existís ja dins lo sistèma de grops",
+ "group_user_not_in_group": "L’utilizaire {user} es pas dins lo grop {group}",
+ "log_user_permission_reset": "Restablir la permission « {} »",
+ "user_already_exists": "L’utilizaire {user} existís ja",
+ "diagnosis_basesystem_host": "Lo servidor fonciona amb Debian {debian_version}",
+ "diagnosis_basesystem_kernel": "Lo servidor fonciona amb lo nuclèu Linux {kernel_version}",
+ "diagnosis_basesystem_ynh_single_version": "{package} version : {version} ({repo})",
+ "diagnosis_basesystem_ynh_inconsistent_versions": "Utilizatz de versions inconsistentas dels paquets de YunoHost… probablament a causa d'una actualizacion fracassada o parciala.",
+ "diagnosis_ignored_issues": "(+ {nb_ignored} problèma(es) ignorat(s))",
+ "diagnosis_everything_ok": "Tot sembla corrècte per {category} !",
+ "diagnosis_ip_connected_ipv4": "Lo servidor es connectat a Internet via IPv4 !",
+ "diagnosis_ip_no_ipv4": "Lo servidor a pas d’adreça IPv4 activa.",
+ "diagnosis_ip_connected_ipv6": "Lo servidor es connectat a Internet via IPv6 !",
+ "diagnosis_ip_dnsresolution_working": "La resolucion del nom de domeni fonciona !",
+ "diagnosis_dns_good_conf": "Bona configuracion DNS pel domeni {domain} (categoria {category})",
+ "diagnosis_failed_for_category": "Lo diagnostic a fracassat per la categoria « {category} » : {error}",
+ "diagnosis_cache_still_valid": "(Memòria cache totjorn valida pel diagnostic {category}. Se tornarà pas diagnosticar pel moment !)",
+ "diagnosis_found_errors": "{errors} errors importantas trobadas ligadas a {category} !",
+ "diagnosis_services_bad_status": "Lo servici {service} es {status} :(",
+ "diagnosis_swap_ok": "Lo sistèma a {total} d’escambi !",
+ "diagnosis_regenconf_allgood": "Totes los fichièrs de configuracion son confòrmes a la configuracion recomandada !",
+ "diagnosis_regenconf_manually_modified": "Lo fichièr de configuracion {file} foguèt modificat manualament.",
+ "diagnosis_regenconf_manually_modified_details": "Es probablament bon tan que sabètz çò que fasètz ;) !",
+ "diagnosis_security_vulnerable_to_meltdown": "Semblatz èsser vulnerable a la vulnerabilitat de seguretat critica de Meltdown",
+ "diagnosis_description_basesystem": "Sistèma de basa",
+ "diagnosis_description_ip": "Connectivitat Internet",
+ "diagnosis_description_dnsrecords": "Enregistraments DNS",
+ "diagnosis_description_services": "Verificacion d’estat de servicis",
+ "diagnosis_description_systemresources": "Resorgas sistèma",
+ "diagnosis_description_ports": "Exposicion dels pòrts",
+ "diagnosis_ports_unreachable": "Lo pòrt {port} es pas accessible de l’exterior.",
+ "diagnosis_ports_ok": "Lo pòrt {port} es accessible de l’exterior.",
+ "diagnosis_http_unreachable": "Lo domeni {domain} es pas accessible via HTTP de l’exterior.",
+ "diagnosis_unknown_categories": "Las categorias seguentas son desconegudas : {categories}",
+ "diagnosis_ram_low": "Lo sistèma a {available} ({available_percent}%) de memòria RAM disponibla (d’un total de {total}). Atencion.",
+ "log_permission_create": "Crear la permission « {} »",
+ "log_permission_delete": "Suprimir la permission « {} »",
+ "log_user_group_create": "Crear lo grop « {} »",
+ "log_user_permission_update": "Actualizacion dels accèsses per la permission « {} »",
+ "operation_interrupted": "L’operacion es estada interrompuda manualament ?",
+ "group_cannot_be_deleted": "Lo grop « {group} » pòt pas èsser suprimit manualament.",
+ "diagnosis_found_warnings": "Trobat {warnings} element(s) que se poirián melhorar per {category}.",
+ "diagnosis_dns_missing_record": "Segon la configuracion DNS recomandada, vos calriá ajustar un enregistrament DNS\ntipe: {type}\nnom: {name}\nvalor: {value}",
+ "diagnosis_dns_discrepancy": "La configuracion DNS seguenta sembla pas la configuracion recomandada :\nTipe : {type}\nNom : {name}\nValors actualas : {current}\nValor esperada : {value}",
+ "diagnosis_ports_could_not_diagnose": "Impossible de diagnosticar se los pòrts son accessibles de l’exterior.",
+ "diagnosis_ports_could_not_diagnose_details": "Error : {error}",
+ "diagnosis_http_could_not_diagnose": "Impossible de diagnosticar se lo domeni es accessible de l’exterior.",
+ "diagnosis_http_could_not_diagnose_details": "Error : {error}",
+ "apps_catalog_updating": "Actualizacion del catalòg d’aplicacion...",
+ "apps_catalog_failed_to_download": "Telecargament impossible del catalòg d’aplicacions {apps_catalog} : {error}",
+ "apps_catalog_obsolete_cache": "La memòria cache del catalòg d’aplicacion es voida o obsolèta.",
+ "apps_catalog_update_success": "Lo catalòg d’aplicacions es a jorn !",
+ "diagnosis_description_mail": "Corrièl",
+ "app_upgrade_script_failed": "Una error s’es producha pendent l’execucion de l’script de mesa a nivèl de l’aplicacion",
+ "diagnosis_cant_run_because_of_dep": "Execucion impossibla del diagnostic per {category} mentre que i a de problèmas importants ligats amb {dep}.",
+ "diagnosis_found_errors_and_warnings": "Avèm trobat {errors} problèma(s) important(s) (e {warnings} avis(es)) ligats a {category} !",
+ "diagnosis_failed": "Recuperacion impossibla dels resultats del diagnostic per la categoria « {category} » : {error}",
+ "diagnosis_ip_broken_dnsresolution": "La resolucion del nom de domeni es copada per una rason… Lo parafuòc bloca las requèstas DNS ?",
+ "diagnosis_no_cache": "I a pas encara de diagnostic de cache per la categoria « {category} »",
+ "apps_catalog_init_success": "Sistèma de catalòg d’aplicacion iniciat !",
+ "diagnosis_services_running": "Lo servici {service} es lançat !",
+ "diagnosis_services_conf_broken": "La configuracion es copada pel servici {service} !",
+ "diagnosis_ports_needed_by": "Es necessari qu’aqueste pòrt siá accessible pel servici {service}",
+ "diagnosis_diskusage_low": "Lo lòc d’emmagazinatge {mountpoint} (sul periferic {device}) a solament {free} ({free_percent}%). Siatz prudent.",
+ "dyndns_provider_unreachable": "Impossible d’atenher lo provesidor Dyndns : siá vòstre YunoHost es pas corrèctament connectat a Internet siá lo servidor dynette es copat.",
+ "diagnosis_services_bad_status_tip": "Podètz ensajar de reaviar lo servici, e se non fonciona pas, podètz agachar los jornals de servici a la pagina web d’administracion (en linha de comanda podètz utilizar yunohost service restart {service} e yunohost service log {service} ).",
+ "diagnosis_http_connection_error": "Error de connexion : connexion impossibla al domeni demandat, benlèu qu’es pas accessible.",
+ "group_user_already_in_group": "L’utilizaire {user} es ja dins lo grop « {group} »",
+ "diagnosis_ip_broken_resolvconf": "La resolucion del nom de domeni sembla copada sul servidor, poiriá èsser ligada al fait que /etc/resolv.conf manda pas a 127.0.0.1.",
+ "diagnosis_ip_weird_resolvconf": "La resolucion del nom de domeni sembla foncionar, mas sembla qu’utilizatz un fichièr /etc/resolv.conf personalizat.",
+ "diagnosis_diskusage_verylow": "Lo lòc d’emmagazinatge {mountpoint} (sul periferic {device}) a solament {free} ({free_percent}%). Deuriatz considerar de liberar un pauc d’espaci.",
+ "global_settings_setting_pop3_enabled": "Activar lo protocòl POP3 pel servidor de corrièr",
+ "diagnosis_diskusage_ok": "Lo lòc d’emmagazinatge {mountpoint} (sul periferic {device}) a encara {free} ({free_percent}%) de liure !",
+ "diagnosis_swap_none": "Lo sistèma a pas cap de memòria d’escambi. Auriatz de considerar d’ajustar almens {recommended} d’escambi per evitar las situacions ont lo sistèma manca de memòria.",
+ "diagnosis_swap_notsomuch": "Lo sistèma a solament {total} de memòria d’escambi. Auriatz de considerar d’ajustar almens {recommended} d’escambi per evitar las situacions ont lo sistèma manca de memòria.",
+ "diagnosis_description_web": "Web",
+ "diagnosis_ip_global": "IP Global : {global}",
+ "diagnosis_ip_local": "IP locala : {local}",
+ "diagnosis_mail_ehlo_could_not_diagnose_details": "Error : {error}",
+ "diagnosis_mail_queue_unavailable_details": "Error : {error}",
+ "diagnosis_basesystem_hardware": "L’arquitectura del servidor es {virt} {arch}",
+ "backup_archive_corrupted": "Sembla que l’archiu de la salvagarda « {archive} » es corromput : {error}",
+ "diagnosis_domain_expires_in": "{domain} expirarà d’aquí {days} jorns.",
+ "migration_0015_cleaning_up": "Netejatge de la memòria cache e dels paquets pas mai necessaris…",
+ "restore_already_installed_apps": "Restauracion impossibla de las aplicacions seguentas que son ja installadas : {apps}",
+ "diagnosis_package_installed_from_sury": "D’unes paquets sistèma devon èsser meses a nivèl",
+ "ask_user_domain": "Domeni d’utilizar per l’adreça de corrièl de l’utilizaire e lo compte XMPP",
+ "app_manifest_install_ask_is_public": "Aquesta aplicacion serà visible pels visitaires anonims ?",
+ "app_manifest_install_ask_admin": "Causissètz un administrator per aquesta aplicacion",
+ "app_manifest_install_ask_password": "Causissètz lo senhal administrator per aquesta aplicacion",
+ "app_manifest_install_ask_path": "Causissètz lo camin ont volètz installar aquesta aplicacion",
+ "app_manifest_install_ask_domain": "Causissètz lo domeni ont volètz installar aquesta aplicacion",
+ "app_argument_password_no_default": "Error pendent l’analisi de l’argument del senhal « {name} » : l’argument de senhal pòt pas aver de valor per defaut per de rason de seguretat",
+ "app_label_deprecated": "Aquesta comanda es estada renduda obsolèta. Mercés d'utilizar la nòva \"yunohost user permission update\" per gerir l’etiqueta de l’aplicacion",
+ "additional_urls_already_removed": "URL addicionala {url} es ja estada eliminada per la permission «{permission}»",
+ "additional_urls_already_added": "URL addicionala «{url}» es ja estada aponduda per la permission «{permission}»",
+ "migration_0015_yunohost_upgrade": "Aviada de la mesa a jorn de YunoHost...",
+ "migration_0015_main_upgrade": "Aviada de la mesa a nivèl generala...",
+ "migration_0015_patching_sources_list": "Mesa a jorn del fichièr sources.lists...",
+ "migration_0015_start": "Aviar la migracion cap a Buster",
+ "migration_description_0017_postgresql_9p6_to_11": "Migrar las basas de donadas de PostgreSQL 9.6 cap a 11",
+ "migration_description_0016_php70_to_php73_pools": "Migrar los fichièrs de configuracion php7.0 cap a php7.3",
+ "migration_description_0015_migrate_to_buster": "Mesa a nivèl dels sistèmas Debian Buster e YunoHost 4.x",
+ "migrating_legacy_permission_settings": "Migracion dels paramètres de permission ancians...",
+ "log_app_action_run": "Executar l’accion de l’aplicacion « {} »",
+ "diagnosis_basesystem_hardware_model": "Lo modèl del servidor es {model}",
+ "backup_archive_cant_retrieve_info_json": "Obtencion impossibla de las informacions de l’archiu « {archive} »... Se pòt pas recuperar lo fichièr info.json (o es pas un fichièr json valid).",
+ "app_packaging_format_not_supported": "Se pòt pas installar aquesta aplicacion pr’amor que son format es pas pres en carga per vòstra version de YunoHost. Deuriatz considerar actualizar lo sistèma.",
+ "diagnosis_mail_fcrdns_ok": "Vòstre DNS inverse es corrèctament configurat !",
+ "diagnosis_mail_outgoing_port_25_ok": "Lo servidor de messatge SMTP pòt enviar de corrièls (lo pòrt 25 es pas blocat).",
+ "diagnosis_domain_expiration_warning": "D’unes domenis expiraràn lèu !",
+ "diagnosis_domain_expiration_success": "Vòstres domenis son enregistrats e expiraràn pas lèu.",
+ "diagnosis_domain_not_found_details": "Lo domeni {domain} existís pas a la basa de donadas WHOIS o a expirat !",
+ "diagnosis_domain_expiration_not_found": "Impossible de verificar la data d’expiracion d’unes domenis",
+ "backup_create_size_estimation": "L’archiu contendrà apr’aquí {size} de donadas.",
+ "app_restore_script_failed": "Una error s’es producha a l’interior del script de restauracion de l’aplicacion"
+}
\ No newline at end of file
diff --git a/locales/pl.json b/locales/pl.json
index 0967ef424..caf108367 100644
--- a/locales/pl.json
+++ b/locales/pl.json
@@ -1 +1,12 @@
-{}
+{
+ "password_too_simple_1": "Hasło musi mieć co najmniej 8 znaków",
+ "app_already_up_to_date": "{app} jest obecnie aktualna",
+ "app_already_installed": "{app} jest już zainstalowane",
+ "already_up_to_date": "Nic do zrobienia. Wszystko jest obecnie aktualne.",
+ "admin_password_too_long": "Proszę wybrać hasło krótsze niż 127 znaków",
+ "admin_password_changed": "Hasło administratora zostało zmienione",
+ "admin_password_change_failed": "Nie można zmienić hasła",
+ "admin_password": "Hasło administratora",
+ "action_invalid": "Nieprawidłowa operacja '{action}'",
+ "aborting": "Przerywanie."
+}
\ No newline at end of file
diff --git a/locales/pt.json b/locales/pt.json
index 80a0d5ddd..534e0cb27 100644
--- a/locales/pt.json
+++ b/locales/pt.json
@@ -1,134 +1,83 @@
{
- "action_invalid": "Acção Inválida '{action:s}'",
+ "action_invalid": "Acção Inválida '{action}'",
"admin_password": "Senha de administração",
"admin_password_change_failed": "Não foi possível alterar a senha",
- "admin_password_changed": "A palavra-passe de administração foi alterada com sucesso",
- "app_already_installed": "{app:s} já está instalada",
- "app_extraction_failed": "Não foi possível extrair os ficheiros para instalação",
- "app_id_invalid": "A ID da aplicação é inválida",
- "app_install_files_invalid": "Ficheiros para instalação corrompidos",
- "app_location_already_used": "A aplicação {app} Já está instalada nesta localização ({path})",
- "app_location_install_failed": "Não é possível instalar a aplicação neste diretório porque está em conflito com a aplicação '{other_app}', que já está instalada no diretório '{other_path}'",
+ "admin_password_changed": "A senha da administração foi alterada",
+ "app_already_installed": "{app} já está instalada",
+ "app_extraction_failed": "Não foi possível extrair os arquivos para instalação",
+    "app_id_invalid": "App ID inválido",
+ "app_install_files_invalid": "Esses arquivos não podem ser instalados",
"app_manifest_invalid": "Manifesto da aplicação inválido: {error}",
- "app_no_upgrade": "Não existem aplicações para atualizar",
- "app_not_installed": "{app:s} não está instalada",
- "app_recent_version_required": "{:s} requer uma versão mais recente da moulinette",
- "app_removed": "{app:s} removida com êxito",
- "app_sources_fetch_failed": "Incapaz obter os ficheiros fonte",
+ "app_not_installed": "Não foi possível encontrar {app} na lista de aplicações instaladas: {all_apps}",
+ "app_removed": "{app} desinstalada",
+ "app_sources_fetch_failed": "Não foi possível carregar os arquivos de código fonte, a URL está correta?",
"app_unknown": "Aplicação desconhecida",
- "app_upgrade_failed": "Não foi possível atualizar {app:s}",
- "app_upgraded": "{app:s} atualizada com sucesso",
- "appslist_fetched": "A lista de aplicações, {appslist:s}, foi trazida com sucesso",
- "appslist_removed": "A Lista de aplicações {appslist:s} foi removida",
- "appslist_retrieve_error": "Não foi possível obter a lista de aplicações remotas {appslist:s}: {error:s}",
- "appslist_unknown": "Desconhece-se a lista de aplicaçoes {appslist:s}.",
- "ask_current_admin_password": "Senha atual da administração",
- "ask_email": "Endereço de Email",
+ "app_upgrade_failed": "Não foi possível atualizar {app}: {error}",
+ "app_upgraded": "{app} atualizado",
"ask_firstname": "Primeiro nome",
"ask_lastname": "Último nome",
- "ask_list_to_remove": "Lista para remover",
"ask_main_domain": "Domínio principal",
"ask_new_admin_password": "Nova senha de administração",
"ask_password": "Senha",
"backup_created": "Backup completo",
- "backup_creating_archive": "A criar ficheiro de backup...",
- "backup_invalid_archive": "Arquivo de backup inválido",
- "backup_output_directory_not_empty": "A pasta de destino não se encontra vazia",
- "custom_app_url_required": "Deve fornecer um link para atualizar a sua aplicação personalizada {app:s}",
- "custom_appslist_name_required": "Deve fornecer um nome para a sua lista de aplicações personalizada",
+ "backup_output_directory_not_empty": "Você deve escolher um diretório de saída que esteja vazio",
+ "custom_app_url_required": "Deve fornecer um link para atualizar a sua aplicação personalizada {app}",
"domain_cert_gen_failed": "Não foi possível gerar o certificado",
"domain_created": "Domínio criado com êxito",
- "domain_creation_failed": "Não foi possível criar o domínio",
+ "domain_creation_failed": "Não foi possível criar o domínio {domain}: {error}",
"domain_deleted": "Domínio removido com êxito",
- "domain_deletion_failed": "Não foi possível eliminar o domínio",
+ "domain_deletion_failed": "Não foi possível eliminar o domínio {domain}: {error}",
"domain_dyndns_already_subscribed": "Já subscreveu um domínio DynDNS",
- "domain_dyndns_invalid": "Domínio inválido para ser utilizado com DynDNS",
"domain_dyndns_root_unknown": "Domínio root (administrador) DynDNS desconhecido",
"domain_exists": "O domínio já existe",
"domain_uninstall_app_first": "Existem uma ou mais aplicações instaladas neste domínio. Por favor desinstale-as antes de proceder com a remoção do domínio.",
- "domain_unknown": "Domínio desconhecido",
- "domain_zone_exists": "Ficheiro para zona DMZ já existe",
- "domain_zone_not_found": "Ficheiro para zona DMZ não encontrado no domínio {:s}",
"done": "Concluído.",
"downloading": "Transferência em curso...",
- "dyndns_cron_installed": "Gestor de tarefas cron DynDNS instalado com êxito",
- "dyndns_cron_remove_failed": "Não foi possível remover o gestor de tarefas cron DynDNS",
- "dyndns_cron_removed": "Gestor de tarefas cron DynDNS removido com êxito",
- "dyndns_ip_update_failed": "Não foi possível atualizar o endereço IP a partir de DynDNS",
- "dyndns_ip_updated": "Endereço IP atualizado com êxito a partir de DynDNS",
+ "dyndns_ip_update_failed": "Não foi possível atualizar o endereço IP para DynDNS",
+ "dyndns_ip_updated": "Endereço IP atualizado com êxito para DynDNS",
"dyndns_key_generating": "A chave DNS está a ser gerada, isto pode demorar um pouco...",
"dyndns_registered": "Dom+inio DynDNS registado com êxito",
- "dyndns_registration_failed": "Não foi possível registar o domínio DynDNS: {error:s}",
- "dyndns_unavailable": "Subdomínio DynDNS indisponível",
- "executing_script": "A executar o script...",
+ "dyndns_registration_failed": "Não foi possível registar o domínio DynDNS: {error}",
+ "dyndns_unavailable": "O domínio '{domain}' não está disponível.",
"extracting": "Extração em curso...",
- "field_invalid": "Campo inválido '{:s}'",
+ "field_invalid": "Campo inválido '{}'",
"firewall_reloaded": "Firewall recarregada com êxito",
- "hook_argument_missing": "Argumento em falta '{:s}'",
- "hook_choice_invalid": "Escolha inválida '{:s}'",
"installation_complete": "Instalação concluída",
- "installation_failed": "A instalação falhou",
"iptables_unavailable": "Não pode alterar aqui a iptables. Ou o seu kernel não o suporta ou está num espaço reservado.",
- "ldap_initialized": "LDAP inicializada com êxito",
- "license_undefined": "indefinido",
- "mail_alias_remove_failed": "Não foi possível remover a etiqueta de correio '{mail:s}'",
- "mail_domain_unknown": "Domínio de endereço de correio desconhecido '{domain:s}'",
- "mail_forward_remove_failed": "Não foi possível remover o reencaminhamento de correio '{mail:s}'",
- "maindomain_change_failed": "Incapaz alterar o domínio raiz",
- "maindomain_changed": "Domínio raiz alterado com êxito",
- "monitor_disabled": "Monitorização do servidor parada com êxito",
- "monitor_enabled": "Monitorização do servidor ativada com êxito",
- "monitor_glances_con_failed": "Não foi possível ligar ao servidor Glances",
- "monitor_not_enabled": "A monitorização do servidor não está ativa",
- "monitor_period_invalid": "Período de tempo inválido",
- "monitor_stats_file_not_found": "Ficheiro de estatísticas não encontrado",
- "monitor_stats_no_update": "Não existem estatísticas de monitorização para atualizar",
- "monitor_stats_period_unavailable": "Não existem estatísticas disponíveis para este período",
- "mountpoint_unknown": "Ponto de montagem desconhecido",
- "mysql_db_creation_failed": "Criação da base de dados MySQL falhou",
- "mysql_db_init_failed": "Inicialização da base de dados MySQL falhou",
- "mysql_db_initialized": "Base de dados MySQL iniciada com êxito",
- "new_domain_required": "Deve escrever um novo domínio principal",
- "no_appslist_found": "Não foi encontrada a lista de aplicações",
- "no_internet_connection": "O servidor não está ligado à Internet",
- "packages_no_upgrade": "Não existem pacotes para atualizar",
- "packages_upgrade_critical_later": "Os pacotes críticos ({packages:s}) serão atualizados depois",
+ "mail_alias_remove_failed": "Não foi possível remover a etiqueta de correio '{mail}'",
+    "mail_domain_unknown": "Domínio de endereço de correio '{domain}' inválido. Por favor, usa um domínio administrado por esse servidor.",
+ "mail_forward_remove_failed": "Não foi possível remover o reencaminhamento de correio '{mail}'",
+ "main_domain_change_failed": "Incapaz alterar o domínio raiz",
+ "main_domain_changed": "Domínio raiz alterado com êxito",
"packages_upgrade_failed": "Não foi possível atualizar todos os pacotes",
- "path_removal_failed": "Incapaz remover o caminho {:s}",
"pattern_domain": "Deve ser um nome de domínio válido (p.e. meu-dominio.org)",
"pattern_email": "Deve ser um endereço de correio válido (p.e. alguem@dominio.org)",
"pattern_firstname": "Deve ser um primeiro nome válido",
"pattern_lastname": "Deve ser um último nome válido",
- "pattern_listname": "Apenas são permitidos caracteres alfanuméricos e travessões",
"pattern_password": "Deve ter no mínimo 3 caracteres",
- "pattern_port": "Deve ser um número de porta válido (entre 0-65535)",
"pattern_username": "Devem apenas ser carácteres minúsculos alfanuméricos e subtraços",
- "restore_confirm_yunohost_installed": "Quer mesmo restaurar um sistema já instalado? [{answers:s}]",
- "service_add_failed": "Incapaz adicionar serviço '{service:s}'",
+ "restore_confirm_yunohost_installed": "Quer mesmo restaurar um sistema já instalado? [{answers}]",
+ "service_add_failed": "Incapaz adicionar serviço '{service}'",
"service_added": "Serviço adicionado com êxito",
- "service_already_started": "O serviço '{service:s}' já está em execussão",
- "service_already_stopped": "O serviço '{service:s}' já está parado",
- "service_cmd_exec_failed": "Incapaz executar o comando '{command:s}'",
- "service_disable_failed": "Incapaz desativar o serviço '{service:s}'",
- "service_disabled": "O serviço '{service:s}' foi desativado com êxito",
- "service_enable_failed": "Incapaz de ativar o serviço '{service:s}'",
- "service_enabled": "Serviço '{service:s}' ativado com êxito",
- "service_no_log": "Não existem registos para mostrar do serviço '{service:s}'",
- "service_remove_failed": "Incapaz de remover o serviço '{service:s}'",
+    "service_already_started": "O serviço '{service}' já está em execução",
+ "service_already_stopped": "O serviço '{service}' já está parado",
+ "service_cmd_exec_failed": "Incapaz executar o comando '{command}'",
+ "service_disable_failed": "Incapaz desativar o serviço '{service}'",
+ "service_disabled": "O serviço '{service}' foi desativado com êxito",
+ "service_enable_failed": "Incapaz de ativar o serviço '{service}'",
+ "service_enabled": "Serviço '{service}' ativado com êxito",
+ "service_remove_failed": "Incapaz de remover o serviço '{service}'",
"service_removed": "Serviço eliminado com êxito",
- "service_start_failed": "Não foi possível iniciar o serviço '{service:s}'",
- "service_started": "O serviço '{service:s}' foi iniciado com êxito",
- "service_status_failed": "Incapaz determinar o estado do serviço '{service:s}'",
- "service_stop_failed": "Incapaz parar o serviço '{service:s}'",
- "service_stopped": "O serviço '{service:s}' foi parado com êxito",
- "service_unknown": "Serviço desconhecido '{service:s}'",
+ "service_start_failed": "Não foi possível iniciar o serviço '{service}'",
+ "service_started": "O serviço '{service}' foi iniciado com êxito",
+ "service_stop_failed": "Incapaz parar o serviço '{service}'",
+ "service_stopped": "O serviço '{service}' foi parado com êxito",
+ "service_unknown": "Serviço desconhecido '{service}'",
"ssowat_conf_generated": "Configuração SSOwat gerada com êxito",
"ssowat_conf_updated": "Configuração persistente SSOwat atualizada com êxito",
"system_upgraded": "Sistema atualizado com êxito",
"system_username_exists": "O utilizador já existe no registo do sistema",
"unexpected_error": "Ocorreu um erro inesperado",
- "unit_unknown": "Unidade desconhecida '{unit:s}'",
- "update_cache_failed": "Não foi possível atualizar os cabeçalhos APT",
"updating_apt_cache": "A atualizar a lista de pacotes disponíveis...",
"upgrade_complete": "Atualização completa",
"upgrading_packages": "Atualização de pacotes em curso...",
@@ -136,62 +85,111 @@
"user_creation_failed": "Não foi possível criar o utilizador",
"user_deleted": "Utilizador eliminado com êxito",
"user_deletion_failed": "Incapaz eliminar o utilizador",
- "user_info_failed": "Incapaz obter informações sobre o utilizador",
"user_unknown": "Utilizador desconhecido",
"user_update_failed": "Não foi possível atualizar o utilizador",
"user_updated": "Utilizador atualizado com êxito",
"yunohost_already_installed": "AYunoHost já está instalado",
- "yunohost_ca_creation_failed": "Incapaz criar o certificado de autoridade",
"yunohost_configured": "YunoHost configurada com êxito",
"yunohost_installing": "A instalar a YunoHost...",
"yunohost_not_installed": "YunoHost ainda não está corretamente configurado. Por favor execute as 'ferramentas pós-instalação yunohost'.",
- "app_incompatible": "A aplicação {app} é incompatível com a sua versão de Yunohost",
- "app_not_correctly_installed": "{app:s} parece não estar corretamente instalada",
- "app_not_properly_removed": "{app:s} não foi corretamente removido",
+ "app_not_correctly_installed": "{app} parece não estar corretamente instalada",
+ "app_not_properly_removed": "{app} não foi corretamente removido",
"app_requirements_checking": "Verificando os pacotes necessários para {app}...",
"app_unsupported_remote_type": "A aplicação não possui suporte ao tipo remoto utilizado",
- "backup_archive_app_not_found": "A aplicação '{app:s}' não foi encontrada no arquivo de backup",
- "backup_archive_broken_link": "Impossível acessar o arquivo de backup (link quebrado ao {path:s})",
- "backup_archive_hook_not_exec": "O gancho '{hook:s}' não foi executado neste backup",
- "backup_archive_name_exists": "O nome do arquivo de backup já existe",
- "backup_archive_open_failed": "Não é possível abrir o arquivo de backup",
- "backup_cleaning_failed": "Não é possível limpar a pasta temporária de backups",
- "backup_creation_failed": "A criação do backup falhou",
- "backup_delete_error": "Impossível apagar '{path:s}'",
- "backup_deleted": "O backup foi suprimido",
- "backup_extracting_archive": "Extraindo arquivo de backup...",
- "backup_hook_unknown": "Gancho de backup '{hook:s}' desconhecido",
- "backup_nothings_done": "Não há nada para guardar",
- "backup_output_directory_forbidden": "Diretório de saída proibido. Os backups não podem ser criados em /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var ou /home/yunohost.backup/archives subpastas",
- "app_already_installed_cant_change_url": "Este aplicativo já está instalado. A URL não pode ser alterada apenas por esta função. Olhe para o `app changeurl` se estiver disponível.",
- "app_already_up_to_date": "{app:s} já está atualizado",
- "app_argument_choice_invalid": "Escolha inválida para o argumento '{name:s}', deve ser um dos {choices:s}",
- "app_argument_invalid": "Valor inválido de argumento '{name:s}': {error:s}",
- "app_argument_required": "O argumento '{name:s}' é obrigatório",
- "app_change_url_failed_nginx_reload": "Falha ao reiniciar o nginx. Aqui está o retorno de 'nginx -t':\n{nginx_errors:s}",
- "app_change_no_change_url_script": "A aplicação {app_name:s} ainda não permite mudança da URL, talvez seja necessário atualiza-la.",
- "app_location_unavailable": "Esta url não está disponível ou está em conflito com outra aplicação já instalada",
- "app_package_need_update": "O pacote da aplicação {app} precisa ser atualizado para aderir as mudanças do YunoHost",
- "app_requirements_failed": "Não foi possível atender aos requisitos da aplicação {app}: {error}",
- "app_upgrade_app_name": "Atualizando aplicação {app}…",
+ "backup_archive_app_not_found": "Não foi possível encontrar {app} no arquivo de backup",
+ "backup_archive_broken_link": "Não foi possível acessar o arquivo de backup (link quebrado ao {path})",
+ "backup_archive_name_exists": "Já existe um arquivo de backup com esse nome.",
+ "backup_archive_open_failed": "Não foi possível abrir o arquivo de backup",
+ "backup_cleaning_failed": "Não foi possível limpar o diretório temporário de backup",
+ "backup_creation_failed": "Não foi possível criar o arquivo de backup",
+ "backup_delete_error": "Não foi possível remover '{path}'",
+ "backup_deleted": "Backup removido",
+ "backup_hook_unknown": "O gancho de backup '{hook}' é desconhecido",
+    "backup_nothings_done": "Não há nada para salvar",
+ "backup_output_directory_forbidden": "Escolha um diretório de saída diferente. Backups não podem ser criados nos subdiretórios /bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var ou /home/yunohost.backup/archives",
+ "app_already_installed_cant_change_url": "Este aplicativo já está instalado. A URL não pode ser alterada apenas por esta função. Confira em `app changeurl` se está disponível.",
+ "app_already_up_to_date": "{app} já está atualizado",
+ "app_argument_choice_invalid": "Use uma das opções '{choices}' para o argumento '{name}' em vez de '{value}'",
+ "app_argument_invalid": "Escolha um valor válido para o argumento '{name}': {error}",
+ "app_argument_required": "O argumento '{name}' é obrigatório",
+ "app_location_unavailable": "Esta url ou não está disponível ou está em conflito com outra(s) aplicação(ões) já instalada(s):\n{apps}",
+ "app_upgrade_app_name": "Atualizando {app}…",
"app_upgrade_some_app_failed": "Não foi possível atualizar algumas aplicações",
- "appslist_corrupted_json": "Falha ao carregar a lista de aplicações. O arquivo {filename:s} aparenta estar corrompido.",
- "appslist_migrating": "Migando lista de aplicações {appslist:s}…",
- "appslist_name_already_tracked": "Já existe uma lista de aplicações registrada com o nome {name:s}.",
- "appslist_retrieve_bad_format": "O arquivo recuperado para a lista de aplicações {appslist:s} é invalido",
- "appslist_url_already_tracked": "Já existe uma lista de aplicações registrada com a url {url:s}.",
- "ask_path": "Caminho",
- "backup_abstract_method": "Este metodo de backup ainda não foi implementado",
- "backup_action_required": "Deve-se especificar algo a salvar",
- "backup_app_failed": "Não foi possível fazer o backup dos aplicativos '{app:s}'",
- "backup_applying_method_custom": "Chamando o metodo personalizado de backup '{method:s}'…",
- "backup_applying_method_tar": "Criando o arquivo tar de backup…",
- "backup_archive_mount_failed": "Falha ao montar o arquivo de backup",
- "backup_archive_name_unknown": "Desconhece-se o arquivo local de backup de nome '{name:s}'",
- "backup_archive_system_part_not_available": "A seção do sistema '{part:s}' está indisponivel neste backup",
- "backup_ask_for_copying_if_needed": "Alguns arquivos não consiguiram ser preparados para backup utilizando o metodo que não gasta espaço de disco temporariamente. Para realizar o backup {size:s}MB precisam ser usados temporariamente. Você concorda?",
- "backup_borg_not_implemented": "O método de backup Borg ainda não foi implementado.",
- "backup_cant_mount_uncompress_archive": "Não foi possível montar em modo leitura o diretorio de arquivos não comprimido",
- "backup_copying_to_organize_the_archive": "Copiando {size:s}MB para organizar o arquivo",
- "app_change_url_identical_domains": "O antigo e o novo domínio / url_path são idênticos ('{domain:s}{path:s}'), nada para fazer."
-}
+ "backup_abstract_method": "Este método de backup ainda não foi implementado",
+ "backup_app_failed": "Não foi possível fazer o backup de '{app}'",
+ "backup_applying_method_custom": "Chamando o método personalizado de backup '{method}'…",
+ "backup_applying_method_tar": "Criando o arquivo TAR de backup…",
+ "backup_archive_name_unknown": "Desconhece-se o arquivo local de backup de nome '{name}'",
+ "backup_archive_system_part_not_available": "A seção do sistema '{part}' está indisponível neste backup",
+    "backup_ask_for_copying_if_needed": "Você quer efetuar o backup usando {size}MB temporariamente? (É necessário fazer dessa forma porque alguns arquivos não puderam ser preparados usando um método mais eficiente)",
+ "backup_cant_mount_uncompress_archive": "Não foi possível montar o arquivo descomprimido como protegido contra escrita",
+ "backup_copying_to_organize_the_archive": "Copiando {size}MB para organizar o arquivo",
+ "app_change_url_identical_domains": "O antigo e o novo domínio / url_path são idênticos ('{domain}{path}'), nada para fazer.",
+ "password_too_simple_1": "A senha precisa ter pelo menos 8 caracteres",
+ "admin_password_too_long": "Escolha uma senha que contenha menos de 127 caracteres",
+ "aborting": "Abortando.",
+ "app_change_url_no_script": "A aplicação '{app_name}' ainda não permite modificar a URL. Talvez devesse atualizá-la.",
+ "app_argument_password_no_default": "Erro ao interpretar argumento da senha '{name}': O argumento da senha não pode ter um valor padrão por segurança",
+ "app_action_cannot_be_ran_because_required_services_down": "Estes serviços devem estar funcionado para executar esta ação: {services}. Tente reiniciá-los para continuar (e possivelmente investigar o porquê de não estarem funcionado).",
+ "app_action_broke_system": "Esta ação parece ter quebrado estes serviços importantes: {services}",
+ "already_up_to_date": "Nada a ser feito. Tudo já está atualizado.",
+    "additional_urls_already_removed": "A URL adicional '{url}' já está removida para a permissão '{permission}'",
+ "additional_urls_already_added": "A URL adicional '{url}' já está adicionada para a permissão '{permission}'",
+ "app_install_script_failed": "Ocorreu um erro dentro do script de instalação do aplicativo",
+ "app_install_failed": "Não foi possível instalar {app}: {error}",
+ "app_full_domain_unavailable": "Desculpe, esse app deve ser instalado num domínio próprio mas já há outros apps instalados no domínio '{domain}'. Você pode usar um subdomínio dedicado a esse aplicativo.",
+ "app_change_url_success": "A URL agora é {domain}{path}",
+ "apps_catalog_obsolete_cache": "O cache do catálogo de aplicações está vazio ou obsoleto.",
+ "apps_catalog_failed_to_download": "Não foi possível fazer o download do catálogo de aplicações {apps_catalog}: {error}",
+ "apps_catalog_updating": "Atualizando o catálogo de aplicações...",
+ "apps_catalog_init_success": "Catálogo de aplicações do sistema inicializado!",
+ "apps_already_up_to_date": "Todas as aplicações já estão atualizadas",
+ "app_packaging_format_not_supported": "Essa aplicação não pode ser instalada porque o formato dela não é suportado pela sua versão do YunoHost. Considere atualizar seu sistema.",
+ "app_upgrade_script_failed": "Ocorreu um erro dentro do script de atualização da aplicação",
+ "app_upgrade_several_apps": "As seguintes aplicações serão atualizadas: {apps}",
+ "app_start_restore": "Restaurando {app}...",
+ "app_start_backup": "Obtendo os arquivos para fazer o backup de {app}...",
+ "app_start_remove": "Removendo {app}...",
+ "app_start_install": "Instalando {app}...",
+ "app_restore_script_failed": "Ocorreu um erro dentro do script de restauração da aplicação",
+ "app_restore_failed": "Não foi possível restaurar {app}: {error}",
+ "app_remove_after_failed_install": "Removendo a aplicação após a falha da instalação...",
+    "app_requirements_unmeet": "Os requisitos para a aplicação {app} não foram satisfeitos, o pacote {pkgname} ({version}) deve ser {spec}",
+ "app_not_upgraded": "Não foi possível atualizar a aplicação '{failed_app}' e, como consequência, a atualização das seguintes aplicações foi cancelada: {apps}",
+ "app_manifest_install_ask_is_public": "Essa aplicação deve ser visível para visitantes anônimos?",
+ "app_manifest_install_ask_admin": "Escolha um usuário de administrador para essa aplicação",
+ "app_manifest_install_ask_password": "Escolha uma senha de administrador para essa aplicação",
+ "app_manifest_install_ask_path": "Escolha o caminho da url (depois do domínio) em que essa aplicação deve ser instalada",
+ "app_manifest_install_ask_domain": "Escolha o domínio em que esta aplicação deve ser instalada",
+ "app_label_deprecated": "Este comando está deprecado! Por favor use o novo comando 'yunohost user permission update' para gerenciar a etiqueta da aplicação.",
+    "app_make_default_location_already_used": "Não foi possível fazer a aplicação '{app}' ser a padrão no domínio, '{domain}' já está sendo usado por '{other_app}'",
+ "backup_archive_writing_error": "Não foi possível adicionar os arquivos '{source}' (nomeados dentro do arquivo '{dest}') ao backup no arquivo comprimido '{archive}'",
+ "backup_archive_corrupted": "Parece que o arquivo de backup '{archive}' está corrompido: {error}",
+ "backup_archive_cant_retrieve_info_json": "Não foi possível carregar informações para o arquivo '{archive}'... Não foi possível carregar info.json (ou não é um JSON válido).",
+ "backup_applying_method_copy": "Copiando todos os arquivos para o backup...",
+ "backup_actually_backuping": "Criando cópia de backup dos arquivos obtidos...",
+ "ask_user_domain": "Domínio para usar para o endereço de email e conta XMPP do usuário",
+ "ask_new_path": "Novo caminho",
+ "ask_new_domain": "Novo domínio",
+ "apps_catalog_update_success": "O catálogo de aplicações foi atualizado!",
+ "backup_no_uncompress_archive_dir": "Não existe tal diretório de arquivo descomprimido",
+ "backup_mount_archive_for_restore": "Preparando o arquivo para restauração...",
+ "backup_method_tar_finished": "Arquivo de backup TAR criado",
+ "backup_method_custom_finished": "Método de backup personalizado '{method}' finalizado",
+ "backup_method_copy_finished": "Cópia de backup finalizada",
+ "backup_custom_mount_error": "O método personalizado de backup não pôde passar do passo de 'mount'",
+ "backup_custom_backup_error": "O método personalizado de backup não pôde passar do passo de 'backup'",
+ "backup_csv_creation_failed": "Não foi possível criar o arquivo CSV necessário para a restauração",
+ "backup_csv_addition_failed": "Não foi possível adicionar os arquivos que estarão no backup ao arquivo CSV",
+ "backup_create_size_estimation": "O arquivo irá conter cerca de {size} de dados.",
+ "backup_couldnt_bind": "Não foi possível vincular {src} ao {dest}",
+ "certmanager_attempt_to_replace_valid_cert": "Você está tentando sobrescrever um certificado bom e válido para o domínio {domain}! (Use --force para prosseguir mesmo assim)",
+ "backup_with_no_restore_script_for_app": "A aplicação {app} não tem um script de restauração, você não será capaz de automaticamente restaurar o backup dessa aplicação.",
+ "backup_with_no_backup_script_for_app": "A aplicação '{app}' não tem um script de backup. Ignorando.",
+ "backup_unable_to_organize_files": "Não foi possível usar o método rápido de organizar os arquivos no arquivo de backup",
+ "backup_system_part_failed": "Não foi possível fazer o backup da parte do sistema '{part}'",
+ "backup_running_hooks": "Executando os hooks de backup...",
+ "backup_permission": "Permissão de backup para {app}",
+ "backup_output_symlink_dir_broken": "O diretório de seu arquivo '{path}' é um link simbólico quebrado. Talvez você tenha esquecido de re/montar ou conectar o dispositivo de armazenamento para onde o link aponta.",
+ "backup_output_directory_required": "Você deve especificar um diretório de saída para o backup"
+}
\ No newline at end of file
diff --git a/locales/ru.json b/locales/ru.json
index 306a8763a..5a74524bf 100644
--- a/locales/ru.json
+++ b/locales/ru.json
@@ -1,46 +1,33 @@
{
- "action_invalid": "Неверное действие '{action:s}'",
+ "action_invalid": "Неверное действие '{action}'",
"admin_password": "Пароль администратора",
"admin_password_change_failed": "Невозможно изменить пароль",
"admin_password_changed": "Пароль администратора был изменен",
- "app_already_installed": "{app:s} уже установлено",
+ "app_already_installed": "{app} уже установлено",
"app_already_installed_cant_change_url": "Это приложение уже установлено. URL не может быть изменен только с помощью этой функции. Изучите `app changeurl`, если это доступно.",
- "app_argument_choice_invalid": "Неверный выбор для аргумента '{name:s}', Это должно быть '{choices:s}'",
- "app_argument_invalid": "Недопустимое значение аргумента '{name:s}': {error:s}'",
- "app_already_up_to_date": "{app:s} уже обновлено",
- "app_argument_required": "Аргумент '{name:s}' необходим",
- "app_change_no_change_url_script": "Приложение {app_name:s} не поддерживает изменение URL, вы должны обновить его.",
- "app_change_url_identical_domains": "Старый и новый domain/url_path идентичны ('{domain:s}{path:s}'), ничего делать не надо.",
- "app_change_url_no_script": "Приложение '{app_name:s}' не поддерживает изменение url. Наверное, вам нужно обновить приложение.",
- "app_change_url_success": "Успешно изменён {app:s} url на {domain:s}{path:s}",
+ "app_argument_choice_invalid": "Неверный выбор для аргумента '{name}', Это должно быть '{choices}'",
+ "app_argument_invalid": "Недопустимое значение аргумента '{name}': {error}'",
+ "app_already_up_to_date": "{app} уже обновлено",
+ "app_argument_required": "Аргумент '{name}' необходим",
+ "app_change_url_identical_domains": "Старый и новый domain/url_path идентичны ('{domain}{path}'), ничего делать не надо.",
+ "app_change_url_no_script": "Приложение '{app_name}' не поддерживает изменение url. Наверное, вам нужно обновить приложение.",
+ "app_change_url_success": "Успешно изменён {app} url на {domain}{path}",
"app_extraction_failed": "Невозможно извлечь файлы для инсталляции",
"app_id_invalid": "Неправильный id приложения",
- "app_incompatible": "Приложение {app} несовместимо с вашей версией YonoHost",
"app_install_files_invalid": "Неправильные файлы инсталляции",
- "app_location_already_used": "Приложение '{app}' уже установлено по этому адресу ({path})",
- "app_location_install_failed": "Невозможно установить приложение в это место, потому что оно конфликтует с приложением, '{other_app}' установленном на '{other_path}'",
- "app_location_unavailable": "Этот url отсутствует или конфликтует с уже установленным приложением или приложениями: {apps:s}",
+ "app_location_unavailable": "Этот url отсутствует или конфликтует с уже установленным приложением или приложениями: {apps}",
"app_manifest_invalid": "Недопустимый манифест приложения: {error}",
- "app_no_upgrade": "Нет приложений, требующих обновления",
- "app_not_correctly_installed": "{app:s} , кажется, установлены неправильно",
- "app_not_installed": "{app:s} не установлены",
- "app_not_properly_removed": "{app:s} удалены неправильно",
- "app_package_need_update": "Пакет приложения {app} должен быть обновлён в соответствии с изменениями YonoHost",
- "app_removed": "{app:s} удалено",
+ "app_not_correctly_installed": "{app} , кажется, установлены неправильно",
+ "app_not_installed": "{app} не установлены",
+ "app_not_properly_removed": "{app} удалены неправильно",
+ "app_removed": "{app} удалено",
"app_requirements_checking": "Проверяю необходимые пакеты для {app}...",
"app_sources_fetch_failed": "Невозможно получить исходные файлы",
"app_unknown": "Неизвестное приложение",
"app_upgrade_app_name": "Обновление приложения {app}...",
- "app_upgrade_failed": "Невозможно обновить {app:s}",
+ "app_upgrade_failed": "Невозможно обновить {app}",
"app_upgrade_some_app_failed": "Невозможно обновить некоторые приложения",
- "app_upgraded": "{app:s} обновлено",
- "appslist_corrupted_json": "Не могу загрузить список приложений. Кажется, {filename:s} поврежден.",
- "appslist_fetched": "Был выбран список приложений {appslist:s}",
- "appslist_name_already_tracked": "Уже есть зарегистрированный список приложений по имени {name:s}.",
- "appslist_removed": "Список приложений {appslist:s} удалён",
- "appslist_retrieve_bad_format": "Неверный файл списка приложений{appslist:s}",
- "appslist_retrieve_error": "Невозможно получить список удаленных приложений {appslist:s}: {error:s}",
- "appslist_unknown": "Список приложений {appslist:s} неизвестен.",
- "appslist_url_already_tracked": "Это уже зарегистрированный список приложений с url{url:s}.",
- "installation_complete": "Установка завершена"
-}
+ "app_upgraded": "{app} обновлено",
+ "installation_complete": "Установка завершена",
+ "password_too_simple_1": "Пароль должен быть не менее 8 символов"
+}
\ No newline at end of file
diff --git a/locales/sv.json b/locales/sv.json
index 0967ef424..39707d07c 100644
--- a/locales/sv.json
+++ b/locales/sv.json
@@ -1 +1,11 @@
-{}
+{
+ "password_too_simple_1": "Lösenordet måste bestå av minst åtta tecken",
+ "app_action_broke_system": "Åtgärden verkar ha fått följande viktiga tjänster att haverera: {services}",
+ "already_up_to_date": "Ingenting att göra. Allt är redan uppdaterat.",
+ "admin_password": "Administratörslösenord",
+ "admin_password_too_long": "Välj gärna ett lösenord som inte innehåller fler än 127 tecken",
+ "admin_password_change_failed": "Kan inte byta lösenord",
+ "action_invalid": "Ej tillåten åtgärd '{action}'",
+ "admin_password_changed": "Administratörskontots lösenord ändrades",
+ "aborting": "Avbryter."
+}
\ No newline at end of file
diff --git a/locales/tr.json b/locales/tr.json
index 0967ef424..6c881eec7 100644
--- a/locales/tr.json
+++ b/locales/tr.json
@@ -1 +1,3 @@
-{}
+{
+ "password_too_simple_1": "Şifre en az 8 karakter uzunluğunda olmalı"
+}
\ No newline at end of file
diff --git a/locales/uk.json b/locales/uk.json
new file mode 100644
index 000000000..35923908f
--- /dev/null
+++ b/locales/uk.json
@@ -0,0 +1,679 @@
+{
+ "app_manifest_install_ask_domain": "Оберіть домен, в якому треба встановити цей застосунок",
+ "app_manifest_invalid": "Щось не так з маніфестом застосунку: {error}",
+ "app_location_unavailable": "Ця URL-адреса або недоступна, або конфліктує з уже встановленим застосунком (застосунками):\n{apps}",
+ "app_label_deprecated": "Ця команда застаріла! Будь ласка, використовуйте нову команду 'yunohost user permission update' для управління заголовком застосунку.",
+ "app_make_default_location_already_used": "Неможливо зробити '{app}' типовим застосунком на домені, '{domain}' вже використовується '{other_app}'",
+ "app_install_script_failed": "Сталася помилка в скрипті встановлення застосунку",
+ "app_install_failed": "Неможливо встановити {app}: {error}",
+ "app_install_files_invalid": "Ці файли не можуть бути встановлені",
+ "app_id_invalid": "Неприпустимий ID застосунку",
+ "app_full_domain_unavailable": "Вибачте, цей застосунок повинен бути встановлений на власному домені, але інші застосунки вже встановлені на домені '{domain}'. Замість цього ви можете використовувати піддомен, призначений для цього застосунку.",
+ "app_extraction_failed": "Не вдалося витягти файли встановлення",
+ "app_change_url_success": "URL-адреса {app} тепер {domain}{path}",
+ "app_change_url_no_script": "Застосунок '{app_name}' поки не підтримує зміну URL-адрес. Можливо, вам слід оновити його.",
+ "app_change_url_identical_domains": "Старий і новий domain/url_path збігаються ('{domain}{path}'), нічого робити не треба.",
+ "app_argument_required": "Аргумент '{name}' необхідний",
+ "app_argument_password_no_default": "Помилка під час розбору аргументу пароля '{name}': аргумент пароля не може мати типове значення з причин безпеки",
+ "app_argument_invalid": "Виберіть правильне значення для аргументу '{name}': {error}",
+ "app_argument_choice_invalid": "Використовуйте один з цих варіантів '{choices}' для аргументу '{name}' замість '{value}'",
+ "app_already_up_to_date": "{app} має найостаннішу версію",
+ "app_already_installed_cant_change_url": "Цей застосунок уже встановлено. URL-адреса не може бути змінена тільки цією функцією. Перевірте в `app changeurl`, якщо вона доступна.",
+ "app_already_installed": "{app} уже встановлено",
+ "app_action_broke_system": "Ця дія, схоже, порушила роботу наступних важливих служб: {services}",
+ "app_action_cannot_be_ran_because_required_services_down": "Для виконання цієї дії повинні бути запущені наступні необхідні служби: {services}. Спробуйте перезапустити їх, щоб продовжити (і, можливо, з'ясувати, чому вони не працюють).",
+ "already_up_to_date": "Нічого не потрібно робити. Все вже актуально.",
+ "admin_password_too_long": "Будь ласка, виберіть пароль коротше 127 символів",
+ "admin_password_changed": "Пароль адміністрації було змінено",
+ "admin_password_change_failed": "Неможливо змінити пароль",
+ "admin_password": "Пароль адміністрації",
+ "additional_urls_already_removed": "Додаткова URL-адреса '{url}' вже видалена в додатковій URL-адресі для дозволу '{permission}'",
+ "additional_urls_already_added": "Додаткова URL-адреса '{url}' вже додана в додаткову URL-адресу для дозволу '{permission}'",
+ "action_invalid": "Неприпустима дія '{action}'",
+ "aborting": "Переривання.",
+ "diagnosis_description_web": "Мережа",
+ "service_reloaded_or_restarted": "Службу '{service}' була перезавантажено або перезапущено",
+ "service_reload_or_restart_failed": "Не вдалося перезавантажити або перезапустити службу '{service}' \n\nНедавні журнали служби: {logs}",
+ "service_restarted": "Службу '{service}' перезапущено",
+ "service_restart_failed": "Не вдалося запустити службу '{service}' \n\nНедавні журнали служб: {logs}",
+ "service_reloaded": "Служба '{service}' перезавантажена",
+ "service_reload_failed": "Не вдалося перезавантажити службу '{service}'\n\nОстанні журнали служби: {logs}",
+ "service_removed": "Служба '{service}' вилучена",
+ "service_remove_failed": "Не вдалося видалити службу '{service}'",
+ "service_regen_conf_is_deprecated": "'yunohost service regen-conf' застарів! Будь ласка, використовуйте 'yunohost tools regen-conf' замість цього.",
+ "service_enabled": "Служба '{service}' тепер буде автоматично запускатися під час завантаження системи.",
+ "service_enable_failed": "Неможливо змусити службу '{service}' автоматично запускатися під час завантаження.\n\nНедавні журнали служби: {logs}",
+ "service_disabled": "Служба '{service}' більше не буде запускатися під час завантаження системи.",
+ "service_disable_failed": "Неможливо змусити службу '{service}' не запускатися під час завантаження.\n\nОстанні журнали служби: {logs}",
+ "service_description_yunohost-firewall": "Управляє відкритими і закритими портами з'єднання зі службами",
+ "service_description_yunohost-api": "Управляє взаємодією між вебінтерфейсом YunoHost і системою",
+ "service_description_ssh": "Дозволяє віддалено під'єднуватися до сервера через термінал (протокол SSH)",
+ "service_description_slapd": "Зберігає користувачів, домени і пов'язані з ними дані",
+ "service_description_rspamd": "Фільтри спаму і інші функції, пов'язані з е-поштою",
+ "service_description_redis-server": "Спеціалізована база даних, яка використовується для швидкого доступу до даних, черги завдань і зв'язку між програмами",
+ "service_description_postfix": "Використовується для надсилання та отримання е-пошти",
+ "service_description_php7.3-fpm": "Запускає застосунки, написані мовою програмування PHP за допомогою NGINX",
+ "service_description_nginx": "Обслуговує або надає доступ до всіх вебсайтів, розміщених на вашому сервері",
+ "service_description_mysql": "Зберігає дані застосунків (база даних SQL)",
+ "service_description_metronome": "Управління обліковими записами миттєвих повідомлень XMPP",
+ "service_description_fail2ban": "Захист від перебирання (брутфорсу) та інших видів атак з Інтернету",
+ "service_description_dovecot": "Дозволяє поштовим клієнтам отримувати доступ до електронної пошти (через IMAP і POP3)",
+ "service_description_dnsmasq": "Обробляє роздільність доменних імен (DNS)",
+ "service_description_yunomdns": "Дозволяє вам отримати доступ до вашого сервера, використовуючи 'yunohost.local' у вашій локальній мережі",
+ "service_cmd_exec_failed": "Не вдалося виконати команду '{command}'",
+ "service_already_stopped": "Службу '{service}' вже зупинено",
+ "service_already_started": "Службу '{service}' вже запущено",
+ "service_added": "Службу '{service}' було додано",
+ "service_add_failed": "Не вдалося додати службу '{service}'",
+ "server_reboot_confirm": "Сервер буде негайно перезавантажено, ви впевнені? [{answers}]",
+ "server_reboot": "Сервер буде перезавантажено",
+ "server_shutdown_confirm": "Сервер буде негайно вимкнено, ви впевнені? [{answers}]",
+ "server_shutdown": "Сервер буде вимкнено",
+ "root_password_replaced_by_admin_password": "Ваш кореневий (root) пароль було замінено на пароль адміністратора.",
+ "root_password_desynchronized": "Пароль адміністратора було змінено, але YunoHost не зміг поширити це на кореневий (root) пароль!",
+ "restore_system_part_failed": "Не вдалося відновити системний розділ '{part}'",
+ "restore_running_hooks": "Запуск хуків відновлення…",
+ "restore_running_app_script": "Відновлення застосунку '{app}'…",
+ "restore_removing_tmp_dir_failed": "Неможливо видалити старий тимчасовий каталог",
+ "restore_nothings_done": "Нічого не було відновлено",
+ "restore_not_enough_disk_space": "Недостатньо місця (простір: {free_space} Б, необхідний простір: {needed_space} Б, межа безпеки: {margin: d} Б)",
+ "restore_may_be_not_enough_disk_space": "Схоже, у вашій системі недостатньо місця (вільно: {free_space} Б, необхідний простір: {needed_space} Б, межа безпеки: {margin: d} Б)",
+ "restore_hook_unavailable": "Скрипт відновлення для '{part}' недоступний у вашій системі і в архіві його теж немає",
+ "restore_failed": "Не вдалося відновити систему",
+ "restore_extracting": "Витягнення необхідних файлів з архіву…",
+ "restore_confirm_yunohost_installed": "Ви дійсно хочете відновити вже встановлену систему? [{answers}]",
+ "restore_complete": "Відновлення завершено",
+ "restore_cleaning_failed": "Не вдалося очистити тимчасовий каталог відновлення",
+ "restore_backup_too_old": "Цей архів резервних копій не може бути відновлений, бо він отриманий з дуже старої версії YunoHost.",
+ "restore_already_installed_apps": "Наступні програми не можуть бути відновлені, тому що вони вже встановлені: {apps}",
+ "restore_already_installed_app": "Застосунок з ID «{app}» вже встановлено",
+ "regex_with_only_domain": "Ви не можете використовувати regex для домену, тільки для шляху",
+ "regex_incompatible_with_tile": "/! \\ Packagers! Дозвіл '{permission}' має значення show_tile 'true', тому ви не можете визначити regex URL в якості основної URL",
+ "regenconf_need_to_explicitly_specify_ssh": "Конфігурація ssh була змінена вручну, але вам потрібно явно вказати категорію 'ssh' з --force, щоб застосувати зміни.",
+ "regenconf_pending_applying": "Застосування очікує конфігурації для категорії '{category}'...",
+ "regenconf_failed": "Не вдалося відновити конфігурацію для категорії (категорій): {categories}",
+ "regenconf_dry_pending_applying": "Перевірка очікує конфігурації, яка була б застосована для категорії '{category}'…",
+ "regenconf_would_be_updated": "Конфігурація була б оновлена для категорії '{category}'",
+ "regenconf_updated": "Конфігурація оновлена для категорії '{category}'",
+ "regenconf_up_to_date": "Конфігурація вже оновлена для категорії '{category}'",
+ "regenconf_now_managed_by_yunohost": "Конфігураційний файл '{conf}' тепер управляється YunoHost (категорія {category}).",
+ "regenconf_file_updated": "Конфігураційний файл '{conf}' оновлено",
+ "regenconf_file_removed": "Конфігураційний файл '{conf}' видалено",
+ "regenconf_file_remove_failed": "Неможливо видалити файл конфігурації '{conf}'",
+ "regenconf_file_manually_removed": "Конфігураційний файл '{conf}' було видалено вручну і не буде створено",
+ "regenconf_file_manually_modified": "Конфігураційний файл '{conf}' було змінено вручну і не буде оновлено",
+ "regenconf_file_kept_back": "Очікувалося видалення конфігураційного файлу '{conf}' за допомогою regen-conf (категорія {category}), але його було збережено.",
+ "regenconf_file_copy_failed": "Не вдалося скопіювати новий файл конфігурації '{new}' в '{conf}'",
+ "regenconf_file_backed_up": "Конфігураційний файл '{conf}' збережено в '{backup}'",
+ "postinstall_low_rootfsspace": "Загальне місце кореневої файлової системи становить менше 10 ГБ, що викликає занепокоєння! Швидше за все, дисковий простір закінчиться дуже скоро! Рекомендовано мати не менше 16 ГБ для кореневої файлової системи. Якщо ви хочете встановити YunoHost попри це попередження, повторно запустіть післявстановлення з параметром --force-diskspace",
+ "port_already_opened": "Порт {port} вже відкрито для з'єднань {ip_version}",
+ "port_already_closed": "Порт {port} вже закрито для з'єднань {ip_version}",
+ "permission_require_account": "Дозвіл {permission} має зміст тільки для користувачів, що мають обліковий запис, і тому не може бути увімкненим для відвідувачів.",
+ "permission_protected": "Дозвіл {permission} захищено. Ви не можете додавати або вилучати групу відвідувачів до/з цього дозволу.",
+ "permission_updated": "Дозвіл '{permission}' оновлено",
+ "permission_update_failed": "Не вдалося оновити дозвіл '{permission}': {error}",
+ "permission_not_found": "Дозвіл '{permission}' не знайдено",
+ "permission_deletion_failed": "Не вдалося видалити дозвіл '{permission}': {error}",
+ "permission_deleted": "Дозвіл '{permission}' видалено",
+ "permission_cant_add_to_all_users": "Дозвіл {permission} не може бути додано всім користувачам.",
+ "permission_currently_allowed_for_all_users": "Наразі цей дозвіл надається всім користувачам на додачу до інших груп. Імовірно, вам потрібно або видалити дозвіл 'all_users', або видалити інші групи, яким його зараз надано.",
+ "permission_creation_failed": "Не вдалося створити дозвіл '{permission}': {error}",
+ "permission_created": "Дозвіл '{permission}' створено",
+ "permission_cannot_remove_main": "Вилучення основного дозволу заборонене",
+ "permission_already_up_to_date": "Дозвіл не було оновлено, тому що запити на додавання/вилучення вже відповідають поточному стану.",
+ "permission_already_exist": "Дозвіл '{permission}' вже існує",
+ "permission_already_disallowed": "Група '{group}' вже має вимкнений дозвіл '{permission}'",
+ "permission_already_allowed": "Група '{group}' вже має увімкнений дозвіл '{permission}'",
+ "pattern_password_app": "На жаль, паролі не можуть містити такі символи: {forbidden_chars}",
+ "pattern_username": "Має складатися тільки з букв і цифр в нижньому регістрі і символів підкреслення",
+ "pattern_port_or_range": "Має бути припустимий номер порту (наприклад, 0-65535) або діапазон портів (наприклад, 100:200)",
+ "pattern_password": "Має бути довжиною не менше 3 символів",
+ "pattern_mailbox_quota": "Має бути розмір з суфіксом b/k/M/G/T або 0, щоб не мати квоти",
+ "pattern_lastname": "Має бути припустиме прізвище",
+ "pattern_firstname": "Має бути припустиме ім'я",
+ "pattern_email": "Має бути припустима адреса е-пошти, без символу '+' (наприклад, someone@example.com)",
+ "pattern_email_forward": "Має бути припустима адреса е-пошти, символ '+' приймається (наприклад, someone+tag@example.com)",
+ "pattern_domain": "Має бути припустиме доменне ім'я (наприклад, my-domain.org)",
+ "pattern_backup_archive_name": "Має бути правильна назва файлу, що містить не більше 30 символів, тільки букви і цифри і символи -_",
+ "password_too_simple_4": "Пароль має складатися не менше ніж з 12 символів і містити цифри, великі та малі символи і спеціальні символи",
+ "password_too_simple_3": "Пароль має складатися не менше ніж з 8 символів і містити цифри, великі та малі символи і спеціальні символи",
+ "password_too_simple_2": "Пароль має складатися не менше ніж з 8 символів і містити цифри, великі та малі символи",
+ "password_too_simple_1": "Пароль має складатися не менше ніж з 8 символів",
+ "password_listed": "Цей пароль входить в число найбільш часто використовуваних паролів у світі. Будь ласка, виберіть щось неповторюваніше.",
+ "packages_upgrade_failed": "Не вдалося оновити всі пакети",
+ "operation_interrupted": "Операція була вручну перервана?",
+ "invalid_number": "Має бути числом",
+ "not_enough_disk_space": "Недостатньо вільного місця на '{path}'",
+ "migrations_to_be_ran_manually": "Міграція {id} повинна бути запущена вручну. Будь ласка, перейдіть в розділ Засоби → Міграції на сторінці вебадміністрації або виконайте команду `yunohost tools migrations run`.",
+ "migrations_success_forward": "Міграцію {id} завершено",
+ "migrations_skip_migration": "Пропускання міграції {id}...",
+ "migrations_running_forward": "Виконання міграції {id}...",
+ "migrations_pending_cant_rerun": "Наступні міграції ще не завершені, тому не можуть бути запущені знову: {ids}",
+ "migrations_not_pending_cant_skip": "Наступні міграції не очікують виконання, тому не можуть бути пропущені: {ids}",
+ "migrations_no_such_migration": "Не існує міграції під назвою '{id}'",
+ "migrations_no_migrations_to_run": "Немає міграцій для запуску",
+ "migrations_need_to_accept_disclaimer": "Щоб запустити міграцію {id}, ви повинні прийняти наступну відмову від відповідальності:\n---\n{disclaimer}\n---\nЯкщо ви згодні запустити міграцію, будь ласка, повторіть команду з опцією '--accept-disclaimer'.",
+ "migrations_must_provide_explicit_targets": "Ви повинні вказати явні цілі при використанні '--skip' або '--force-rerun'",
+ "migrations_migration_has_failed": "Міграція {id} не завершена, перериваємо. Помилка: {exception}",
+ "migrations_loading_migration": "Завантаження міграції {id}...",
+ "migrations_list_conflict_pending_done": "Ви не можете одночасно використовувати '--previous' і '--done'.",
+ "migrations_exclusive_options": "'--auto', '--skip', і '--force-rerun' є взаємовиключними опціями.",
+ "migrations_failed_to_load_migration": "Не вдалося завантажити міграцію {id}: {error}",
+ "migrations_dependencies_not_satisfied": "Запустіть ці міграції: '{dependencies_id}', перед міграцією {id}.",
+ "migrations_cant_reach_migration_file": "Не вдалося отримати доступ до файлів міграцій за шляхом '%s'",
+ "migrations_already_ran": "Наступні міграції вже виконано: {ids}",
+ "migration_0019_slapd_config_will_be_overwritten": "Схоже, що ви вручну відредагували конфігурацію slapd. Для цього критичного переходу YunoHost повинен примусово оновити конфігурацію slapd. Оригінальні файли будуть збережені в {conf_backup_folder}.",
+ "migration_0019_add_new_attributes_in_ldap": "Додавання нових атрибутів для дозволів у базі даних LDAP",
+ "migration_0018_failed_to_reset_legacy_rules": "Не вдалося скинути спадкові правила iptables: {error}",
+ "migration_0018_failed_to_migrate_iptables_rules": "Не вдалося перенести спадкові правила iptables в nftables: {error}",
+ "migration_0017_not_enough_space": "Звільніть достатньо місця в {path} для запуску міграції.",
+ "migration_0017_postgresql_11_not_installed": "PostgreSQL 9.6 встановлено, але не PostgreSQL 11‽ Можливо, у вашій системі відбулося щось дивне :(...",
+ "migration_0017_postgresql_96_not_installed": "PostgreSQL не встановлено у вашій системі. Нічого не потрібно робити.",
+ "migration_0015_weak_certs": "Було виявлено, що такі сертифікати все ще використовують слабкі алгоритми підпису і повинні бути оновлені для сумісності з наступною версією nginx: {certs}",
+ "migration_0015_cleaning_up": "Очищення кеш-пам'яті і пакетів, які більше не потрібні...",
+ "migration_0015_specific_upgrade": "Початок оновлення системних пакетів, які повинні бути оновлені незалежно...",
+ "migration_0015_modified_files": "Зверніть увагу, що такі файли були змінені вручну і можуть бути перезаписані після оновлення: {manually_modified_files}",
+ "migration_0015_problematic_apps_warning": "Зверніть увагу, що були виявлені наступні, можливо, проблемні встановлені застосунки. Схоже, що вони не були встановлені з каталогу застосунків YunoHost або не зазначені як «робочі». Отже, не можна гарантувати, що вони будуть працювати після оновлення: {problematic_apps}",
+ "migration_0015_general_warning": "Будь ласка, зверніть увагу, що ця міграція є делікатною операцією. Команда YunoHost зробила все можливе, щоб перевірити і протестувати її, але міграція все ще може порушити частина системи або її застосунків.\n\nТому рекомендовано:\n - Виконати резервне копіювання всіх важливих даних або застосунків. Подробиці на сайті https://yunohost.org/backup; \n - Наберіться терпіння після запуску міграції: В залежності від вашого з'єднання з Інтернетом і апаратного забезпечення, оновлення може зайняти до декількох годин.",
+ "migration_0015_system_not_fully_up_to_date": "Ваша система не повністю оновлена. Будь ласка, виконайте регулярне оновлення перед запуском міграції на Buster.",
+ "migration_0015_not_enough_free_space": "Вільного місця в /var/ досить мало! У вас повинно бути не менше 1 ГБ вільного місця, щоб запустити цю міграцію.",
+ "migration_0015_not_stretch": "Поточний дистрибутив Debian не є Stretch!",
+ "migration_0015_yunohost_upgrade": "Початок оновлення ядра YunoHost...",
+ "migration_0015_still_on_stretch_after_main_upgrade": "Щось пішло не так під час основного оновлення, система, схоже, все ще знаходиться на Debian Stretch",
+ "migration_0015_main_upgrade": "Початок основного оновлення...",
+ "migration_0015_patching_sources_list": "Виправлення sources.lists...",
+ "migration_0015_start": "Початок міграції на Buster",
+ "migration_update_LDAP_schema": "Оновлення схеми LDAP...",
+ "migration_ldap_rollback_success": "Система відкотилася.",
+ "migration_ldap_migration_failed_trying_to_rollback": "Не вдалося виконати міграцію... Пробуємо відкотити систему.",
+ "migration_ldap_can_not_backup_before_migration": "Не вдалося завершити резервне копіювання системи перед невдалою міграцією. Помилка: {error}",
+ "migration_ldap_backup_before_migration": "Створення резервної копії бази даних LDAP і налаштування застосунків перед фактичною міграцією.",
+ "migration_description_0020_ssh_sftp_permissions": "Додавання підтримки дозволів SSH і SFTP",
+ "migration_description_0019_extend_permissions_features": "Розширення/переробка системи управління дозволами застосунків",
+ "migration_description_0018_xtable_to_nftable": "Перенесення старих правил мережевого трафіку в нову систему nftable",
+ "migration_description_0017_postgresql_9p6_to_11": "Перенесення баз даних з PostgreSQL 9.6 на 11",
+ "migration_description_0016_php70_to_php73_pools": "Перенесення php7.0-fpm 'pool' conf файлів на php7.3",
+ "migration_description_0015_migrate_to_buster": "Оновлення системи до Debian Buster і YunoHost 4.x",
+ "migrating_legacy_permission_settings": "Перенесення спадкових налаштувань дозволів...",
+ "main_domain_changed": "Основний домен було змінено",
+ "main_domain_change_failed": "Неможливо змінити основний домен",
+ "mail_unavailable": "Ця е-пошта зарезервована і буде автоматично виділена найпершому користувачеві",
+ "mailbox_used_space_dovecot_down": "Поштова служба Dovecot повинна бути запущена, якщо ви хочете отримати використане місце в поштовій скриньці",
+ "mailbox_disabled": "Е-пошта вимкнена для користувача {user}",
+ "mail_forward_remove_failed": "Не вдалося видалити переадресацію електронної пошти '{mail}'",
+ "mail_domain_unknown": "Неправильна адреса е-пошти для домену '{domain}'. Будь ласка, використовуйте домен, що адмініструється цим сервером.",
+ "mail_alias_remove_failed": "Не вдалося видалити аліас електронної пошти '{mail}'",
+ "log_tools_reboot": "Перезавантаження сервера",
+ "log_tools_shutdown": "Вимикання сервера",
+ "log_tools_upgrade": "Оновлення системних пакетів",
+ "log_tools_postinstall": "Післявстановлення сервера YunoHost",
+ "log_tools_migrations_migrate_forward": "Запущено міграції",
+ "log_domain_main_domain": "Зроблено '{}' основним доменом",
+ "log_user_permission_reset": "Скинуто дозвіл «{}»",
+ "log_user_permission_update": "Оновлено доступи для дозволу '{}'",
+ "log_user_update": "Оновлено відомості для користувача '{}'",
+ "log_user_group_update": "Оновлено групу '{}'",
+ "log_user_group_delete": "Видалено групу «{}»",
+ "log_user_group_create": "Створено групу '{}'",
+ "log_user_delete": "Видалення користувача '{}'",
+ "log_user_create": "Додавання користувача '{}'",
+ "log_regen_conf": "Перестворення системних конфігурацій '{}'",
+ "log_letsencrypt_cert_renew": "Оновлення сертифікату Let's Encrypt на домені '{}'",
+ "log_selfsigned_cert_install": "Установлення самопідписаного сертифікату на домені '{}'",
+ "log_permission_url": "Оновлення URL, пов'язаногл з дозволом '{}'",
+ "log_permission_delete": "Видалення дозволу '{}'",
+ "log_permission_create": "Створення дозволу '{}'",
+ "log_letsencrypt_cert_install": "Установлення сертифікату Let's Encrypt на домен '{}'",
+ "log_dyndns_update": "Оновлення IP, пов'язаного з вашим піддоменом YunoHost '{}'",
+ "log_dyndns_subscribe": "Підписка на піддомен YunoHost '{}'",
+ "log_domain_remove": "Вилучення домену '{}' з конфігурації системи",
+ "log_domain_add": "Додавання домену '{}' в конфігурацію системи",
+ "log_remove_on_failed_install": "Вилучення '{}' після невдалого встановлення",
+ "log_remove_on_failed_restore": "Вилучення '{}' після невдалого відновлення з резервного архіву",
+ "log_backup_restore_app": "Відновлення '{}' з архіву резервних копій",
+ "log_backup_restore_system": "Відновлення системи з резервного архіву",
+ "log_backup_create": "Створення резервного архіву",
+ "log_available_on_yunopaste": "Цей журнал тепер доступний за посиланням {url}",
+ "log_app_action_run": "Запуск дії застосунку «{}»",
+ "log_app_makedefault": "Застосунок '{}' зроблено типовим",
+ "log_app_upgrade": "Оновлення застосунку '{}'",
+ "log_app_remove": "Вилучення застосунку '{}'",
+ "log_app_install": "Установлення застосунку '{}'",
+ "log_app_change_url": "Змінення URL-адреси застосунку «{}»",
+ "log_operation_unit_unclosed_properly": "Блок операцій не був закритий належним чином",
+ "log_does_exists": "Немає журналу операцій з назвою '{log}', використовуйте 'yunohost log list', щоб подивитися всі доступні журнали операцій",
+ "log_help_to_get_failed_log": "Операція '{desc}' не може бути завершена. Будь ласка, поділіться повним журналом цієї операції, використовуючи команду 'yunohost log share {name}', щоб отримати допомогу",
+ "log_link_to_failed_log": "Не вдалося завершити операцію '{desc}'. Будь ласка, надайте повний журнал цієї операції, натиснувши тут, щоб отримати допомогу",
+ "log_help_to_get_log": "Щоб переглянути журнал операції '{desc}', використовуйте команду 'yunohost log show {name}'",
+ "log_link_to_log": "Повний журнал цієї операції: '{desc}'",
+ "log_corrupted_md_file": "Файл метаданих YAML, пов'язаний з журналами, пошкоджено: '{md_file}\nПомилка: {error}'",
+ "iptables_unavailable": "Ви не можете грати з iptables тут. Ви перебуваєте або в контейнері, або ваше ядро не підтримує його",
+ "ip6tables_unavailable": "Ви не можете грати з ip6tables тут. Ви перебуваєте або в контейнері, або ваше ядро не підтримує його",
+ "invalid_regex": "Неприпустимий regex: '{regex}'",
+ "installation_complete": "Установлення завершено",
+ "hook_name_unknown": "Невідома назва хука '{name}'",
+ "hook_list_by_invalid": "Цю властивість не може бути використано для перерахування хуків (гачків)",
+ "hook_json_return_error": "Не вдалося розпізнати повернення з хука {path}. Помилка: {msg}. Необроблений контент: {raw_content}",
+ "hook_exec_not_terminated": "Скрипт не завершився належним чином: {path}",
+ "hook_exec_failed": "Не вдалося запустити скрипт: {path}",
+ "group_user_not_in_group": "Користувач {user} не входить в групу {group}",
+ "group_user_already_in_group": "Користувач {user} вже в групі {group}",
+ "group_update_failed": "Не вдалося оновити групу '{group}': {error}",
+ "group_updated": "Групу '{group}' оновлено",
+ "group_unknown": "Група '{group}' невідома",
+ "group_deletion_failed": "Не вдалося видалити групу '{group}': {error}",
+ "group_deleted": "Групу '{group}' видалено",
+ "group_cannot_be_deleted": "Група {group} не може бути видалена вручну.",
+ "group_cannot_edit_primary_group": "Група '{group}' не може бути відредагована вручну. Це основна група, призначена тільки для одного конкретного користувача.",
+ "group_cannot_edit_visitors": "Група 'visitors' не може бути відредагована вручну. Це спеціальна група, що представляє анонімних відвідувачів",
+ "group_cannot_edit_all_users": "Група 'all_users' не може бути відредагована вручну. Це спеціальна група, призначена для всіх користувачів, зареєстрованих в YunoHost",
+ "group_creation_failed": "Не вдалося створити групу '{group}': {error}",
+ "group_created": "Групу '{group}' створено",
+ "group_already_exist_on_system_but_removing_it": "Група {group} вже існує в групах системи, але YunoHost вилучить її...",
+ "group_already_exist_on_system": "Група {group} вже існує в групах системи",
+ "group_already_exist": "Група {group} вже існує",
+ "good_practices_about_user_password": "Зараз ви збираєтеся поставити новий пароль користувача. Пароль повинен складатися не менше ніж з 8 символів, але хорошою практикою є використання більш довгого пароля (тобто гасла) і/або використання різних символів (великих, малих, цифр і спеціальних символів).",
+ "good_practices_about_admin_password": "Зараз ви збираєтеся поставити новий пароль адміністрації. Пароль повинен складатися не менше ніж з 8 символів, але хорошою практикою є використання більш довгого пароля (тобто парольного гасла) і/або використання різних символів (великих, малих, цифр і спеціальних символів).",
+ "global_settings_unknown_type": "Несподівана ситуація, налаштування {setting} має тип {unknown_type}, але це не тип, підтримуваний системою.",
+ "global_settings_setting_backup_compress_tar_archives": "При створенні нових резервних копій стискати архіви (.tar.gz) замість нестислих архівів (.tar). NB: вмикання цієї опції означає створення легших архівів резервних копій, але початкова процедура резервного копіювання буде значно довшою і важчою для CPU.",
+ "global_settings_setting_security_webadmin_allowlist": "IP-адреси, яким дозволений доступ до вебадміністрації. Через кому.",
+ "global_settings_setting_security_webadmin_allowlist_enabled": "Дозволити доступ до вебадміністрації тільки деяким IP-адресам.",
+ "global_settings_setting_smtp_relay_password": "Пароль хоста SMTP-ретрансляції",
+ "global_settings_setting_smtp_relay_user": "Обліковий запис користувача SMTP-ретрансляції",
+ "global_settings_setting_smtp_relay_port": "Порт SMTP-ретрансляції",
+ "global_settings_setting_smtp_relay_host": "Хост SMTP-ретрансляції, який буде використовуватися для надсилання е-пошти замість цього зразка Yunohost. Корисно, якщо ви знаходитеся в одній із цих ситуацій: ваш 25 порт заблокований вашим провайдером або VPS провайдером, у вас є житловий IP в списку DUHL, ви не можете налаштувати зворотний DNS або цей сервер не доступний безпосередньо в Інтернеті і ви хочете використовувати інший сервер для відправки електронних листів.",
+ "global_settings_setting_smtp_allow_ipv6": "Дозволити використання IPv6 для отримання і надсилання листів е-пошти",
+ "global_settings_setting_ssowat_panel_overlay_enabled": "Увімкнути накладення панелі SSOwat",
+ "global_settings_setting_service_ssh_allow_deprecated_dsa_hostkey": "Дозволити використання (застарілого) ключа DSA для конфігурації демона SSH",
+ "global_settings_unknown_setting_from_settings_file": "Невідомий ключ в налаштуваннях: '{setting_key}', відхиліть його і збережіть у /etc/yunohost/settings-unknown.json",
+ "global_settings_setting_security_ssh_port": "SSH-порт",
+ "global_settings_setting_security_postfix_compatibility": "Компроміс між сумісністю і безпекою для сервера Postfix. Впливає на шифри (і інші аспекти, пов'язані з безпекою)",
+ "global_settings_setting_security_ssh_compatibility": "Компроміс між сумісністю і безпекою для SSH-сервера. Впливає на шифри (і інші аспекти, пов'язані з безпекою)",
+ "global_settings_setting_security_password_user_strength": "Надійність пароля користувача",
+ "global_settings_setting_security_password_admin_strength": "Надійність пароля адміністратора",
+ "global_settings_setting_security_nginx_compatibility": "Компроміс між сумісністю і безпекою для вебсервера NGINX. Впливає на шифри (і інші аспекти, пов'язані з безпекою)",
+ "global_settings_setting_pop3_enabled": "Увімкніть протокол POP3 для поштового сервера",
+ "global_settings_reset_success": "Попередні налаштування тепер збережені в {path}",
+ "global_settings_key_doesnt_exists": "Ключ '{settings_key}' не існує в глобальних налаштуваннях, ви можете побачити всі доступні ключі, виконавши команду 'yunohost settings list'",
+ "global_settings_cant_write_settings": "Неможливо зберегти файл налаштувань, причина: {reason}",
+ "global_settings_cant_serialize_settings": "Не вдалося серіалізувати дані налаштувань, причина: {reason}",
+ "global_settings_cant_open_settings": "Не вдалося відкрити файл налаштувань, причина: {reason}",
+ "global_settings_bad_type_for_setting": "Поганий тип для налаштування {setting}, отримано {received_type}, а очікується {expected_type}",
+ "global_settings_bad_choice_for_enum": "Поганий вибір для налаштування {setting}, отримано '{choice}', але доступні наступні варіанти: {available_choices}",
+ "firewall_rules_cmd_failed": "Деякі команди правил фаєрвола не спрацювали. Подробиці в журналі.",
+ "firewall_reloaded": "Фаєрвол перезавантажено",
+ "firewall_reload_failed": "Не вдалося перезавантажити фаєрвол",
+ "file_does_not_exist": "Файл {path} не існує.",
+ "field_invalid": "Неприпустиме поле '{}'",
+ "experimental_feature": "Попередження: Ця функція є експериментальною і не вважається стабільною, ви не повинні використовувати її, якщо не знаєте, що робите.",
+ "extracting": "Витягнення...",
+ "dyndns_unavailable": "Домен '{domain}' недоступний.",
+ "dyndns_domain_not_provided": "DynDNS провайдер {provider} не може надати домен {domain}.",
+ "dyndns_registration_failed": "Не вдалося зареєструвати домен DynDNS: {error}",
+ "dyndns_registered": "Домен DynDNS зареєстровано",
+ "dyndns_provider_unreachable": "Неможливо зв'язатися з провайдером DynDNS {provider}: або ваш YunoHost неправильно під'єднано до Інтернету, або сервер dynette не працює.",
+ "dyndns_no_domain_registered": "Домен не зареєстровано в DynDNS",
+ "dyndns_key_not_found": "DNS-ключ для домену не знайдено",
+ "dyndns_key_generating": "Утворення DNS-ключа... Це може зайняти деякий час.",
+ "dyndns_ip_updated": "Вашу IP-адресу в DynDNS оновлено",
+ "dyndns_ip_update_failed": "Не вдалося оновити IP-адресу в DynDNS",
+ "dyndns_could_not_check_available": "Не вдалося перевірити, чи {domain} доступний у {provider}.",
+ "dyndns_could_not_check_provide": "Не вдалося перевірити, чи може {provider} надати {domain}.",
+ "dpkg_lock_not_available": "Ця команда не може бути виконана прямо зараз, тому що інша програма, схоже, використовує блокування dpkg (системного менеджера пакетів)",
+ "dpkg_is_broken": "Ви не можете зробити це прямо зараз, тому що dpkg/APT (системні менеджери пакетів), схоже, знаходяться в зламаному стані... Ви можете спробувати вирішити цю проблему, під'єднавшись через SSH і виконавши `sudo apt install --fix-broken` та/або `sudo dpkg --configure -a`.",
+ "downloading": "Завантаження…",
+ "done": "Готово",
+ "domains_available": "Доступні домени:",
+ "domain_name_unknown": "Домен '{domain}' невідомий",
+ "domain_uninstall_app_first": "Ці застосунки все ще встановлені на вашому домені:\n{apps}\n\nВидаліть їх за допомогою 'yunohost app remove the_app_id' або перемістіть їх на інший домен за допомогою 'yunohost app change-url the_app_id', перш ніж приступити до вилучення домену",
+ "domain_remove_confirm_apps_removal": "Вилучення цього домену призведе до вилучення таких застосунків:\n{apps}\n\nВи впевнені, що хочете це зробити? [{answers}]",
+ "domain_hostname_failed": "Неможливо встановити нову назву хоста. Це може викликати проблеми в подальшому (можливо, все буде в порядку).",
+ "domain_exists": "Цей домен уже існує",
+ "domain_dyndns_root_unknown": "Невідомий кореневий домен DynDNS",
+ "domain_dyndns_already_subscribed": "Ви вже підписалися на домен DynDNS",
+ "domain_dns_conf_is_just_a_recommendation": "Ця команда показує *рекомендовану* конфігурацію. Насправді вона не встановлює конфігурацію DNS для вас. Ви самі повинні налаштувати свою зону DNS у реєстратора відповідно до цих рекомендацій.",
+ "domain_deletion_failed": "Неможливо видалити домен {domain}: {error}",
+ "domain_deleted": "Домен видалено",
+ "domain_creation_failed": "Неможливо створити домен {domain}: {error}",
+ "domain_created": "Домен створено",
+ "domain_cert_gen_failed": "Не вдалося утворити сертифікат",
+ "domain_cannot_remove_main_add_new_one": "Ви не можете видалити '{domain}', так як це основний домен і ваш єдиний домен, вам потрібно спочатку додати інший домен за допомогою 'yunohost domain add ', потім встановити його як основний домен за допомогою 'yunohost domain main-domain -n ' і потім ви можете вилучити домен '{domain}' за допомогою 'yunohost domain remove {domain}'.'",
+ "domain_cannot_add_xmpp_upload": "Ви не можете додавати домени, що починаються з 'xmpp-upload.'. Таку назву зарезервовано для функції XMPP upload, вбудованої в YunoHost.",
+ "domain_cannot_remove_main": "Ви не можете вилучити '{domain}', бо це основний домен, спочатку вам потрібно встановити інший домен в якості основного за допомогою 'yunohost domain main-domain -n '; ось список доменів-кандидатів: {other_domains}",
+ "disk_space_not_sufficient_update": "Недостатньо місця на диску для оновлення цього застосунку",
+ "disk_space_not_sufficient_install": "Недостатньо місця на диску для встановлення цього застосунку",
+ "diagnosis_sshd_config_inconsistent_details": "Будь ласка, виконайте команду yunohost settings set security.ssh.port -v YOUR_SSH_PORT , щоб визначити порт SSH, і перевіртеyunohost tools regen-conf ssh --dry-run --with-diff і yunohost tools regen-conf ssh --force , щоб скинути ваш конфіг на рекомендований YunoHost.",
+ "diagnosis_sshd_config_inconsistent": "Схоже, що порт SSH був уручну змінений в /etc/ssh/sshd_config. Починаючи з версії YunoHost 4.2, доступний новий глобальний параметр 'security.ssh.port', що дозволяє уникнути ручного редагування конфігурації.",
+ "diagnosis_sshd_config_insecure": "Схоже, що конфігурація SSH була змінена вручну і є небезпечною, оскільки не містить директив 'AllowGroups' або 'AllowUsers' для обмеження доступу авторизованих користувачів.",
+ "diagnosis_processes_killed_by_oom_reaper": "Деякі процеси було недавно вбито системою через брак пам'яті. Зазвичай це є симптомом нестачі пам'яті в системі або процесу, який з'їв дуже багато пам'яті. Зведення убитих процесів:\n{kills_summary}",
+ "diagnosis_never_ran_yet": "Схоже, що цей сервер був налаштований недавно, і поки немає звіту про діагностику. Вам слід почати з повної діагностики, або з вебадміністрації, або використовуючи 'yunohost diagnosis run' з командного рядка.",
+ "diagnosis_unknown_categories": "Наступні категорії невідомі: {categories}",
+ "diagnosis_http_nginx_conf_not_up_to_date_details": "Щоб виправити становище, перевірте різницю за допомогою командного рядка, використовуючи yunohost tools regen-conf nginx --dry-run --with-diff , і якщо все в порядку, застосуйте зміни за допомогою команди yunohost tools regen-conf nginx --force .",
+ "diagnosis_http_nginx_conf_not_up_to_date": "Схоже, що конфігурація nginx цього домену була змінена вручну, що не дозволяє YunoHost визначити, чи доступний він по HTTP.",
+ "diagnosis_http_partially_unreachable": "Домен {domain} здається недоступним по HTTP поза локальною мережею в IPv{failed}, хоча він працює в IPv{passed}.",
+ "diagnosis_http_unreachable": "Домен {domain} здається недоступним через HTTP поза локальною мережею.",
+ "diagnosis_http_bad_status_code": "Схоже, що замість вашого сервера відповіла інша машина (можливо, ваш маршрутизатор).
1. Найбільш поширеною причиною цієї проблеми є те, що порт 80 (і 443) неправильно перенаправлено на ваш сервер .
2. На більш складних установках: переконайтеся, що немає фаєрвола або зворотного проксі.",
+ "diagnosis_http_connection_error": "Помилка з'єднання: не вдалося з'єднатися із запитуваним доменом, швидше за все, він недоступний.",
+ "diagnosis_http_timeout": "При спробі зв'язатися з вашим сервером ззовні стався тайм-аут. Він здається недоступним.
1. Найбільш поширеною причиною цієї проблеми є те, що порт 80 (і 443) неправильно перенаправлено на ваш сервер .
2. Ви також повинні переконатися, що служба nginx запущена
3. На більш складних установках: переконайтеся, що немає фаєрвола або зворотного проксі.",
+ "diagnosis_http_ok": "Домен {domain} доступний по HTTP поза локальною мережею.",
+ "diagnosis_http_localdomain": "Домен {domain} з .local TLD не може бути доступний ззовні локальної мережі.",
+ "diagnosis_http_could_not_diagnose_details": "Помилка: {error}",
+ "diagnosis_http_could_not_diagnose": "Не вдалося діагностувати досяжність доменів ззовні в IPv{ipversion}.",
+ "diagnosis_http_hairpinning_issue_details": "Можливо, це пов'язано з коробкою/маршрутизатором вашого інтернет-провайдера. В результаті, люди ззовні вашої локальної мережі зможуть отримати доступ до вашого сервера, як і очікувалося, але не люди зсередини локальної мережі (як ви, ймовірно?) При використанні доменного імені або глобального IP. Можливо, ви зможете поліпшити ситуацію, глянувши https://yunohost.org/dns_local_network ",
+ "diagnosis_http_hairpinning_issue": "Схоже, що у вашій локальній мережі не увімкнено шпилькування (hairpinning).",
+ "diagnosis_ports_forwarding_tip": "Щоб вирішити цю проблему, вам, швидше за все, потрібно налаштувати пересилання портів на вашому інтернет-маршрутизаторі, як описано в https://yunohost.org/isp_box_config",
+ "diagnosis_ports_needed_by": "Відкриття цього порту необхідне для функцій {category} (служба {service})",
+ "diagnosis_ports_ok": "Порт {port} доступний ззовні.",
+ "diagnosis_ports_partially_unreachable": "Порт {port} не доступний ззовні в IPv{failed}.",
+ "diagnosis_ports_unreachable": "Порт {port} недоступний ззовні.",
+ "diagnosis_ports_could_not_diagnose_details": "Помилка: {error}",
+ "diagnosis_ports_could_not_diagnose": "Не вдалося діагностувати досяжність портів ззовні в IPv{ipversion}.",
+ "diagnosis_description_regenconf": "Конфігурації системи",
+ "diagnosis_description_mail": "Е-пошта",
+ "diagnosis_description_ports": "Виявлення портів",
+ "diagnosis_description_systemresources": "Системні ресурси",
+ "diagnosis_description_services": "Перевірка стану служб",
+ "diagnosis_description_dnsrecords": "DNS-записи",
+ "diagnosis_description_ip": "Інтернет-з'єднання",
+ "diagnosis_description_basesystem": "Основна система",
+ "diagnosis_security_vulnerable_to_meltdown_details": "Щоб виправити це, вам слід оновити систему і перезавантажитися, щоб завантажити нове ядро Linux (або звернутися до вашого серверного провайдера, якщо це не спрацює). Докладніше див. на сайті https://meltdownattack.com/.",
+ "diagnosis_security_vulnerable_to_meltdown": "Схоже, що ви вразливі до критичної вразливості безпеки Meltdown",
+ "diagnosis_rootfstotalspace_critical": "Коренева файлова система має тільки {space}, що дуже тривожно! Скоріше за все, дисковий простір закінчиться дуже скоро! Рекомендовано мати не менше 16 ГБ для кореневої файлової системи.",
+ "diagnosis_rootfstotalspace_warning": "Коренева файлова система має тільки {space}. Можливо це нормально, але будьте обережні, тому що в кінцевому підсумку дисковий простір може швидко закінчитися... Рекомендовано мати не менше 16 ГБ для кореневої файлової системи.",
+ "diagnosis_regenconf_manually_modified_details": "Можливо це нормально, якщо ви знаєте, що робите! YunoHost перестане оновлювати цей файл автоматично... Але врахуйте, що оновлення YunoHost можуть містити важливі рекомендовані зміни. Якщо ви хочете, ви можете перевірити відмінності за допомогою команди yunohost tools regen-conf {category} --dry-run --with-diff і примусово повернути рекомендовану конфігурацію за допомогою команди yunohost tools regen-conf {category} --force ",
+ "diagnosis_regenconf_manually_modified": "Конфігураційний файл {file}
, схоже, було змінено вручну.",
+ "diagnosis_regenconf_allgood": "Усі конфігураційні файли відповідають рекомендованій конфігурації!",
+ "diagnosis_mail_queue_too_big": "Занадто багато відкладених листів у поштовій черзі (листів: {nb_pending})",
+ "diagnosis_mail_queue_unavailable_details": "Помилка: {error}",
+ "diagnosis_mail_queue_unavailable": "Неможливо дізнатися кількість очікувальних листів у черзі",
+ "diagnosis_mail_queue_ok": "Відкладених електронних листів у поштових чергах: {nb_pending}",
+ "diagnosis_mail_blacklist_website": "Після визначення причини, з якої ви потрапили в чорний список, і її усунення, ви можете попросити видалити ваш IP або домен на {blacklist_website}",
+ "diagnosis_mail_blacklist_reason": "Причина внесення в чорний список: {reason}",
+ "diagnosis_mail_blacklist_listed_by": "Ваш IP або домен {item}
знаходиться в чорному списку {blacklist_name}",
+ "diagnosis_mail_blacklist_ok": "IP-адреси і домени, які використовуються цим сервером, не внесені в чорний список",
+ "diagnosis_mail_fcrdns_different_from_ehlo_domain_details": "Поточний зворотний DNS:{rdns_domain}
Очікуване значення: {ehlo_domain}
",
+ "diagnosis_mail_fcrdns_different_from_ehlo_domain": "Зворотний DNS неправильно налаштований в IPv{ipversion}. Деякі електронні листи можуть бути не доставлені або можуть бути відзначені як спам.",
+ "diagnosis_mail_fcrdns_nok_alternatives_6": "Деякі провайдери не дозволять вам налаштувати зворотний DNS (або їх функція може бути зламана...). Якщо ваш зворотний DNS правильно налаштований для IPv4, ви можете спробувати вимкнути використання IPv6 при надсиланні листів, виконавши команду yunohost settings set smtp.allow_ipv6 -v off . Примітка: останнє рішення означає, що ви не зможете надсилати або отримувати електронні листи з нечисленних серверів, що використовують тільки IPv6.",
+ "diagnosis_mail_fcrdns_nok_alternatives_4": "Деякі провайдери не дозволять вам налаштувати зворотний DNS (або їх функція може бути зламана...). Якщо ви відчуваєте проблеми через це, розгляньте наступні рішення:
- Деякі провайдери надають альтернативу використання ретранслятора поштового сервера, хоча це має на увазі, що ретранслятор зможе шпигувати за вашим поштовим трафіком.
- Альтернативою для захисту конфіденційності є використання VPN *з виділеним загальнодоступним IP* для обходу подібних обмежень. Дивіться https://yunohost.org/#/vpn_advantage
- Або можна переключитися на іншого провайдера",
+ "diagnosis_mail_fcrdns_nok_details": "Спочатку спробуйте налаштувати зворотний DNS з {ehlo_domain}
в інтерфейсі вашого інтернет-маршрутизатора або в інтерфейсі вашого хостинг-провайдера. (Деякі хостинг-провайдери можуть вимагати, щоб ви відправили їм запит у підтримку для цього).",
+ "diagnosis_mail_fcrdns_dns_missing": "У IPv{ipversion} не визначений зворотний DNS. Деякі листи можуть не доставлятися або позначатися як спам.",
+ "diagnosis_mail_fcrdns_ok": "Ваш зворотний DNS налаштовано правильно!",
+ "diagnosis_mail_ehlo_could_not_diagnose_details": "Помилка: {error}",
+ "diagnosis_mail_ehlo_could_not_diagnose": "Не вдалося діагностувати, чи доступний поштовий сервер postfix ззовні в IPv{ipversion}.",
+ "diagnosis_mail_ehlo_wrong_details": "EHLO, отриманий віддаленим діагностичним центром в IPv{ipversion}, відрізняється від домену вашого сервера.
Отриманий EHLO: {wrong_ehlo}
Очікуваний: {right_ehlo}
<br>Найпоширенішою причиною цієї проблеми є те, що порт 25 неправильно перенаправлений на ваш сервер. Крім того, переконайтеся, що в роботу сервера не втручається фаєрвол або зворотний проксі-сервер.",
+ "diagnosis_mail_ehlo_wrong": "Інший поштовий SMTP-сервер відповідає на IPv{ipversion}. Ваш сервер, ймовірно, не зможе отримувати електронні листи.",
+ "diagnosis_mail_ehlo_bad_answer_details": "Це може бути викликано тим, що замість вашого сервера відповідає інша машина.",
+ "diagnosis_mail_ehlo_bad_answer": "Не-SMTP служба відповіла на порту 25 на IPv{ipversion}",
+ "diagnosis_mail_ehlo_unreachable_details": "Не вдалося відкрити з'єднання за портом 25 з вашим сервером на IPv{ipversion}. Він здається недоступним.
1. Найбільш поширеною причиною цієї проблеми є те, що порт 25 неправильно перенаправлений на ваш сервер.
2. Ви також повинні переконатися, що служба postfix запущена.
3. На більш складних установках: переконайтеся, що немає фаєрвола або зворотного проксі.",
+ "diagnosis_mail_ehlo_unreachable": "Поштовий сервер SMTP недоступний ззовні по IPv{ipversion}. Він не зможе отримувати листи електронної пошти.",
+ "diagnosis_mail_ehlo_ok": "Поштовий сервер SMTP доступний ззовні і тому може отримувати електронні листи!",
+ "diagnosis_mail_outgoing_port_25_blocked_relay_vpn": "Деякі провайдери не дозволять вам розблокувати вихідний порт 25, тому що вони не піклуються про мережевий нейтралітет (Net Neutrality).
- Деякі з них пропонують альтернативу використання ретранслятора поштового сервера, хоча це має на увазі, що ретранслятор зможе шпигувати за вашим поштовим трафіком.
- Альтернативою для захисту конфіденційності є використання VPN *з виділеним загальнодоступним IP* для обходу такого роду обмежень. Дивіться https://yunohost.org/#/vpn_advantage
- Ви також можете розглянути можливість переходу на більш дружнього до мережевого нейтралітету провайдера",
+ "diagnosis_mail_outgoing_port_25_blocked_details": "Спочатку спробуйте розблокувати вихідний порт 25 в інтерфейсі вашого інтернет-маршрутизатора або в інтерфейсі вашого хостинг-провайдера. (Деякі хостинг-провайдери можуть вимагати, щоб ви відправили їм заявку в службу підтримки).",
+ "diagnosis_mail_outgoing_port_25_blocked": "Поштовий сервер SMTP не може відправляти електронні листи на інші сервери, оскільки вихідний порт 25 заблоковано в IPv{ipversion}.",
+ "app_manifest_install_ask_path": "Оберіть шлях URL (після домену), за яким має бути встановлено цей застосунок",
+ "yunohost_postinstall_end_tip": "Післявстановлення завершено! Щоб завершити доналаштування, будь ласка, розгляньте наступні варіанти:\n - додавання першого користувача через розділ 'Користувачі' вебадміністрації (або 'yunohost user create ' в командному рядку);\n - діагностика можливих проблем через розділ 'Діагностика' вебадміністрації (або 'yunohost diagnosis run' в командному рядку);\n - прочитання розділів 'Завершення встановлення' і 'Знайомство з YunoHost' у документації адміністратора: https://yunohost.org/admindoc.",
+ "yunohost_not_installed": "YunoHost установлений неправильно. Будь ласка, запустіть 'yunohost tools postinstall'",
+ "yunohost_installing": "Установлення YunoHost...",
+ "yunohost_configured": "YunoHost вже налаштовано",
+ "yunohost_already_installed": "YunoHost вже встановлено",
+ "user_updated": "Відомості про користувача змінено",
+ "user_update_failed": "Не вдалося оновити користувача {user}: {error}",
+ "user_unknown": "Невідомий користувач: {user}",
+ "user_home_creation_failed": "Не вдалося створити каталог домівки для користувача",
+ "user_deletion_failed": "Не вдалося видалити користувача {user}: {error}",
+ "user_deleted": "Користувача видалено",
+ "user_creation_failed": "Не вдалося створити користувача {user}: {error}",
+ "user_created": "Користувача створено",
+ "user_already_exists": "Користувач '{user}' вже існує",
+ "upnp_port_open_failed": "Не вдалося відкрити порт через UPnP",
+ "upnp_enabled": "UPnP увімкнено",
+ "upnp_disabled": "UPnP вимкнено",
+ "upnp_dev_not_found": "UPnP-пристрій не знайдено",
+ "upgrading_packages": "Оновлення пакетів...",
+ "upgrade_complete": "Оновлення завершено",
+ "updating_apt_cache": "Завантаження доступних оновлень для системних пакетів...",
+ "update_apt_cache_warning": "Щось пішло не так при оновленні кеша APT (менеджера пакунків Debian). Ось дамп рядків sources.list, який може допомогти визначити проблемні рядки:\n{sourceslist}",
+ "update_apt_cache_failed": "Неможливо оновити кеш APT (менеджер пакетів Debian). Ось дамп рядків sources.list, який може допомогти визначити проблемні рядки:\n{sourceslist}",
+ "unrestore_app": "{app} не буде оновлено",
+ "unlimit": "Квоти немає",
+ "unknown_main_domain_path": "Невідомий домен або шлях для '{app}'. Вам необхідно вказати домен і шлях, щоб мати можливість вказати URL для дозволу.",
+ "unexpected_error": "Щось пішло не так: {error}",
+ "unbackup_app": "{app} НЕ буде збережено",
+ "tools_upgrade_special_packages_completed": "Оновлення пакета YunoHost завершено.\nНатисніть [Enter] для повернення до командного рядка",
+ "tools_upgrade_special_packages_explanation": "Спеціальне оновлення триватиме у тлі. Будь ласка, не запускайте ніяких інших дій на вашому сервері протягом наступних ~ 10 хвилин (в залежності від швидкості обладнання). Після цього вам, можливо, доведеться заново увійти в вебадміністрації. Журнал оновлення буде доступний в Засоби → Журнал (в вебадміністрації) або за допомогою 'yunohost log list' (з командного рядка).",
+ "tools_upgrade_special_packages": "Тепер оновлюємо 'спеціальні' (пов'язані з yunohost) пакети…",
+ "tools_upgrade_regular_packages_failed": "Не вдалося оновити пакети: {packages_list}",
+ "tools_upgrade_regular_packages": "Тепер оновлюємо 'звичайні' (не пов'язані з yunohost) пакети…",
+ "tools_upgrade_cant_unhold_critical_packages": "Не вдалося розтримати критичні пакети…",
+ "tools_upgrade_cant_hold_critical_packages": "Не вдалося утримати критичні пакети…",
+ "tools_upgrade_cant_both": "Неможливо оновити систему і застосунки одночасно",
+ "tools_upgrade_at_least_one": "Будь ласка, вкажіть 'apps', або 'system'",
+ "this_action_broke_dpkg": "Ця дія порушила dpkg/APT (системні менеджери пакетів)... Ви можете спробувати вирішити цю проблему, під'єднавшись по SSH і запустивши `sudo apt install --fix-broken` та/або `sudo dpkg --configure -a`.",
+ "system_username_exists": "Ім'я користувача вже існує в списку користувачів системи",
+ "system_upgraded": "Систему оновлено",
+ "ssowat_conf_updated": "Конфігурацію SSOwat оновлено",
+ "ssowat_conf_generated": "Конфігурацію SSOwat перестворено",
+ "show_tile_cant_be_enabled_for_regex": "Ви не можете увімкнути 'show_tile' прямо зараз, тому що URL для дозволу '{permission}' являє собою регулярний вираз",
+ "show_tile_cant_be_enabled_for_url_not_defined": "Ви не можете увімкнути 'show_tile' прямо зараз, тому що спочатку ви повинні визначити URL для дозволу '{permission}'",
+ "service_unknown": "Невідома служба '{service}'",
+ "service_stopped": "Службу '{service}' зупинено",
+ "service_stop_failed": "Неможливо зупинити службу '{service}' \n\nНедавні журнали служби: {logs}",
+ "service_started": "Службу '{service}' запущено",
+ "service_start_failed": "Не вдалося запустити службу '{service}' \n\nНедавні журнали служби: {logs}",
+ "diagnosis_mail_outgoing_port_25_ok": "Поштовий сервер SMTP може відправляти електронні листи (вихідний порт 25 не заблоковано).",
+ "diagnosis_swap_tip": "Будь ласка, будьте обережні і знайте, що якщо сервер розміщує обсяг підкачки на SD-карті або SSD-накопичувачі, це може різко скоротити строк служби пристрою`.",
+ "diagnosis_swap_ok": "Система має {total} обсягу підкачки!",
+ "diagnosis_swap_notsomuch": "Система має тільки {total} обсягу підкачки. Щоб уникнути станоаищ, коли в системі закінчується пам'ять, слід передбачити наявність не менше {recommended} обсягу підкачки.",
+ "diagnosis_swap_none": "В системі повністю відсутня підкачка. Ви повинні розглянути можливість додавання принаймні {recommended} обсягу підкачки, щоб уникнути ситуацій, коли системі не вистачає пам'яті.",
+ "diagnosis_ram_ok": "Система все ще має {available} ({available_percent}%) оперативної пам'яті з {total}.",
+ "diagnosis_ram_low": "У системі наявно {available} ({available_percent}%) оперативної пам'яті (з {total}). Будьте уважні.",
+ "diagnosis_ram_verylow": "Система має тільки {available} ({available_percent}%) оперативної пам'яті! (з {total})",
+ "diagnosis_diskusage_ok": "У сховищі {mountpoint}
(на пристрої {device}
) залишилося {free} ({free_percent}%) вільного місця (з {total})!",
+ "diagnosis_diskusage_low": "Сховище {mountpoint}
(на пристрої {device}
) має тільки {free} ({free_percent}%) вільного місця (з {total}). Будьте уважні.",
+ "diagnosis_diskusage_verylow": "Сховище {mountpoint}
(на пристрої {device}
) має тільки {free} ({free_percent}%) вільного місця (з {total}). Вам дійсно варто подумати про очищення простору!",
+ "diagnosis_services_bad_status_tip": "Ви можете спробувати перезапустити службу, а якщо це не допоможе, подивіться журнали служби в вебадміністрації (з командного рядка це можна зробити за допомогою yunohost service restart {service} і yunohost service log {service} ).",
+ "diagnosis_services_bad_status": "Служба {service} у стані {status} :(",
+ "diagnosis_services_conf_broken": "Для служби {service} порушена конфігурація!",
+ "diagnosis_services_running": "Службу {service} запущено!",
+ "diagnosis_domain_expires_in": "Строк дії {domain} спливе через {days} днів.",
+ "diagnosis_domain_expiration_error": "Строк дії деяких доменів НЕЗАБАРОМ спливе!",
+ "diagnosis_domain_expiration_warning": "Строк дії деяких доменів спливе найближчим часом!",
+ "diagnosis_domain_expiration_success": "Ваші домени зареєстровані і не збираються спливати найближчим часом.",
+ "diagnosis_domain_expiration_not_found_details": "Відомості WHOIS для домену {domain} не містять даних про строк дії?",
+ "diagnosis_domain_not_found_details": "Домен {domain} не існує в базі даних WHOIS або строк його дії сплив!",
+ "diagnosis_domain_expiration_not_found": "Неможливо перевірити строк дії деяких доменів",
+ "diagnosis_dns_specialusedomain": "Домен {domain} заснований на домені верхнього рівня спеціального призначення (TLD) і тому не очікується, що у нього будуть актуальні записи DNS.",
+ "diagnosis_dns_try_dyndns_update_force": "Конфігурація DNS цього домену повинна автоматично управлятися YunoHost. Якщо це не так, ви можете спробувати примусово оновити її за допомогою команди yunohost dyndns update --force .",
+ "diagnosis_dns_point_to_doc": "Якщо вам потрібна допомога з налаштування DNS-записів, зверніться до документації на сайті https://yunohost.org/dns_config.",
+ "diagnosis_dns_discrepancy": "Наступний запис DNS, схоже, не відповідає рекомендованій конфігурації:
Тип: {type}
Назва: {name}
Поточне значення: {current}
Очікуване значення: {value}
",
+ "diagnosis_dns_missing_record": "Згідно рекомендованої конфігурації DNS, ви повинні додати запис DNS з наступними відомостями.
Тип: {type}
Назва: {name}
Значення: {value}
",
+ "diagnosis_dns_bad_conf": "Деякі DNS-записи відсутні або неправильні для домену {domain} (категорія {category})",
+ "diagnosis_dns_good_conf": "DNS-записи правильно налаштовані для домену {domain} (категорія {category})",
+ "diagnosis_ip_weird_resolvconf_details": "Файл /etc/resolv.conf
повинен бути символічним посиланням на /etc/resolvconf/run/resolv.conf
, що вказує на 127.0.0.1
(dnsmasq). Якщо ви хочете вручну налаштувати DNS вирішувачі (resolvers), відредагуйте /etc/resolv.dnsmasq.conf
.",
+ "diagnosis_ip_weird_resolvconf": "Роздільність DNS, схоже, працює, але схоже, що ви використовуєте користувацьку /etc/resolv.conf
.",
+ "diagnosis_ip_broken_resolvconf": "Схоже, що роздільність доменних імен на вашому сервері порушено, що пов'язано з тим, що /etc/resolv.conf
не вказує на 127.0.0.1
.",
+ "diagnosis_ip_broken_dnsresolution": "Роздільність доменних імен, схоже, з якоїсь причини не працює... Фаєрвол блокує DNS-запити?",
+ "diagnosis_ip_dnsresolution_working": "Роздільність доменних імен працює!",
+ "diagnosis_ip_not_connected_at_all": "Здається, сервер взагалі не під'єднаний до Інтернету!?",
+ "diagnosis_ip_local": "Локальний IP: {local}
",
+ "diagnosis_ip_global": "Глобальний IP: {global}
",
+ "diagnosis_ip_no_ipv6_tip": "Наявність робочого IPv6 не є обов'язковим для роботи вашого сервера, але це краще для здоров'я Інтернету в цілому. IPv6 зазвичай автоматично налаштовується системою або вашим провайдером, якщо він доступний. В іншому випадку вам, можливо, доведеться налаштувати деякі речі вручну, як пояснюється в документації тут: https://yunohost.org/#/ipv6. Якщо ви не можете увімкнути IPv6 або якщо це здається вам занадто технічним, ви також можете сміливо нехтувати цим попередженням.",
+ "diagnosis_ip_no_ipv6": "Сервер не має робочого IPv6.",
+ "diagnosis_ip_connected_ipv6": "Сервер під'єднаний до Інтернету через IPv6!",
+ "diagnosis_ip_no_ipv4": "Сервер не має робочого IPv4.",
+ "diagnosis_ip_connected_ipv4": "Сервер під'єднаний до Інтернету через IPv4!",
+ "diagnosis_no_cache": "Для категорії «{category}» ще немає кеша діагностики",
+ "diagnosis_failed": "Не вдалося отримати результат діагностики для категорії '{category}': {error}",
+ "diagnosis_everything_ok": "Усе виглядає добре для {category}!",
+ "diagnosis_found_warnings": "Знайдено {warnings} пунктів, які можна поліпшити для {category}.",
+ "diagnosis_found_errors_and_warnings": "Знайдено {errors} істотний (і) питання (и) (і {warnings} попередження (я)), що відносяться до {category}!",
+ "diagnosis_found_errors": "Знайдена {errors} важлива проблема (і), пов'язана з {category}!",
+ "diagnosis_ignored_issues": "(+ {nb_ignored} знехтувана проблема (проблеми))",
+ "diagnosis_cant_run_because_of_dep": "Неможливо запустити діагностику для {category}, поки є важливі проблеми, пов'язані з {dep}.",
+ "diagnosis_cache_still_valid": "(Кеш все ще дійсний для діагностики {category}. Повторна діагностика поки не проводиться!)",
+ "diagnosis_failed_for_category": "Не вдалося провести діагностику для категорії '{category}': {error}",
+ "diagnosis_display_tip": "Щоб побачити знайдені проблеми, ви можете перейти в розділ Діагностика в вебадміністрації або виконати команду 'yunohost diagnosis show --issues --human-readable' з командного рядка.",
+ "diagnosis_package_installed_from_sury_details": "Деякі пакети були ненавмисно встановлені зі стороннього репозиторію під назвою Sury. Команда YunoHost поліпшила стратегію роботи з цими пакетами, але очікується, що в деяких системах, які встановили застосунки PHP7.3 ще на Stretch, залишаться деякі невідповідності. Щоб виправити це становище, спробуйте виконати наступну команду: {cmd_to_fix} ",
+ "diagnosis_package_installed_from_sury": "Деякі системні пакети мають бути зістарені у версії",
+ "diagnosis_backports_in_sources_list": "Схоже, що apt (менеджер пакетів) налаштований на використання репозиторія backports. Якщо ви не знаєте, що робите, ми наполегливо не радимо встановлювати пакети з backports, тому що це може привести до нестабільності або конфліктів у вашій системі.",
+ "diagnosis_basesystem_ynh_inconsistent_versions": "Ви використовуєте несумісні версії пакетів YunoHost... швидше за все, через невдале або часткове оновлення.",
+ "diagnosis_basesystem_ynh_main_version": "Сервер працює під управлінням YunoHost {main_version} ({repo})",
+ "diagnosis_basesystem_ynh_single_version": "{package} версія: {version} ({repo})",
+ "diagnosis_basesystem_kernel": "Сервер працює під управлінням ядра Linux {kernel_version}",
+ "diagnosis_basesystem_host": "Сервер працює під управлінням Debian {debian_version}",
+ "diagnosis_basesystem_hardware_model": "Модель сервера - {model}",
+ "diagnosis_basesystem_hardware": "Архітектура апаратного забезпечення сервера - {virt} {arch}",
+ "custom_app_url_required": "Ви повинні надати URL-адресу для оновлення вашого користувацького застосунку {app}",
+ "confirm_app_install_thirdparty": "НЕБЕЗПЕЧНО! Цей застосунок не входить у каталог застосунків YunoHost. Установлення сторонніх застосунків може порушити цілісність і безпеку вашої системи. Вам не слід встановлювати його, якщо ви не знаєте, що робите. НІЯКОЇ ПІДТРИМКИ НЕ БУДЕ, якщо цей застосунок не буде працювати або зламає вашу систему... Якщо ви все одно готові піти на такий ризик, введіть '{answers}'",
+ "confirm_app_install_danger": "НЕБЕЗПЕЧНО! Відомо, що цей застосунок все ще експериментальний (якщо не сказати, що він явно не працює)! Вам не слід встановлювати його, якщо ви не знаєте, що робите. Ніякої підтримки не буде надано, якщо цей застосунок не буде працювати або зламає вашу систему... Якщо ви все одно готові ризикнути, введіть '{answers}'",
+ "confirm_app_install_warning": "Попередження: Цей застосунок може працювати, але він не дуже добре інтегрований в YunoHost. Деякі функції, такі як єдина реєстрація та резервне копіювання/відновлення, можуть бути недоступні. Все одно встановити? [{answers}]. ",
+ "certmanager_unable_to_parse_self_CA_name": "Не вдалося розібрати назву самопідписного центру (файл: {file})",
+ "certmanager_self_ca_conf_file_not_found": "Не вдалося знайти файл конфігурації для самопідписного центру (файл: {file})",
+ "certmanager_no_cert_file": "Не вдалося розпізнати файл сертифіката для домену {domain} (файл: {file})",
+ "certmanager_hit_rate_limit": "Для цього набору доменів {domain} недавно було випущено дуже багато сертифікатів. Будь ласка, спробуйте ще раз пізніше. Див. https://letsencrypt.org/docs/rate-limits/ для отримання подробиць",
+ "certmanager_warning_subdomain_dns_record": "Піддомен '{subdomain}' не дозволяється на тій же IP-адресі, що і '{domain}'. Деякі функції будуть недоступні, поки ви не виправите це і не перестворите сертифікат.",
+ "certmanager_domain_http_not_working": "Домен {domain}, схоже, не доступний через HTTP. Будь ласка, перевірте категорію 'Мережа' в діагностиці для отримання додаткових даних. (Якщо ви знаєте, що робите, використовуйте '--no-checks', щоб вимкнути ці перевірки).",
+ "certmanager_domain_dns_ip_differs_from_public_ip": "DNS-записи для домену '{domain}' відрізняються від IP цього сервера. Будь ласка, перевірте категорію 'DNS-записи' (основні) в діагностиці для отримання додаткових даних. Якщо ви недавно змінили запис A, будь ласка, зачекайте, поки він пошириться (деякі програми перевірки поширення DNS доступні в Інтернеті). (Якщо ви знаєте, що робите, використовуйте '--no-checks', щоб вимкнути ці перевірки).",
+ "certmanager_domain_cert_not_selfsigned": "Сертифікат для домену {domain} не є самопідписаним. Ви впевнені, що хочете замінити його? (Для цього використовуйте '--force').",
+ "certmanager_domain_not_diagnosed_yet": "Поки немає результатів діагностики для домену {domain}. Будь ласка, повторно проведіть діагностику для категорій 'DNS-записи' і 'Мережа' в розділі діагностики, щоб перевірити, чи готовий домен до Let's Encrypt. (Або, якщо ви знаєте, що робите, використовуйте '--no-checks', щоб вимкнути ці перевірки).",
+ "certmanager_certificate_fetching_or_enabling_failed": "Спроба використовувати новий сертифікат для {domain} не спрацювала...",
+ "certmanager_cert_signing_failed": "Не вдалося підписати новий сертифікат",
+ "certmanager_cert_renew_success": "Сертифікат Let's Encrypt оновлений для домену '{domain}'",
+ "certmanager_cert_install_success_selfsigned": "Самопідписаний сертифікат тепер встановлений для домену '{domain}'",
+ "certmanager_cert_install_success": "Сертифікат Let's Encrypt тепер встановлений для домена '{domain}'",
+ "certmanager_cannot_read_cert": "Щось не так сталося при спробі відкрити поточний сертифікат для домена {domain} (файл: {file}), причина: {reason}",
+ "certmanager_attempt_to_replace_valid_cert": "Ви намагаєтеся перезаписати хороший дійсний сертифікат для домену {domain}! (Використовуйте --force для обходу)",
+ "certmanager_attempt_to_renew_valid_cert": "Строк дії сертифіката для домена '{domain}' не закінчується! (Ви можете використовувати --force, якщо знаєте, що робите)",
+ "certmanager_attempt_to_renew_nonLE_cert": "Сертифікат для домену '{domain}' не випущено Let's Encrypt. Неможливо продовжити його автоматично!",
+ "certmanager_acme_not_configured_for_domain": "Завдання ACME не може бути запущене для {domain} прямо зараз, тому що в його nginx-конфігурації відсутній відповідний фрагмент коду... Будь ласка, переконайтеся, що конфігурація nginx оновлена за допомогою `yunohost tools regen-conf nginx --dry-run --with-diff`.",
+ "backup_with_no_restore_script_for_app": "{app} не має скрипта відновлення, ви не зможете автоматично відновити резервну копію цього застосунку.",
+ "backup_with_no_backup_script_for_app": "Застосунок '{app}' не має скрипта резервного копіювання. Нехтую ним.",
+ "backup_unable_to_organize_files": "Неможливо використовувати швидкий спосіб для організації файлів в архіві",
+ "backup_system_part_failed": "Не вдалося створити резервну копію системної частини '{part}'",
+ "backup_running_hooks": "Запуск гачків (hook) резервного копіювання...",
+ "backup_permission": "Дозвіл на резервне копіювання для {app}",
+ "backup_output_symlink_dir_broken": "Ваш архівний каталог '{path}' є неробочим символічним посиланням. Можливо, ви забули перемонтувати або підключити носій, на який вона вказує.",
+ "backup_output_directory_required": "Ви повинні вказати вихідний каталог для резервного копіювання",
+ "backup_output_directory_not_empty": "Ви повинні вибрати порожній вихідний каталог",
+ "backup_output_directory_forbidden": "Виберіть інший вихідний каталог. Резервні копії не можуть бути створені в підкаталогах /bin,/boot,/dev,/etc,/lib,/root,/run,/sbin,/sys,/usr,/var або /home/yunohost.backup/archives",
+ "backup_nothings_done": "Нема що зберігати",
+ "backup_no_uncompress_archive_dir": "Немає такого каталогу нестислого архіву",
+ "backup_mount_archive_for_restore": "Підготовлення архіву для відновлення...",
+ "backup_method_tar_finished": "Створено архів резервного копіювання TAR",
+ "backup_method_custom_finished": "Користувацький спосіб резервного копіювання '{method}' завершено",
+ "backup_method_copy_finished": "Резервне копіювання завершено",
+ "backup_hook_unknown": "Гачок (hook) резервного копіювання '{hook}' невідомий",
+ "backup_deleted": "Резервна копія видалена",
+ "backup_delete_error": "Не вдалося видалити '{path}'",
+ "backup_custom_mount_error": "Користувацький спосіб резервного копіювання не зміг пройти етап 'монтування'",
+ "backup_custom_backup_error": "Користувацький спосіб резервного копіювання не зміг пройти етап 'резервне копіювання'",
+ "backup_csv_creation_failed": "Не вдалося створити CSV-файл, необхідний для відновлення",
+ "backup_csv_addition_failed": "Не вдалося додати файли для резервного копіювання в CSV-файл",
+ "backup_creation_failed": "Не вдалося створити архів резервного копіювання",
+ "backup_create_size_estimation": "Архів буде містити близько {size} даних.",
+ "backup_created": "Резервна копія створена",
+ "backup_couldnt_bind": "Не вдалося зв'язати {src} з {dest}.",
+ "backup_copying_to_organize_the_archive": "Копіювання {size} МБ для організації архіву",
+ "backup_cleaning_failed": "Не вдалося очистити тимчасовий каталог резервного копіювання",
+ "backup_cant_mount_uncompress_archive": "Не вдалося змонтувати нестислий архів як захищений від запису",
+ "backup_ask_for_copying_if_needed": "Ви бажаєте тимчасово виконати резервне копіювання з використанням {size} МБ? (Цей спосіб використовується, оскільки деякі файли не можуть бути підготовлені дієвіше).",
+ "backup_archive_writing_error": "Не вдалося додати файли '{source}' (названі в архіві '{dest}') для резервного копіювання в стислий архів '{archive}'",
+ "backup_archive_system_part_not_available": "Системна частина '{part}' недоступна в цій резервній копії",
+ "backup_archive_corrupted": "Схоже, що архів резервної копії '{archive}' пошкоджений: {error}",
+ "backup_archive_cant_retrieve_info_json": "Не вдалося завантажити відомості для архіву '{archive}'... info.json не може бути отриманий (або не є правильним json).",
+ "backup_archive_open_failed": "Не вдалося відкрити архів резервної копії",
+ "backup_archive_name_unknown": "Невідомий локальний архів резервного копіювання з назвою '{name}'",
+ "backup_archive_name_exists": "Архів резервного копіювання з такою назвою вже існує.",
+ "backup_archive_broken_link": "Не вдалося отримати доступ до архіву резервного копіювання (неробоче посилання на {path})",
+ "backup_archive_app_not_found": "Не вдалося знайти {app} в архіві резервного копіювання",
+ "backup_applying_method_tar": "Створення резервного TAR-архіву...",
+ "backup_applying_method_custom": "Виклик користувацького способу резервного копіювання '{method}'...",
+ "backup_applying_method_copy": "Копіювання всіх файлів у резервну копію...",
+ "backup_app_failed": "Не вдалося створити резервну копію {app}",
+ "backup_actually_backuping": "Створення резервного архіву з зібраних файлів...",
+ "backup_abstract_method": "Цей спосіб резервного копіювання ще не реалізований",
+ "ask_password": "Пароль",
+ "ask_new_path": "Новий шлях",
+ "ask_new_domain": "Новий домен",
+ "ask_new_admin_password": "Новий пароль адміністрації",
+ "ask_main_domain": "Основний домен",
+ "ask_lastname": "Прізвище",
+ "ask_firstname": "Ім'я",
+ "ask_user_domain": "Домен для адреси е-пошти користувача і облікового запису XMPP",
+ "apps_catalog_update_success": "Каталог застосунків був оновлений!",
+ "apps_catalog_obsolete_cache": "Кеш каталогу застосунків порожній або застарів.",
+ "apps_catalog_failed_to_download": "Неможливо завантажити каталог застосунків {apps_catalog}: {error}",
+ "apps_catalog_updating": "Оновлення каталогу застосунків…",
+ "apps_catalog_init_success": "Систему каталогу застосунків ініціалізовано!",
+ "apps_already_up_to_date": "Усі застосунки вже оновлено",
+ "app_packaging_format_not_supported": "Цей застосунок не може бути встановлено, тому що формат його упакування не підтримується вашою версією YunoHost. Можливо, вам слід оновити систему.",
+ "app_upgraded": "{app} оновлено",
+ "app_upgrade_some_app_failed": "Деякі застосунки не можуть бути оновлені",
+ "app_upgrade_script_failed": "Сталася помилка в скрипті оновлення застосунку",
+ "app_upgrade_failed": "Не вдалося оновити {app}: {error}",
+ "app_upgrade_app_name": "Зараз оновлюємо {app}...",
+ "app_upgrade_several_apps": "Наступні застосунки буде оновлено: {apps}",
+ "app_unsupported_remote_type": "Для застосунку використовується непідтримуваний віддалений тип",
+ "app_unknown": "Невідомий застосунок",
+ "app_start_restore": "Відновлення {app}...",
+ "app_start_backup": "Збирання файлів для резервного копіювання {app}...",
+ "app_start_remove": "Вилучення {app}...",
+ "app_start_install": "Установлення {app}...",
+ "app_sources_fetch_failed": "Не вдалося отримати джерельні файли, URL-адреса правильна?",
+ "app_restore_script_failed": "Сталася помилка всередині скрипта відновлення застосунку",
+ "app_restore_failed": "Не вдалося відновити {app}: {error}",
+ "app_remove_after_failed_install": "Вилучення застосунку після збою встановлення...",
+ "app_requirements_unmeet": "Вимоги не виконані для {app}, пакет {pkgname} ({version}) повинен бути {spec}",
+ "app_requirements_checking": "Перевіряння необхідних пакетів для {app}...",
+ "app_removed": "{app} видалено",
+ "app_not_properly_removed": "{app} не було видалено належним чином",
+ "app_not_installed": "Не вдалося знайти {app} в списку встановлених застосунків: {all_apps}",
+ "app_not_correctly_installed": "{app}, схоже, неправильно встановлено",
+ "app_not_upgraded": "Застосунок '{failed_app}' не вдалося оновити, і, як наслідок, оновлення таких застосунків було скасовано: {apps}",
+ "app_manifest_install_ask_is_public": "Чи має цей застосунок бути відкритим для анонімних відвідувачів?",
+ "app_manifest_install_ask_admin": "Виберіть користувача-адміністратора для цього застосунку",
+ "app_manifest_install_ask_password": "Виберіть пароль адміністрації для цього застосунку",
+ "diagnosis_description_apps": "Застосунки",
+ "user_import_success": "Користувачів успішно імпортовано",
+ "user_import_nothing_to_do": "Не потрібно імпортувати жодного користувача",
+ "user_import_failed": "Операція імпорту користувачів цілковито не вдалася",
+ "user_import_partial_failed": "Операція імпорту користувачів частково не вдалася",
+ "user_import_missing_columns": "Відсутні такі стовпці: {columns}",
+ "user_import_bad_file": "Ваш файл CSV неправильно відформатовано, він буде знехтуваний, щоб уникнути потенційної втрати даних",
+ "user_import_bad_line": "Неправильний рядок {line}: {details}",
+ "invalid_password": "Недійсний пароль",
+ "log_user_import": "Імпорт користувачів",
+ "ldap_server_is_down_restart_it": "Службу LDAP вимкнено, спробуйте перезапустити її...",
+ "ldap_server_down": "Не вдається під'єднатися до сервера LDAP",
+ "global_settings_setting_security_experimental_enabled": "Увімкнути експериментальні функції безпеки (не вмикайте це, якщо ви не знаєте, що робите!)",
+ "diagnosis_apps_deprecated_practices": "Установлена версія цього застосунку все ще використовує деякі надзастарілі практики упакування. Вам дійсно варто подумати про його оновлення.",
+ "diagnosis_apps_outdated_ynh_requirement": "Установлена версія цього застосунку вимагає лише Yunohost >= 2.x, що, як правило, вказує на те, що воно не відповідає сучасним рекомендаційним практикам упакування та порадникам. Вам дійсно варто подумати про його оновлення.",
+ "diagnosis_apps_bad_quality": "Цей застосунок наразі позначено як зламаний у каталозі застосунків YunoHost. Це може бути тимчасовою проблемою, поки організатори намагаються вирішити цю проблему. Тим часом оновлення цього застосунку вимкнено.",
+ "diagnosis_apps_broken": "Цей застосунок наразі позначено як зламаний у каталозі застосунків YunoHost. Це може бути тимчасовою проблемою, поки організатори намагаються вирішити цю проблему. Тим часом оновлення цього застосунку вимкнено.",
+ "diagnosis_apps_not_in_app_catalog": "Цей застосунок не міститься у каталозі застосунків YunoHost. Якщо він був у минулому і був видалений, вам слід подумати про видалення цього застосунку, оскільки він не отримає оновлення, і це може поставити під загрозу цілісність та безпеку вашої системи.",
+ "diagnosis_apps_issue": "Виявлено проблему із застосунком {app}",
+ "diagnosis_apps_allgood": "Усі встановлені застосунки дотримуються основних способів упакування",
+ "diagnosis_high_number_auth_failures": "Останнім часом сталася підозріло велика кількість помилок автентифікації. Ви можете переконатися, що fail2ban працює і правильно налаштований, або скористатися власним портом для SSH, як описано в https://yunohost.org/security.",
+ "global_settings_setting_security_nginx_redirect_to_https": "Типово переспрямовувати HTTP-запити до HTTPS (НЕ ВИМИКАЙТЕ, якщо ви дійсно не знаєте, що робите!)",
+ "app_config_unable_to_apply": "Не вдалося застосувати значення панелі конфігурації.",
+ "app_config_unable_to_read": "Не вдалося розпізнати значення панелі конфігурації.",
+ "config_apply_failed": "Не вдалося застосувати нову конфігурацію: {error}",
+ "config_cant_set_value_on_section": "Ви не можете встановити одне значення на весь розділ конфігурації.",
+ "config_forbidden_keyword": "Ключове слово '{keyword}' зарезервовано, ви не можете створити або використовувати панель конфігурації з запитом із таким ID.",
+ "config_no_panel": "Панель конфігурації не знайдено.",
+ "config_unknown_filter_key": "Ключ фільтра '{filter_key}' недійсний.",
+ "config_validate_color": "Колір RGB має бути дійсним шістнадцятковим кольоровим кодом",
+ "config_validate_date": "Дата має бути дійсною, наприклад, у форматі РРРР-ММ-ДД",
+ "config_validate_email": "Е-пошта має бути дійсною",
+ "config_validate_time": "Час має бути дійсним, наприклад ГГ:ХХ",
+ "config_validate_url": "Вебадреса має бути дійсною",
+ "config_version_not_supported": "Версії конфігураційної панелі '{version}' не підтримуються.",
+ "danger": "Небезпека:",
+ "file_extension_not_accepted": "Файл '{path}' відхиляється, бо його розширення не входить в число прийнятих розширень: {accept}",
+ "invalid_number_min": "Має бути більшим за {min}",
+ "invalid_number_max": "Має бути меншим за {max}",
+ "log_app_config_set": "Застосувати конфігурацію до застосунку '{}'",
+ "service_not_reloading_because_conf_broken": "Неможливо перезавантажити/перезапустити службу '{name}', тому що її конфігурацію порушено: {errors}",
+ "app_argument_password_help_optional": "Введіть один пробіл, щоб очистити пароль",
+ "app_argument_password_help_keep": "Натисніть Enter, щоб зберегти поточне значення"
+}
\ No newline at end of file
diff --git a/locales/zh_Hans.json b/locales/zh_Hans.json
index 0967ef424..9176ebab9 100644
--- a/locales/zh_Hans.json
+++ b/locales/zh_Hans.json
@@ -1 +1,629 @@
-{}
+{
+ "password_too_simple_1": "密码长度至少为8个字符",
+ "backup_created": "备份已创建",
+ "app_start_remove": "正在删除{app}……",
+ "admin_password_change_failed": "无法修改密码",
+ "admin_password_too_long": "请选择一个小于127个字符的密码",
+ "app_upgrade_failed": "不能升级{app}:{error}",
+ "app_id_invalid": "无效 app ID",
+ "app_unknown": "未知应用",
+ "admin_password_changed": "管理密码已更改",
+ "aborting": "正在放弃。",
+ "admin_password": "管理员密码",
+ "app_start_restore": "正在恢复{app}……",
+ "action_invalid": "无效操作 '{action}'",
+ "ask_lastname": "姓",
+ "diagnosis_everything_ok": "{category}一切看起来不错!",
+ "diagnosis_found_warnings": "找到{warnings}项,可能需要{category}进行改进。",
+ "diagnosis_found_errors_and_warnings": "发现与{category}相关的{errors}个重要问题(和{warnings}警告)!",
+ "diagnosis_found_errors": "发现与{category}相关的{errors}个重要问题!",
+ "diagnosis_ignored_issues": "(+ {nb_ignored} 个被忽略的问题)",
+ "diagnosis_cant_run_because_of_dep": "存在与{dep}相关的重要问题时,无法对{category}进行诊断。",
+ "diagnosis_cache_still_valid": "(高速缓存对于{category}诊断仍然有效。暂时不会对其进行重新诊断!)",
+ "diagnosis_failed_for_category": "诊断类别 '{category}'失败: {error}",
+ "diagnosis_display_tip": "要查看发现的问题,您可以转到Webadmin的“诊断”部分,或从命令行运行'yunohost diagnosis show --issues --human-readable'。",
+ "diagnosis_package_installed_from_sury": "一些系统软件包应降级",
+ "diagnosis_backports_in_sources_list": "看起来apt(程序包管理器)已配置为使用backports存储库。 除非您真的知道自己在做什么,否则我们强烈建议您不要从backports安装软件包,因为这很可能在您的系统上造成不稳定或冲突。",
+ "diagnosis_basesystem_ynh_inconsistent_versions": "您运行的YunoHost软件包版本不一致,很可能是由于升级失败或部分升级造成的。",
+ "diagnosis_basesystem_ynh_main_version": "服务器正在运行YunoHost {main_version} ({repo})",
+ "diagnosis_basesystem_ynh_single_version": "{package} 版本: {version} ({repo})",
+ "diagnosis_basesystem_kernel": "服务器正在运行Linux kernel {kernel_version}",
+ "diagnosis_basesystem_host": "服务器正在运行Debian {debian_version}",
+ "diagnosis_basesystem_hardware_model": "服务器型号为 {model}",
+ "diagnosis_basesystem_hardware": "服务器硬件架构为{virt} {arch}",
+ "custom_app_url_required": "您必须提供URL才能升级自定义应用 {app}",
+ "confirm_app_install_thirdparty": "危险! 该应用程序不是YunoHost的应用程序目录的一部分。 安装第三方应用程序可能会损害系统的完整性和安全性。 除非您知道自己在做什么,否则可能不应该安装它, 如果此应用无法运行或无法正常使用系统,将不会提供任何支持。如果您仍然愿意承担此风险,请输入'{answers}'",
+ "confirm_app_install_danger": "危险! 已知此应用仍处于实验阶段(如果未明确无法正常运行)! 除非您知道自己在做什么,否则可能不应该安装它。 如果此应用无法运行或无法正常使用系统,将不会提供任何支持。如果您仍然愿意承担此风险,请输入'{answers}'",
+ "confirm_app_install_warning": "警告:此应用程序可能可以运行,但未与YunoHost很好地集成。某些功能(例如单点登录和备份/还原)可能不可用, 仍要安装吗? [{answers}] ",
+ "certmanager_unable_to_parse_self_CA_name": "无法解析自签名授权的名称 (file: {file})",
+ "certmanager_self_ca_conf_file_not_found": "找不到用于自签名授权的配置文件(file: {file})",
+ "certmanager_no_cert_file": "无法读取域{domain}的证书文件(file: {file})",
+ "certmanager_hit_rate_limit": "最近已经为此域{domain}颁发了太多的证书。请稍后再试。有关更多详细信息,请参见https://letsencrypt.org/docs/rate-limits/",
+ "certmanager_warning_subdomain_dns_record": "子域'{subdomain}' 不能解析为与 '{domain}'相同的IP地址, 在修复此问题并重新生成证书之前,某些功能将不可用。",
+ "certmanager_domain_http_not_working": "域 {domain}似乎无法通过HTTP访问。请检查诊断中的“网络”类别以获取更多信息。(如果您知道自己在做什么,请使用“ --no-checks”关闭这些检查。)",
+ "certmanager_domain_dns_ip_differs_from_public_ip": "域'{domain}' 的DNS记录与此服务器的IP不同。请检查诊断中的“ DNS记录”(基本)类别,以获取更多信息。 如果您最近修改了A记录,请等待它传播(某些DNS传播检查器可在线获得)。 (如果您知道自己在做什么,请使用“ --no-checks”关闭这些检查。)",
+ "certmanager_domain_cert_not_selfsigned": "域 {domain} 的证书不是自签名的, 您确定要更换它吗?(使用“ --force”这样做。)",
+ "certmanager_domain_not_diagnosed_yet": "尚无域{domain} 的诊断结果。请在诊断部分中针对“ DNS记录”和“ Web”类别重新运行诊断,以检查该域是否已准备好安装“Let's Encrypt”证书。(或者,如果您知道自己在做什么,请使用“ --no-checks”关闭这些检查。)",
+ "certmanager_certificate_fetching_or_enabling_failed": "尝试将新证书用于 {domain}无效...",
+ "certmanager_cert_signing_failed": "无法签署新证书",
+ "certmanager_cert_install_success_selfsigned": "为域 '{domain}'安装了自签名证书",
+ "certmanager_cert_renew_success": "为域 '{domain}'续订“Let's Encrypt”证书",
+ "certmanager_cert_install_success": "为域'{domain}'安装“Let's Encrypt”证书",
+ "certmanager_cannot_read_cert": "尝试为域 {domain}(file: {file})打开当前证书时发生错误,原因: {reason}",
+ "certmanager_attempt_to_replace_valid_cert": "您正在尝试覆盖域{domain}的有效证书!(使用--force绕过)",
+ "certmanager_attempt_to_renew_valid_cert": "域'{domain}'的证书不会过期!(如果知道自己在做什么,则可以使用--force)",
+ "certmanager_attempt_to_renew_nonLE_cert": "“Let's Encrypt”未颁发域'{domain}'的证书,无法自动续订!",
+ "certmanager_acme_not_configured_for_domain": "目前无法针对{domain}运行ACME挑战,因为其nginx conf缺少相应的代码段...请使用“yunohost tools regen-conf nginx --dry-run --with-diff”确保您的nginx配置是最新的。",
+ "backup_with_no_restore_script_for_app": "{app} 没有还原脚本,您将无法自动还原该应用程序的备份。",
+ "backup_with_no_backup_script_for_app": "应用'{app}'没有备份脚本。无视。",
+ "backup_unable_to_organize_files": "无法使用快速方法来组织档案中的文件",
+ "backup_system_part_failed": "无法备份'{part}'系统部分",
+ "backup_running_hooks": "正在运行备份挂钩...",
+ "backup_permission": "{app}的备份权限",
+ "backup_output_symlink_dir_broken": "您的存档目录'{path}' 是断开的符号链接。 也许您忘记了重新安装/装入或插入它指向的存储介质。",
+ "backup_output_directory_required": "您必须提供备份的输出目录",
+ "backup_output_directory_not_empty": "您应该选择一个空的输出目录",
+ "backup_output_directory_forbidden": "选择一个不同的输出目录。无法在/bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var或/home/yunohost.backup/archives子文件夹中创建备份",
+ "backup_nothings_done": "没什么可保存的",
+ "backup_no_uncompress_archive_dir": "没有这样的未压缩存档目录",
+ "backup_mount_archive_for_restore": "正在准备存档以进行恢复...",
+ "backup_method_tar_finished": "TAR备份存档已创建",
+ "backup_method_custom_finished": "自定义备份方法'{method}' 已完成",
+ "backup_method_copy_finished": "备份副本已完成",
+ "backup_hook_unknown": "备用挂钩'{hook}'未知",
+ "backup_deleted": "备份已删除",
+ "backup_delete_error": "无法删除'{path}'",
+ "backup_custom_mount_error": "自定义备份方法无法通过“挂载”步骤",
+ "backup_custom_backup_error": "自定义备份方法无法通过“备份”步骤",
+ "backup_csv_creation_failed": "无法创建还原所需的CSV文件",
+ "backup_csv_addition_failed": "无法将文件添加到CSV文件中进行备份",
+ "backup_creation_failed": "无法创建备份存档",
+ "backup_create_size_estimation": "归档文件将包含约{size}个数据。",
+ "backup_couldnt_bind": "无法将 {src} 绑定到{dest}.",
+ "backup_copying_to_organize_the_archive": "复制{size} MB来整理档案",
+ "backup_cleaning_failed": "无法清理临时备份文件夹",
+ "backup_cant_mount_uncompress_archive": "无法将未压缩的归档文件挂载为写保护",
+ "backup_ask_for_copying_if_needed": "您是否要临时使用{size} MB进行备份?(由于无法使用更有效的方法准备某些文件,因此使用这种方式。)",
+ "backup_archive_writing_error": "无法将要备份的文件'{source}'(在归档文件'{dest}'中命名)添加到压缩归档文件'{archive}'中",
+ "backup_archive_system_part_not_available": "该备份中系统部分'{part}'不可用",
+ "backup_archive_corrupted": "备份存档'{archive}' 似乎已损坏 : {error}",
+ "backup_archive_cant_retrieve_info_json": "无法加载档案'{archive}'的信息...无法检索到info.json(或者它不是有效的json)。",
+ "backup_archive_open_failed": "无法打开备份档案",
+ "backup_archive_name_unknown": "未知的本地备份档案名为'{name}'",
+ "backup_archive_name_exists": "具有该名称的备份存档已经存在。",
+ "backup_archive_broken_link": "无法访问备份存档(指向{path}的链接断开)",
+ "backup_archive_app_not_found": "在备份档案中找不到 {app}",
+ "backup_applying_method_tar": "创建备份TAR存档...",
+ "backup_applying_method_custom": "调用自定义备份方法'{method}'...",
+ "backup_applying_method_copy": "正在将所有文件复制到备份...",
+ "backup_app_failed": "无法备份{app}",
+ "backup_actually_backuping": "根据收集的文件创建备份档案...",
+ "backup_abstract_method": "此备份方法尚未实现",
+ "ask_password": "密码",
+ "ask_new_path": "新路径",
+ "ask_new_domain": "新域名",
+ "ask_new_admin_password": "新的管理密码",
+ "ask_main_domain": "主域",
+ "ask_firstname": "名",
+ "ask_user_domain": "用户的电子邮件地址和XMPP帐户要使用的域",
+ "apps_catalog_update_success": "应用程序目录已更新!",
+ "apps_catalog_obsolete_cache": "应用程序目录缓存为空或已过时。",
+ "apps_catalog_failed_to_download": "无法下载{apps_catalog} 应用目录: {error}",
+ "apps_catalog_updating": "正在更新应用程序目录…",
+ "apps_catalog_init_success": "应用目录系统已初始化!",
+ "apps_already_up_to_date": "所有应用程序都是最新的",
+ "app_packaging_format_not_supported": "无法安装此应用,因为您的YunoHost版本不支持其打包格式。 您应该考虑升级系统。",
+ "app_upgraded": "{app} 已升级",
+ "app_upgrade_some_app_failed": "某些应用无法升级",
+ "app_upgrade_script_failed": "应用升级脚本内部发生错误",
+ "app_upgrade_app_name": "现在升级{app} ...",
+ "app_upgrade_several_apps": "以下应用将被升级: {apps}",
+ "app_unsupported_remote_type": "应用程序使用的远程类型不受支持",
+ "app_start_backup": "正在收集要备份的文件,用于{app} ...",
+ "app_start_install": "{app}安装中...",
+ "app_sources_fetch_failed": "无法获取源文件,URL是否正确?",
+ "app_restore_script_failed": "应用还原脚本内部发生错误",
+ "app_restore_failed": "无法还原 {app}: {error}",
+ "app_remove_after_failed_install": "安装失败后删除应用程序...",
+ "app_requirements_unmeet": "{app}不符合要求,软件包{pkgname}({version}) 必须为{spec}",
+ "app_requirements_checking": "正在检查{app}所需的软件包...",
+ "app_removed": "{app} 已删除",
+ "app_not_properly_removed": "{app} 未正确删除",
+ "app_not_correctly_installed": "{app} 似乎安装不正确",
+ "app_not_upgraded": "应用程序'{failed_app}'升级失败,因此以下应用程序的升级已被取消: {apps}",
+ "app_manifest_install_ask_is_public": "该应用是否应该向匿名访问者公开?",
+ "app_manifest_install_ask_admin": "选择此应用的管理员用户",
+ "app_manifest_install_ask_password": "选择此应用的管理密码",
+ "additional_urls_already_removed": "权限'{permission}'的其他URL中已经删除了附加URL'{url}'",
+ "app_manifest_install_ask_path": "选择安装此应用的路径",
+ "app_manifest_install_ask_domain": "选择应安装此应用程序的域",
+ "app_manifest_invalid": "应用清单错误: {error}",
+ "app_location_unavailable": "该URL不可用,或与已安装的应用冲突:\n{apps}",
+ "app_label_deprecated": "不推荐使用此命令!请使用新命令 'yunohost user permission update'来管理应用标签。",
+ "app_make_default_location_already_used": "无法将'{app}' 设置为域上的默认应用,'{other_app}'已在使用'{domain}'",
+ "app_install_script_failed": "应用安装脚本内发生错误",
+ "app_install_failed": "无法安装 {app}: {error}",
+ "app_install_files_invalid": "这些文件无法安装",
+ "additional_urls_already_added": "附加URL '{url}' 已添加到权限'{permission}'的附加URL中",
+ "app_full_domain_unavailable": "抱歉,此应用必须安装在其自己的域中,但其他应用已安装在域“ {domain}”上。 您可以改用专用于此应用程序的子域。",
+ "app_extraction_failed": "无法解压缩安装文件",
+ "app_change_url_success": "{app} URL现在为 {domain}{path}",
+ "app_change_url_no_script": "应用程序'{app_name}'尚不支持URL修改. 也许您应该升级它。",
+ "app_change_url_identical_domains": "新旧domain / url_path是相同的('{domain}{path}'),无需执行任何操作。",
+ "app_argument_required": "参数'{name}'为必填项",
+ "app_argument_password_no_default": "解析密码参数'{name}'时出错:出于安全原因,密码参数不能具有默认值",
+ "app_argument_invalid": "为参数'{name}'选择一个有效值: {error}",
+ "app_argument_choice_invalid": "对参数'{name}'使用以下选项之一'{choices}'",
+ "app_already_up_to_date": "{app} 已经是最新的",
+ "app_already_installed": "{app}已安装",
+ "app_action_broke_system": "该操作似乎破坏了以下重要服务:{services}",
+ "app_action_cannot_be_ran_because_required_services_down": "这些必需的服务应该正在运行以执行以下操作:{services},尝试重新启动它们以继续操作(考虑调查为什么它们出现故障)。",
+ "already_up_to_date": "无事可做。一切都已经是最新的了。",
+ "postinstall_low_rootfsspace": "根文件系统的总空间小于10 GB,这非常令人担忧!您可能很快就会用完磁盘空间!建议根文件系统至少有16GB, 如果尽管出现此警告仍要安装YunoHost,请使用--force-diskspace重新运行postinstall",
+ "port_already_opened": "{ip_version}个连接的端口 {port} 已打开",
+ "port_already_closed": "{ip_version}个连接的端口 {port} 已关闭",
+ "permission_require_account": "权限{permission}只对有账户的用户有意义,因此不能对访客启用。",
+ "permission_protected": "权限{permission}是受保护的。你不能向/从这个权限添加或删除访问者组。",
+ "permission_updated": "权限 '{permission}' 已更新",
+ "permission_update_failed": "无法更新权限 '{permission}': {error}",
+ "permission_not_found": "找不到权限'{permission}'",
+ "permission_deletion_failed": "无法删除权限 '{permission}': {error}",
+ "permission_deleted": "权限'{permission}' 已删除",
+ "permission_cant_add_to_all_users": "权限{permission}不能添加到所有用户。",
+ "regenconf_file_copy_failed": "无法将新的配置文件'{new}' 复制到'{conf}'",
+ "regenconf_file_backed_up": "将配置文件 '{conf}' 备份到 '{backup}'",
+ "regenconf_failed": "无法重新生成类别的配置: {categories}",
+ "regenconf_dry_pending_applying": "正在检查将应用于类别 '{category}'的待定配置…",
+ "regenconf_would_be_updated": "配置已更新为类别 '{category}'",
+ "regenconf_updated": "配置已针对'{category}'进行了更新",
+ "regenconf_now_managed_by_yunohost": "现在,配置文件'{conf}'由YunoHost(类别{category})管理。",
+ "regenconf_file_updated": "配置文件'{conf}' 已更新",
+ "regenconf_file_removed": "配置文件 '{conf}'已删除",
+ "regenconf_file_remove_failed": "无法删除配置文件 '{conf}'",
+ "regenconf_file_manually_removed": "配置文件'{conf}' 已手动删除,因此不会创建",
+ "regenconf_file_manually_modified": "配置文件'{conf}' 已被手动修改,不会被更新",
+ "regenconf_need_to_explicitly_specify_ssh": "ssh配置已被手动修改,但是您需要使用--force明确指定类别“ ssh”才能实际应用更改。",
+ "restore_nothings_done": "什么都没有恢复",
+ "restore_may_be_not_enough_disk_space": "您的系统似乎没有足够的空间(可用空间: {free_space} B,所需空间: {needed_space} B,安全系数: {margin} B)",
+ "restore_hook_unavailable": "'{part}'的恢复脚本在您的系统上和归档文件中均不可用",
+ "restore_failed": "无法还原系统",
+ "restore_extracting": "正在从存档中提取所需文件…",
+ "restore_confirm_yunohost_installed": "您真的要还原已经安装的系统吗? [{answers}]",
+ "restore_complete": "恢复完成",
+ "restore_cleaning_failed": "无法清理临时还原目录",
+ "restore_backup_too_old": "无法还原此备份存档,因为它来自过旧的YunoHost版本。",
+ "restore_already_installed_apps": "以下应用已安装,因此无法还原: {apps}",
+ "restore_already_installed_app": "已安装ID为'{app}' 的应用",
+ "regex_with_only_domain": "您不能将正则表达式用于域,而只能用于路径",
+ "regex_incompatible_with_tile": "/!\\ 打包者!权限“ {permission}”的show_tile设置为“ true”,因此您不能将正则表达式URL定义为主URL",
+ "service_cmd_exec_failed": "无法执行命令'{command}'",
+ "service_already_stopped": "服务'{service}'已被停止",
+ "service_already_started": "服务'{service}' 已在运行",
+ "service_added": "服务 '{service}'已添加",
+ "service_add_failed": "无法添加服务 '{service}'",
+ "server_reboot_confirm": "服务器会立即重启,确定吗? [{answers}]",
+ "server_reboot": "服务器将重新启动",
+ "server_shutdown_confirm": "服务器会立即关闭,确定吗?[{answers}]",
+ "server_shutdown": "服务器将关闭",
+ "root_password_replaced_by_admin_password": "您的root密码已替换为您的管理员密码。",
+ "root_password_desynchronized": "管理员密码已更改,但是YunoHost无法将此密码传播到root密码!",
+ "restore_system_part_failed": "无法还原 '{part}'系统部分",
+ "restore_running_hooks": "正在运行恢复挂钩…",
+ "restore_running_app_script": "正在还原应用'{app}'…",
+ "restore_removing_tmp_dir_failed": "无法删除旧的临时目录",
+ "service_description_yunohost-firewall": "管理打开和关闭服务的连接端口",
+ "service_description_yunohost-api": "管理YunoHost Web界面与系统之间的交互",
+ "service_description_ssh": "允许您通过终端(SSH协议)远程连接到服务器",
+ "service_description_slapd": "存储用户、域名和相关信息",
+ "service_description_rspamd": "过滤垃圾邮件和其他与电子邮件相关的功能",
+ "service_description_redis-server": "用于快速数据访问,任务队列和程序之间通信的专用数据库",
+ "service_description_postfix": "用于发送和接收电子邮件",
+ "service_description_php7.3-fpm": "使用NGINX运行用PHP编写的应用程序",
+ "service_description_nginx": "为你的服务器上托管的所有网站提供服务或访问",
+ "service_description_mysql": "存储应用程序数据(SQL数据库)",
+ "service_description_metronome": "管理XMPP即时消息传递帐户",
+ "service_description_fail2ban": "防止来自互联网的暴力攻击和其他类型的攻击",
+ "service_description_dovecot": "允许电子邮件客户端访问/获取电子邮件(通过IMAP和POP3)",
+ "service_description_dnsmasq": "处理域名解析(DNS)",
+ "service_started": "服务 '{service}' 已启动",
+ "service_start_failed": "无法启动服务 '{service}'\n\n最近的服务日志:{logs}",
+ "service_reloaded_or_restarted": "服务'{service}'已重新加载或重新启动",
+ "service_reload_or_restart_failed": "无法重新加载或重新启动服务'{service}'\n\n最近的服务日志:{logs}",
+ "service_restarted": "服务'{service}' 已重新启动",
+ "service_restart_failed": "无法重新启动服务 '{service}'\n\n最近的服务日志:{logs}",
+ "service_reloaded": "服务 '{service}' 已重新加载",
+ "service_reload_failed": "无法重新加载服务'{service}'\n\n最近的服务日志:{logs}",
+ "service_removed": "服务 '{service}' 已删除",
+ "service_remove_failed": "无法删除服务'{service}'",
+ "service_regen_conf_is_deprecated": "不建议使用'yunohost service regen-conf' ! 请改用'yunohost tools regen-conf'。",
+ "service_enabled": "现在,服务'{service}' 将在系统引导过程中自动启动。",
+ "service_enable_failed": "无法使服务 '{service}'在启动时自动启动。\n\n最近的服务日志:{logs}",
+ "service_disabled": "系统启动时,服务 '{service}' 将不再启动。",
+ "service_disable_failed": "服务'{service}'在启动时无法启动。\n\n最近的服务日志:{logs}",
+ "tools_upgrade_regular_packages": "现在正在升级 'regular' (与yunohost无关)的软件包…",
+ "tools_upgrade_cant_unhold_critical_packages": "无法解压关键软件包…",
+ "tools_upgrade_cant_hold_critical_packages": "无法保存重要软件包…",
+ "tools_upgrade_cant_both": "无法同时升级系统和应用程序",
+ "tools_upgrade_at_least_one": "请指定'apps', 或 'system'",
+ "this_action_broke_dpkg": "此操作破坏了dpkg / APT(系统软件包管理器)...您可以尝试通过SSH连接并运行`sudo apt install --fix-broken`和/或`sudo dpkg --configure -a`来解决此问题。",
+ "system_username_exists": "用户名已存在于系统用户列表中",
+ "system_upgraded": "系统已升级",
+ "ssowat_conf_updated": "SSOwat配置已更新",
+ "ssowat_conf_generated": "SSOwat配置已重新生成",
+ "show_tile_cant_be_enabled_for_regex": "你不能启用'show_tile',因为权限'{permission}'的URL是一个正则表达式",
+ "show_tile_cant_be_enabled_for_url_not_defined": "您现在无法启用 'show_tile' ,因为您必须先为权限'{permission}'定义一个URL",
+ "service_unknown": "未知服务 '{service}'",
+ "service_stopped": "服务'{service}' 已停止",
+ "service_stop_failed": "无法停止服务'{service}'\n\n最近的服务日志:{logs}",
+ "upnp_dev_not_found": "找不到UPnP设备",
+ "upgrading_packages": "升级程序包...",
+ "upgrade_complete": "升级完成",
+ "updating_apt_cache": "正在获取系统软件包的可用升级...",
+ "update_apt_cache_warning": "更新APT缓存(Debian的软件包管理器)时出了点问题。这是sources.list行的转储,这可能有助于确定有问题的行:\n{sourceslist}",
+ "update_apt_cache_failed": "无法更新APT的缓存(Debian的软件包管理器)。这是sources.list行的转储,这可能有助于确定有问题的行:\n{sourceslist}",
+ "unrestore_app": "{app} 将不会恢复",
+ "unlimit": "没有配额",
+ "unknown_main_domain_path": "'{app}'的域或路径未知。您需要指定一个域和一个路径,以便能够指定用于许可的URL。",
+ "unexpected_error": "出乎意料的错误: {error}",
+ "unbackup_app": "{app} 将不会保存",
+ "tools_upgrade_special_packages_completed": "YunoHost软件包升级完成。\n按[Enter]返回命令行",
+ "tools_upgrade_special_packages_explanation": "特殊升级将在后台继续。请在接下来的10分钟内(取决于硬件速度)在服务器上不要执行任何其他操作。此后,您可能必须重新登录Webadmin。升级日志将在“工具”→“日志”(在Webadmin中)或使用'yunohost log list'(从命令行)中可用。",
+ "tools_upgrade_special_packages": "现在正在升级'special'(与yunohost相关的)程序包…",
+ "tools_upgrade_regular_packages_failed": "无法升级软件包: {packages_list}",
+ "yunohost_installing": "正在安装YunoHost ...",
+ "yunohost_configured": "现在已配置YunoHost",
+ "yunohost_already_installed": "YunoHost已经安装",
+ "user_updated": "用户信息已更改",
+ "user_update_failed": "无法更新用户{user}: {error}",
+ "user_unknown": "未知用户: {user}",
+ "user_home_creation_failed": "无法为用户创建'home'文件夹",
+ "user_deletion_failed": "无法删除用户 {user}: {error}",
+ "user_deleted": "用户已删除",
+ "user_creation_failed": "无法创建用户 {user}: {error}",
+ "user_created": "用户创建",
+ "user_already_exists": "用户'{user}' 已存在",
+ "upnp_port_open_failed": "无法通过UPnP打开端口",
+ "upnp_enabled": "UPnP已启用",
+ "upnp_disabled": "UPnP已禁用",
+ "yunohost_not_installed": "YunoHost没有正确安装,请运行 'yunohost tools postinstall'",
+ "yunohost_postinstall_end_tip": "后期安装完成! 为了最终完成你的设置,请考虑:\n -通过webadmin的“用户”部分添加第一个用户(或在命令行中'yunohost user create ' );\n -通过网络管理员的“诊断”部分(或命令行中的'yunohost diagnosis run')诊断潜在问题;\n -阅读管理文档中的“完成安装设置”和“了解YunoHost”部分: https://yunohost.org/admindoc.",
+ "operation_interrupted": "该操作是否被手动中断?",
+ "invalid_regex": "无效的正则表达式:'{regex}'",
+ "installation_complete": "安装完成",
+ "hook_name_unknown": "未知的钩子名称 '{name}'",
+ "hook_list_by_invalid": "此属性不能用于列出钩子",
+ "hook_json_return_error": "无法读取来自钩子 {path}的返回,错误: {msg}。原始内容: {raw_content}",
+ "hook_exec_not_terminated": "脚本未正确完成: {path}",
+ "hook_exec_failed": "无法运行脚本: {path}",
+ "group_user_not_in_group": "用户{user}不在组{group}中",
+ "group_user_already_in_group": "用户{user}已在组{group}中",
+ "group_update_failed": "无法更新群组'{group}': {error}",
+ "group_updated": "群组 '{group}' 已更新",
+ "group_unknown": "群组 '{group}' 未知",
+ "group_deletion_failed": "无法删除群组'{group}': {error}",
+ "group_deleted": "群组'{group}' 已删除",
+ "group_cannot_be_deleted": "无法手动删除组{group}。",
+ "group_cannot_edit_primary_group": "不能手动编辑 '{group}' 组。它是旨在仅包含一个特定用户的主要组。",
+ "group_cannot_edit_visitors": "组“访客”不能手动编辑。这是一个代表匿名访问者的特殊小组",
+ "group_cannot_edit_all_users": "组“ all_users”不能手动编辑。这是一个特殊的组,旨在包含所有在YunoHost中注册的用户",
+ "group_creation_failed": "无法创建组'{group}': {error}",
+ "group_created": "创建了 '{group}'组",
+ "group_already_exist_on_system_but_removing_it": "系统组中已经存在组{group},但是YunoHost会将其删除...",
+ "group_already_exist_on_system": "系统组中已经存在组{group}",
+ "group_already_exist": "群组{group}已经存在",
+ "good_practices_about_admin_password": "现在,您将设置一个新的管理员密码。 密码至少应包含8个字符。并且出于安全考虑建议使用较长的密码同时尽可能使用各种字符(大写,小写,数字和特殊字符)。",
+ "global_settings_unknown_type": "意外的情况,设置{setting}似乎具有类型 {unknown_type} ,但是系统不支持该类型。",
+ "global_settings_setting_backup_compress_tar_archives": "创建新备份时,请压缩档案(.tar.gz) ,而不要压缩未压缩的档案(.tar)。注意:启用此选项意味着创建较小的备份存档,但是初始备份过程将明显更长且占用大量CPU。",
+ "global_settings_setting_smtp_relay_password": "SMTP中继主机密码",
+ "global_settings_setting_smtp_relay_user": "SMTP中继用户帐户",
+ "global_settings_setting_smtp_relay_port": "SMTP中继端口",
+ "global_settings_setting_smtp_allow_ipv6": "允许使用IPv6接收和发送邮件",
+ "global_settings_setting_ssowat_panel_overlay_enabled": "启用SSOwat面板覆盖",
+ "global_settings_setting_service_ssh_allow_deprecated_dsa_hostkey": "允许使用DSA主机密钥进行SSH守护程序配置(不建议使用)",
+ "global_settings_unknown_setting_from_settings_file": "设置中的未知密钥:'{setting_key}',将其丢弃并保存在/etc/yunohost/settings-unknown.json中",
+ "global_settings_setting_security_ssh_port": "SSH端口",
+ "global_settings_setting_security_postfix_compatibility": "Postfix服务器的兼容性与安全性的权衡。影响密码(以及其他与安全性有关的方面)",
+ "global_settings_setting_security_ssh_compatibility": "SSH服务器的兼容性与安全性的权衡。影响密码(以及其他与安全性有关的方面)",
+ "global_settings_setting_security_password_user_strength": "用户密码强度",
+ "global_settings_setting_security_password_admin_strength": "管理员密码强度",
+ "global_settings_setting_security_nginx_compatibility": "Web服务器NGINX的兼容性与安全性的权衡,影响密码(以及其他与安全性有关的方面)",
+ "global_settings_setting_pop3_enabled": "为邮件服务器启用POP3协议",
+ "global_settings_reset_success": "以前的设置现在已经备份到{path}",
+ "global_settings_key_doesnt_exists": "全局设置中不存在键'{settings_key}',您可以通过运行 'yunohost settings list'来查看所有可用键",
+ "global_settings_cant_write_settings": "无法保存设置文件,原因: {reason}",
+ "global_settings_cant_serialize_settings": "无法序列化设置数据,原因: {reason}",
+ "global_settings_cant_open_settings": "无法打开设置文件,原因: {reason}",
+ "global_settings_bad_type_for_setting": "设置 {setting},的类型错误,已收到{received_type},预期{expected_type}",
+ "global_settings_bad_choice_for_enum": "设置 {setting}的错误选择,收到了 '{choice}',但可用的选择有: {available_choices}",
+ "firewall_rules_cmd_failed": "某些防火墙规则命令失败。日志中的更多信息。",
+ "firewall_reloaded": "重新加载防火墙",
+ "firewall_reload_failed": "无法重新加载防火墙",
+ "file_does_not_exist": "文件{path} 不存在。",
+ "field_invalid": "无效的字段'{}'",
+ "experimental_feature": "警告:此功能是实验性的,不稳定,请不要使用它,除非您知道自己在做什么。",
+ "extracting": "提取中...",
+ "dyndns_unavailable": "域'{domain}' 不可用。",
+ "dyndns_domain_not_provided": "DynDNS提供者 {provider} 无法提供域 {domain}。",
+ "dyndns_registration_failed": "无法注册DynDNS域: {error}",
+ "dyndns_registered": "DynDNS域已注册",
+ "dyndns_provider_unreachable": "无法联系DynDNS提供者 {provider}: 您的YunoHost未正确连接到Internet或dynette服务器已关闭。",
+ "dyndns_no_domain_registered": "没有在DynDNS中注册的域",
+ "dyndns_key_not_found": "找不到该域的DNS密钥",
+ "dyndns_key_generating": "正在生成DNS密钥...可能需要一段时间。",
+ "dyndns_ip_updated": "在DynDNS上更新了您的IP",
+ "dyndns_ip_update_failed": "无法将IP地址更新到DynDNS",
+ "dyndns_could_not_check_available": "无法检查{provider}上是否可用 {domain}。",
+ "dyndns_could_not_check_provide": "无法检查{provider}是否可以提供 {domain}.",
+ "dpkg_lock_not_available": "该命令现在无法运行,因为另一个程序似乎正在使用dpkg锁(系统软件包管理器)",
+ "dpkg_is_broken": "您现在不能执行此操作,因为dpkg / APT(系统软件包管理器)似乎处于损坏状态……您可以尝试通过SSH连接并运行sudo apt install --fix-broken和/或 sudo dpkg --configure-a 来解决此问题.",
+ "downloading": "下载中…",
+ "done": "完成",
+ "domains_available": "可用域:",
+ "domain_name_unknown": "域'{domain}'未知",
+ "domain_uninstall_app_first": "这些应用程序仍安装在您的域中:\n{apps}\n\n请先使用 'yunohost app remove the_app_id' 将其卸载,或使用 'yunohost app change-url the_app_id'将其移至另一个域,然后再继续删除域",
+ "domain_remove_confirm_apps_removal": "删除该域将删除这些应用程序:\n{apps}\n\n您确定要这样做吗? [{answers}]",
+ "domain_hostname_failed": "无法设置新的主机名。稍后可能会引起问题(可能没问题)。",
+ "domain_exists": "该域已存在",
+ "domain_dyndns_root_unknown": "未知的DynDNS根域",
+ "domain_dyndns_already_subscribed": "您已经订阅了DynDNS域",
+ "domain_dns_conf_is_just_a_recommendation": "本页向你展示了*推荐的*配置。它并*不*为你配置DNS。你有责任根据该建议在你的DNS注册商处配置你的DNS区域。",
+ "domain_deletion_failed": "无法删除域 {domain}: {error}",
+ "domain_deleted": "域已删除",
+ "domain_creation_failed": "无法创建域 {domain}: {error}",
+ "domain_created": "域已创建",
+ "domain_cert_gen_failed": "无法生成证书",
+ "diagnosis_sshd_config_inconsistent": "看起来SSH端口是在/etc/ssh/sshd_config中手动修改, 从YunoHost 4.2开始,可以使用新的全局设置“ security.ssh.port”来避免手动编辑配置。",
+ "diagnosis_sshd_config_insecure": "SSH配置似乎已被手动修改,并且是不安全的,因为它不包含“ AllowGroups”或“ AllowUsers”指令以限制对授权用户的访问。",
+ "diagnosis_processes_killed_by_oom_reaper": "该系统最近杀死了某些进程,因为内存不足。这通常是系统内存不足或进程占用大量内存的征兆。 杀死进程的摘要:\n{kills_summary}",
+ "diagnosis_never_ran_yet": "看来这台服务器是最近安装的,还没有诊断报告可以显示。您应该首先从Web管理员运行完整的诊断,或者从命令行使用'yunohost diagnosis run' 。",
+ "diagnosis_unknown_categories": "以下类别是未知的: {categories}",
+ "diagnosis_http_nginx_conf_not_up_to_date_details": "要解决这种情况,请使用yunohost tools regen-conf nginx --dry-run --with-diff ,如果还可以,请使用yunohost tools regen-conf nginx --force 应用更改。",
+ "diagnosis_http_nginx_conf_not_up_to_date": "该域的nginx配置似乎已被手动修改,并阻止YunoHost诊断它是否可以在HTTP上访问。",
+ "diagnosis_http_partially_unreachable": "尽管域{domain}可以在 IPv{failed}中工作,但它似乎无法通过HTTP从外部网络通过HTTP到达IPv{passed}。",
+ "diagnosis_mail_outgoing_port_25_blocked_details": "您应该首先尝试在Internet路由器界面或主机提供商界面中取消阻止传出端口25。(某些托管服务提供商可能会要求您为此发送支持请求)。",
+ "diagnosis_mail_outgoing_port_25_blocked": "由于传出端口25在IPv{ipversion}中被阻止,因此SMTP邮件服务器无法向其他服务器发送电子邮件。",
+ "diagnosis_mail_outgoing_port_25_ok": "SMTP邮件服务器能够发送电子邮件(未阻止出站端口25)。",
+ "diagnosis_swap_tip": "请注意,如果服务器在SD卡或SSD存储器上托管交换,则可能会大大缩短设备的预期寿命。",
+ "diagnosis_swap_ok": "系统有{total}个交换!",
+ "diagnosis_swap_notsomuch": "系统只有{total}个交换。您应该考虑至少使用{recommended},以避免系统内存不足的情况。",
+ "diagnosis_swap_none": "系统根本没有交换分区。您应该考虑至少添加{recommended}交换,以避免系统内存不足的情况。",
+ "diagnosis_http_unreachable": "网域{domain}从本地网络外通过HTTP无法访问。",
+ "diagnosis_http_connection_error": "连接错误:无法连接到请求的域,很可能无法访问。",
+ "diagnosis_http_ok": "域{domain}可以通过HTTP从本地网络外部访问。",
+ "diagnosis_http_could_not_diagnose_details": "错误: {error}",
+ "diagnosis_http_could_not_diagnose": "无法诊断域是否可以从IPv{ipversion}中从外部访问。",
+ "diagnosis_http_hairpinning_issue_details": "这可能是由于您的ISP 光猫/路由器。因此,使用域名或全局IP时,来自本地网络外部的人员将能够按预期访问您的服务器,但无法访问来自本地网络内部的人员(可能与您一样)。您可以通过查看 https://yunohost.org/dns_local_network 来改善这种情况",
+ "diagnosis_http_hairpinning_issue": "您的本地网络似乎没有启用NAT回环功能。",
+ "diagnosis_ports_forwarding_tip": "要解决此问题,您很可能需要按照 https://yunohost.org/isp_box_config 中的说明,在Internet路由器上配置端口转发",
+ "diagnosis_ports_needed_by": "{category}功能(服务{service})需要公开此端口",
+ "diagnosis_ports_ok": "可以从外部访问端口{port}。",
+ "diagnosis_ports_partially_unreachable": "无法从外部通过IPv{failed}访问端口{port}。",
+ "diagnosis_ports_unreachable": "无法从外部访问端口{port}。",
+ "diagnosis_ports_could_not_diagnose_details": "错误: {error}",
+ "diagnosis_ports_could_not_diagnose": "无法诊断端口在IPv{ipversion}中是否可以从外部访问。",
+ "diagnosis_description_regenconf": "系统配置",
+ "diagnosis_description_mail": "电子邮件",
+ "diagnosis_description_web": "网页",
+ "diagnosis_description_ports": "开放端口",
+ "diagnosis_description_systemresources": "系统资源",
+ "diagnosis_description_services": "服务状态检查",
+ "diagnosis_description_dnsrecords": "DNS记录",
+ "diagnosis_description_ip": "互联网连接",
+ "diagnosis_description_basesystem": "基本系统",
+ "diagnosis_security_vulnerable_to_meltdown_details": "要解决此问题,您应该升级系统并重新启动以加载新的Linux内核(如果无法使用,请与您的服务器提供商联系)。有关更多信息,请参见https://meltdownattack.com/。",
+ "diagnosis_security_vulnerable_to_meltdown": "你似乎容易受到Meltdown关键安全漏洞的影响",
+ "diagnosis_regenconf_manually_modified": "配置文件 {file}
似乎已被手动修改。",
+ "diagnosis_regenconf_allgood": "所有配置文件均符合建议的配置!",
+ "diagnosis_mail_queue_too_big": "邮件队列中的待处理电子邮件过多({nb_pending} emails)",
+ "diagnosis_mail_queue_unavailable_details": "错误: {error}",
+ "diagnosis_mail_queue_unavailable": "无法查询队列中待处理电子邮件的数量",
+ "diagnosis_mail_queue_ok": "邮件队列中有{nb_pending} 个待处理的电子邮件",
+ "diagnosis_mail_blacklist_website": "确定列出的原因并加以修复后,请随时在{blacklist_website}上要求删除您的IP或域名",
+ "diagnosis_mail_blacklist_reason": "黑名单的原因是: {reason}",
+ "diagnosis_mail_blacklist_listed_by": "您的IP或域{item}
已在{blacklist_name}上列入黑名单",
+ "diagnosis_mail_blacklist_ok": "该服务器使用的IP和域似乎未列入黑名单",
+ "diagnosis_mail_fcrdns_different_from_ehlo_domain_details": "当前反向DNS值为: {rdns_domain}
期待值:{ehlo_domain}
",
+ "diagnosis_mail_fcrdns_different_from_ehlo_domain": "反向DNS未在 IPv{ipversion}中正确配置。某些电子邮件可能无法传递或可能被标记为垃圾邮件。",
+ "diagnosis_mail_fcrdns_nok_details": "您应该首先尝试在Internet路由器界面或托管服务提供商界面中使用{ehlo_domain}
配置反向DNS。(某些托管服务提供商可能会要求您为此发送支持票)。",
+ "diagnosis_mail_fcrdns_dns_missing": "IPv{ipversion}中未定义反向DNS。某些电子邮件可能无法传递或可能被标记为垃圾邮件。",
+ "diagnosis_mail_fcrdns_ok": "您的反向DNS已正确配置!",
+ "diagnosis_mail_ehlo_could_not_diagnose_details": "错误: {error}",
+ "diagnosis_mail_ehlo_could_not_diagnose": "无法诊断Postfix邮件服务器是否可以从IPv{ipversion}中从外部访问。",
+ "diagnosis_mail_ehlo_wrong": "不同的SMTP邮件服务器在IPv{ipversion}上进行应答。您的服务器可能无法接收电子邮件。",
+ "diagnosis_mail_ehlo_bad_answer_details": "这可能是由于其他计算机而不是您的服务器在应答。",
+ "diagnosis_mail_ehlo_bad_answer": "一个非SMTP服务在IPv{ipversion}的25端口应答",
+ "diagnosis_mail_ehlo_unreachable": "SMTP邮件服务器在IPv{ipversion}上无法从外部访问。它将无法接收电子邮件。",
+ "diagnosis_mail_ehlo_ok": "SMTP邮件服务器可以从外部访问,因此可以接收电子邮件!",
+ "diagnosis_services_bad_status": "服务{service}为 {status} :(",
+ "diagnosis_services_conf_broken": "服务{service}的配置已损坏!",
+ "diagnosis_services_running": "服务{service}正在运行!",
+ "diagnosis_domain_expires_in": "{domain}在{days}天后到期。",
+ "diagnosis_domain_expiration_error": "有些域很快就会过期!",
+ "diagnosis_domain_expiration_warning": "一些域即将过期!",
+ "diagnosis_domain_expiration_success": "您的域已注册,并且不会很快过期。",
+ "diagnosis_domain_expiration_not_found_details": "域{domain}的WHOIS信息似乎不包含有关到期日期的信息?",
+ "diagnosis_domain_not_found_details": "域{domain}在WHOIS数据库中不存在或已过期!",
+ "diagnosis_domain_expiration_not_found": "无法检查某些域的到期日期",
+ "diagnosis_dns_missing_record": "根据建议的DNS配置,您应该添加带有以下信息的DNS记录。
类型:{type}
名称:{name}
值:{value}
",
+ "diagnosis_dns_bad_conf": "域{domain}(类别{category})的某些DNS记录丢失或不正确",
+ "diagnosis_dns_good_conf": "已为域{domain}(类别{category})正确配置了DNS记录",
+ "diagnosis_ip_weird_resolvconf_details": "文件 /etc/resolv.conf
应该是指向 /etc/resolvconf/run/resolv.conf
本身的符号链接,指向 127.0.0.1
(dnsmasq)。如果要手动配置DNS解析器,请编辑 /etc/resolv.dnsmasq.conf
。",
+ "diagnosis_ip_weird_resolvconf": "DNS解析似乎可以正常工作,但是您似乎正在使用自定义的 /etc/resolv.conf
。",
+ "diagnosis_ip_broken_resolvconf": "域名解析在您的服务器上似乎已损坏,这似乎与 /etc/resolv.conf
有关,但未指向 127.0.0.1
。",
+ "diagnosis_ip_broken_dnsresolution": "域名解析似乎由于某种原因而被破坏...防火墙是否阻止了DNS请求?",
+ "diagnosis_ip_dnsresolution_working": "域名解析正常!",
+ "diagnosis_ip_not_connected_at_all": "服务器似乎根本没有连接到Internet?",
+ "diagnosis_ip_local": "本地IP:{local}
",
+ "diagnosis_ip_global": "全局IP: {global}
",
+ "diagnosis_ip_no_ipv6_tip": "正常运行的IPv6并不是服务器正常运行所必需的,但是对于整个Internet的健康而言,则更好。通常,IPv6应该由系统或您的提供商自动配置(如果可用)。否则,您可能需要按照此处的文档中的说明手动配置一些内容: https://yunohost.org/#/ipv6。如果您无法启用IPv6或对您来说太过困难,也可以安全地忽略此警告。",
+ "diagnosis_ip_no_ipv6": "服务器没有可用的IPv6。",
+ "diagnosis_ip_connected_ipv6": "服务器通过IPv6连接到Internet!",
+ "diagnosis_ip_no_ipv4": "服务器没有可用的IPv4。",
+ "diagnosis_ip_connected_ipv4": "服务器通过IPv4连接到Internet!",
+ "diagnosis_no_cache": "尚无类别 '{category}'的诊断缓存",
+ "diagnosis_failed": "无法获取类别 '{category}'的诊断结果: {error}",
+ "diagnosis_package_installed_from_sury_details": "一些软件包被无意中从一个名为Sury的第三方仓库安装。YunoHost团队改进了处理这些软件包的策略,但预计一些安装了PHP7.3应用程序的设置在仍然使用Stretch的情况下还有一些不一致的地方。为了解决这种情况,你应该尝试运行以下命令:{cmd_to_fix} ",
+ "app_not_installed": "在已安装的应用列表中找不到 {app}:{all_apps}",
+ "app_already_installed_cant_change_url": "这个应用程序已经被安装。URL不能仅仅通过这个函数来改变。在`app changeurl`中检查是否可用。",
+ "restore_not_enough_disk_space": "没有足够的空间(空间: {free_space} B,需要的空间: {needed_space} B,安全系数: {margin} B)",
+ "regenconf_pending_applying": "正在为类别'{category}'应用挂起的配置..",
+ "regenconf_up_to_date": "类别'{category}'的配置已经是最新的",
+ "regenconf_file_kept_back": "配置文件'{conf}'预计将被regen-conf(类别{category})删除,但被保留了下来。",
+ "good_practices_about_user_password": "现在,您将设置一个新的管理员密码。 密码至少应包含8个字符。并且出于安全考虑建议使用较长的密码同时尽可能使用各种字符(大写,小写,数字和特殊字符)",
+ "global_settings_setting_smtp_relay_host": "使用SMTP中继主机来代替这个YunoHost实例发送邮件。如果你有以下情况,就很有用:你的25端口被你的ISP或VPS提供商封锁,你有一个住宅IP列在DUHL上,你不能配置反向DNS,或者这个服务器没有直接暴露在互联网上,你想使用其他服务器来发送邮件。",
+ "domain_cannot_remove_main_add_new_one": "你不能删除'{domain}',因为它是主域和你唯一的域,你需要先用'yunohost domain add '添加另一个域,然后用'yunohost domain main-domain -n '设置为主域,然后你可以用'yunohost domain remove {domain}'删除域",
+ "domain_cannot_add_xmpp_upload": "你不能添加以'xmpp-upload.'开头的域名。这种名称是为YunoHost中集成的XMPP上传功能保留的。",
+ "domain_cannot_remove_main": "你不能删除'{domain}',因为它是主域,你首先需要用'yunohost domain main-domain -n '设置另一个域作为主域;这里是候选域的列表: {other_domains}",
+ "diagnosis_sshd_config_inconsistent_details": "请运行yunohost settings set security.ssh.port -v YOUR_SSH_PORT 来定义SSH端口,并检查yunohost tools regen-conf ssh --dry-run --with-diff 和yunohost tools regen-conf ssh --force 将您的配置重置为YunoHost建议。",
+ "diagnosis_http_bad_status_code": "它看起来像另一台机器(也许是你的互联网路由器)回答,而不是你的服务器。
1。这个问题最常见的原因是80端口(和443端口)没有正确转发到您的服务器。
2.在更复杂的设置中:确保没有防火墙或反向代理的干扰。",
+ "diagnosis_http_timeout": "当试图从外部联系你的服务器时,出现了超时。它似乎是不可达的。
1. 这个问题最常见的原因是80端口(和443端口)没有正确转发到你的服务器。
2.你还应该确保nginx服务正在运行
3.对于更复杂的设置:确保没有防火墙或反向代理的干扰。",
+ "diagnosis_rootfstotalspace_critical": "根文件系统总共只有{space},这很令人担忧!您可能很快就会用完磁盘空间!建议根文件系统至少有16 GB。",
+ "diagnosis_rootfstotalspace_warning": "根文件系统总共只有{space}。这可能没问题,但要小心,因为最终您可能很快会用完磁盘空间...建议根文件系统至少有16 GB。",
+ "diagnosis_regenconf_manually_modified_details": "如果你知道自己在做什么的话,这可能是可以的! YunoHost会自动停止更新这个文件... 但是请注意,YunoHost的升级可能包含重要的推荐变化。如果你想,你可以用yunohost tools regen-conf {category} --dry-run --with-diff 检查差异,然后用yunohost tools regen-conf {category} --force 强制设置为推荐配置",
+ "diagnosis_mail_fcrdns_nok_alternatives_6": "有些供应商不会让你配置你的反向DNS(或者他们的功能可能被破坏......)。如果你的反向DNS正确配置为IPv4,你可以尝试在发送邮件时禁用IPv6,方法是运yunohost settings set smtp.allow_ipv6 -v off 。注意:这应视为最后一个解决方案因为这意味着你将无法从少数只使用IPv6的服务器发送或接收电子邮件。",
+ "diagnosis_mail_fcrdns_nok_alternatives_4": "有些供应商不会让你配置你的反向DNS(或者他们的功能可能被破坏......)。如果您因此而遇到问题,请考虑以下解决方案:
- 一些ISP提供了使用邮件服务器中转的选择,尽管这意味着中转将能够监视您的电子邮件流量。
- 一个有利于隐私的选择是使用VPN*与专用公共IP*来绕过这类限制。见https://yunohost.org/#/vpn_advantage
- 或者可以切换到另一个供应商",
+ "diagnosis_mail_ehlo_wrong_details": "远程诊断器在IPv{ipversion}中收到的EHLO与你的服务器的域名不同。
收到的EHLO: {wrong_ehlo}
预期的: {right_ehlo}
这个问题最常见的原因是端口25没有正确转发到你的服务器。另外,请确保没有防火墙或反向代理的干扰。",
+ "diagnosis_mail_ehlo_unreachable_details": "在IPv{ipversion}中无法打开与您服务器的25端口连接。它似乎是不可达的。
1. 这个问题最常见的原因是端口25没有正确转发到你的服务器。
2.你还应该确保postfix服务正在运行。
3.在更复杂的设置中:确保没有防火墙或反向代理的干扰。",
+ "diagnosis_mail_outgoing_port_25_blocked_relay_vpn": "一些供应商不会让你解除对出站端口25的封锁,因为他们不关心网络中立性。
- 其中一些供应商提供了使用邮件服务器中继的替代方案,尽管这意味着中继将能够监视你的电子邮件流量。
- 一个有利于隐私的替代方案是使用VPN*,用一个专用的公共IP*绕过这种限制。见https://yunohost.org/#/vpn_advantage
- 你也可以考虑切换到一个更有利于网络中立的供应商",
+ "diagnosis_ram_ok": "系统在{total}中仍然有 {available} ({available_percent}%) RAM可用。",
+ "diagnosis_ram_low": "系统有 {available} ({available_percent}%) RAM可用(共{total}个)可用。小心。",
+ "diagnosis_ram_verylow": "系统只有 {available} ({available_percent}%) 内存可用! (在{total}中)",
+ "diagnosis_diskusage_ok": "存储器{mountpoint}
(在设备{device}
上)仍有 {free} ({free_percent}%) 空间(在{total}中)!",
+ "diagnosis_diskusage_low": "存储器{mountpoint}
(在设备{device}
上)只有{free} ({free_percent}%) 的空间。({free_percent}%)的剩余空间(在{total}中)。要小心。",
+ "diagnosis_diskusage_verylow": "存储器{mountpoint}
(在设备{device}
上)仅剩余{free} ({free_percent}%) (剩余{total})个空间。您应该真正考虑清理一些空间!",
+ "diagnosis_services_bad_status_tip": "你可以尝试重新启动服务,如果没有效果,可以看看webadmin中的服务日志(从命令行,你可以用yunohost service restart {service} 和yunohost service log {service} )来做。",
+ "diagnosis_dns_try_dyndns_update_force": "该域的DNS配置应由YunoHost自动管理,如果不是这种情况,您可以尝试使用 yunohost dyndns update --force 强制进行更新。",
+ "diagnosis_dns_point_to_doc": "如果您需要有关配置DNS记录的帮助,请查看 https://yunohost.org/dns_config 上的文档。",
+ "diagnosis_dns_discrepancy": "以下DNS记录似乎未遵循建议的配置:
类型: {type}
名称: {name}
代码> 当前值: {current}期望值: {value}
",
+ "log_backup_create": "创建备份档案",
+ "log_available_on_yunopaste": "现在可以通过{url}使用此日志",
+ "log_app_action_run": "运行 '{}' 应用的操作",
+ "log_app_makedefault": "将 '{}' 设为默认应用",
+ "log_app_upgrade": "升级 '{}' 应用",
+ "log_app_remove": "删除 '{}' 应用",
+ "log_app_install": "安装 '{}' 应用",
+ "log_app_change_url": "更改'{}'应用的网址",
+ "log_operation_unit_unclosed_properly": "操作单元未正确关闭",
+ "log_does_exists": "没有名称为'{log}'的操作日志,请使用 'yunohost log list' 查看所有可用的操作日志",
+ "log_help_to_get_failed_log": "操作'{desc}'无法完成。请使用命令'yunohost log share {name}' 共享此操作的完整日志以获取帮助",
+ "log_link_to_failed_log": "无法完成操作 '{desc}'。请通过单击此处提供此操作的完整日志以获取帮助",
+ "log_help_to_get_log": "要查看操作'{desc}'的日志,请使用命令'yunohost log show {name}'",
+ "log_link_to_log": "此操作的完整日志: '{desc}'",
+ "log_corrupted_md_file": "与日志关联的YAML元数据文件已损坏: '{md_file}\n错误: {error}'",
+ "iptables_unavailable": "你不能在这里使用iptables。你要么在一个容器中,要么你的内核不支持它",
+ "ip6tables_unavailable": "你不能在这里使用ip6tables。你要么在一个容器中,要么你的内核不支持它",
+ "log_regen_conf": "重新生成系统配置'{}'",
+ "log_letsencrypt_cert_renew": "续订'{}'的“Let's Encrypt”证书",
+ "log_selfsigned_cert_install": "在 '{}'域上安装自签名证书",
+ "log_permission_url": "更新与权限'{}'相关的网址",
+ "log_permission_delete": "删除权限'{}'",
+ "log_permission_create": "创建权限'{}'",
+ "log_letsencrypt_cert_install": "在'{}'域上安装“Let's Encrypt”证书",
+ "log_dyndns_update": "更新与您的YunoHost子域'{}'关联的IP",
+ "log_dyndns_subscribe": "订阅YunoHost子域'{}'",
+ "log_domain_remove": "从系统配置中删除 '{}' 域",
+ "log_domain_add": "将 '{}'域添加到系统配置中",
+ "log_remove_on_failed_install": "安装失败后删除 '{}'",
+ "log_remove_on_failed_restore": "从备份存档还原失败后,删除 '{}'",
+ "log_backup_restore_app": "从备份存档还原 '{}'",
+ "log_backup_restore_system": "从备份档案还原系统",
+ "permission_currently_allowed_for_all_users": "这个权限目前除了授予其他组以外,还授予所有用户。你可能想删除'all_users'权限或删除目前授予它的其他组。",
+ "permission_creation_failed": "无法创建权限'{permission}': {error}",
+ "permission_created": "权限'{permission}'已创建",
+ "permission_cannot_remove_main": "不允许删除主要权限",
+ "permission_already_up_to_date": "权限没有被更新,因为添加/删除请求已经符合当前状态。",
+ "permission_already_exist": "权限 '{permission}'已存在",
+ "permission_already_disallowed": "群组'{group}'已禁用权限'{permission}'",
+ "permission_already_allowed": "群组 '{group}' 已启用权限'{permission}'",
+ "pattern_password_app": "抱歉,密码不能包含以下字符: {forbidden_chars}",
+ "pattern_username": "只能为小写字母数字和下划线字符",
+ "pattern_port_or_range": "必须是有效的端口号(即0-65535)或端口范围(例如100:200)",
+ "pattern_password": "必须至少3个字符长",
+ "pattern_mailbox_quota": "必须为带b/k/M/G/T 后缀的大小或0,才能没有配额",
+ "pattern_lastname": "必须是有效的姓氏",
+ "pattern_firstname": "必须是有效的名字",
+ "pattern_email": "必须是有效的电子邮件地址,没有'+'符号(例如someone @ example.com)",
+ "pattern_email_forward": "必须是有效的电子邮件地址,接受 '+' 符号(例如someone + tag @ example.com)",
+ "pattern_domain": "必须是有效的域名(例如my-domain.org)",
+ "pattern_backup_archive_name": "必须是一个有效的文件名,最多30个字符,只有-_.和字母数字",
+ "password_too_simple_4": "密码长度至少为12个字符,并且包含数字,大写,小写和特殊字符",
+ "password_too_simple_3": "密码长度至少为8个字符,并且包含数字,大写,小写和特殊字符",
+ "password_too_simple_2": "密码长度至少为8个字符,并且包含数字,大写和小写字符",
+ "password_listed": "该密码是世界上最常用的密码之一。 请选择一些更独特的东西。",
+ "packages_upgrade_failed": "无法升级所有软件包",
+ "invalid_number": "必须是数字",
+ "not_enough_disk_space": "'{path}'上的可用空间不足",
+ "migrations_to_be_ran_manually": "迁移{id}必须手动运行。请转到webadmin页面上的工具→迁移,或运行`yunohost tools migrations run`。",
+ "migrations_success_forward": "迁移 {id} 已完成",
+ "migrations_skip_migration": "正在跳过迁移{id}...",
+ "migrations_running_forward": "正在运行迁移{id}...",
+ "migrations_pending_cant_rerun": "这些迁移仍处于待处理状态,因此无法再次运行: {ids}",
+ "migrations_not_pending_cant_skip": "这些迁移没有待处理,因此不能跳过: {ids}",
+ "migrations_no_such_migration": "没有称为 '{id}'的迁移",
+ "migrations_no_migrations_to_run": "无需迁移即可运行",
+ "migrations_need_to_accept_disclaimer": "要运行迁移{id},您必须接受以下免责声明:\n---\n{disclaimer}\n---\n如果您接受并继续运行迁移,请使用选项'--accept-disclaimer'重新运行该命令。",
+ "migrations_must_provide_explicit_targets": "使用'--skip'或'--force-rerun'时必须提供明确的目标",
+ "migrations_migration_has_failed": "迁移{id}尚未完成,正在中止。错误: {exception}",
+ "migrations_loading_migration": "正在加载迁移{id}...",
+ "migrations_list_conflict_pending_done": "您不能同时使用'--previous' 和'--done'。",
+ "migrations_exclusive_options": "'--auto', '--skip',和'--force-rerun'是互斥的选项。",
+ "migrations_failed_to_load_migration": "无法加载迁移{id}: {error}",
+ "migrations_dependencies_not_satisfied": "在迁移{id}之前运行以下迁移: '{dependencies_id}'。",
+ "migrations_cant_reach_migration_file": "无法访问路径'%s'处的迁移文件",
+ "migrations_already_ran": "这些迁移已经完成: {ids}",
+ "migration_0019_slapd_config_will_be_overwritten": "好像您手动编辑了slapd配置。对于此关键迁移,YunoHost需要强制更新slapd配置。原始文件将备份在{conf_backup_folder}中。",
+ "migration_0019_add_new_attributes_in_ldap": "在LDAP数据库中添加权限的新属性",
+ "migration_0018_failed_to_reset_legacy_rules": "无法重置旧版iptables规则: {error}",
+ "migration_0018_failed_to_migrate_iptables_rules": "无法将旧的iptables规则迁移到nftables: {error}",
+ "migration_0017_not_enough_space": "在{path}中提供足够的空间来运行迁移。",
+ "migration_0017_postgresql_11_not_installed": "已安装PostgreSQL 9.6,但未安装PostgreSQL11?您的系统上可能发生了一些奇怪的事情:(...",
+ "migration_0017_postgresql_96_not_installed": "PostgreSQL未安装在您的系统上。无事可做。",
+ "migration_0015_weak_certs": "发现以下证书仍然使用弱签名算法,并且必须升级以与下一版本的nginx兼容: {certs}",
+ "migration_0015_cleaning_up": "清理不再有用的缓存和软件包...",
+ "migration_0015_specific_upgrade": "开始升级需要独立升级的系统软件包...",
+ "migration_0015_modified_files": "请注意,发现以下文件是手动修改的,并且在升级后可能会被覆盖: {manually_modified_files}",
+ "migration_0015_problematic_apps_warning": "请注意,已检测到以下可能有问题的已安装应用程序。看起来好像那些不是从YunoHost应用程序目录中安装的,或者没有标记为“正在运行”。因此,不能保证它们在升级后仍然可以使用: {problematic_apps}",
+ "migration_0015_general_warning": "请注意,此迁移是一项微妙的操作。YunoHost团队竭尽全力对其进行检查和测试,但迁移仍可能会破坏系统或其应用程序的某些部分。\n\n因此,建议:\n -对任何关键数据或应用程序执行备份。有关更多信息,请访问https://yunohost.org/backup;\n -启动迁移后要耐心:根据您的Internet连接和硬件,升级所有内容最多可能需要几个小时。",
+ "migration_0015_system_not_fully_up_to_date": "您的系统不是最新的。请先执行常规升级,然后再运行向Buster的迁移。",
+ "migration_0015_not_enough_free_space": "/var/中的可用空间非常低!您应该至少有1GB的可用空间来运行此迁移。",
+ "migration_0015_not_stretch": "当前的Debian发行版不是Stretch!",
+ "migration_0015_yunohost_upgrade": "正在启动YunoHost核心升级...",
+ "migration_0015_still_on_stretch_after_main_upgrade": "在主要升级期间出了点问题,系统似乎仍在Debian Stretch上",
+ "migration_0015_main_upgrade": "正在开始主要升级...",
+ "migration_0015_patching_sources_list": "修补sources.lists ...",
+ "migration_0015_start": "开始迁移至Buster",
+ "migration_update_LDAP_schema": "正在更新LDAP模式...",
+ "migration_ldap_rollback_success": "系统回滚。",
+ "migration_ldap_migration_failed_trying_to_rollback": "无法迁移...试图回滚系统。",
+ "migration_ldap_can_not_backup_before_migration": "迁移失败之前,无法完成系统的备份。错误: {error}",
+ "migration_ldap_backup_before_migration": "在实际迁移之前,请创建LDAP数据库和应用程序设置的备份。",
+ "migration_description_0020_ssh_sftp_permissions": "添加SSH和SFTP权限支持",
+ "migration_description_0019_extend_permissions_features": "扩展/修改应用程序的权限管理系统",
+ "migration_description_0018_xtable_to_nftable": "将旧的网络流量规则迁移到新的nftable系统",
+ "migration_description_0017_postgresql_9p6_to_11": "将数据库从PostgreSQL 9.6迁移到11",
+ "migration_description_0016_php70_to_php73_pools": "将php7.0-fpm'pool'conf文件迁移到php7.3",
+ "migration_description_0015_migrate_to_buster": "将系统升级到Debian Buster和YunoHost 4.x",
+ "migrating_legacy_permission_settings": "正在迁移旧版权限设置...",
+ "main_domain_changed": "主域已更改",
+ "main_domain_change_failed": "无法更改主域",
+ "mail_unavailable": "该电子邮件地址是保留的,并且将自动分配给第一个用户",
+ "mailbox_used_space_dovecot_down": "如果要获取使用过的邮箱空间,则必须启动Dovecot邮箱服务",
+ "mailbox_disabled": "用户{user}的电子邮件已关闭",
+ "mail_forward_remove_failed": "无法删除电子邮件转发'{mail}'",
+ "mail_domain_unknown": "域'{domain}'的电子邮件地址无效。请使用本服务器管理的域。",
+ "mail_alias_remove_failed": "无法删除电子邮件别名'{mail}'",
+ "log_tools_reboot": "重新启动服务器",
+ "log_tools_shutdown": "关闭服务器",
+ "log_tools_upgrade": "升级系统软件包",
+ "log_tools_postinstall": "安装好你的YunoHost服务器后",
+ "log_tools_migrations_migrate_forward": "运行迁移",
+ "log_domain_main_domain": "将 '{}' 设为主要域",
+ "log_user_permission_reset": "重置权限'{}'",
+ "log_user_permission_update": "更新权限'{}'的访问权限",
+ "log_user_update": "更新用户'{}'的信息",
+ "log_user_group_update": "更新组'{}'",
+ "log_user_group_delete": "删除组'{}'",
+ "log_user_group_create": "创建组'{}'",
+ "log_user_delete": "删除用户'{}'",
+ "log_user_create": "添加用户'{}'"
+}
\ No newline at end of file
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 000000000..27d690435
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,16 @@
+[pytest]
+addopts = -s -v
+norecursedirs = dist doc build .tox .eggs
+testpaths = tests/
+markers =
+ with_system_archive_from_3p8
+ with_backup_recommended_app_installed
+ clean_opt_dir
+ with_wordpress_archive_from_3p8
+ with_legacy_app_installed
+ with_backup_recommended_app_installed_with_ynh_restore
+ with_permission_app_installed
+ other_domains
+ with_custom_domain
+filterwarnings =
+ ignore::urllib3.exceptions.InsecureRequestWarning
\ No newline at end of file
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 000000000..db1dde69d
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,2 @@
+[flake8]
+ignore = E501,E128,E731,E722
diff --git a/src/yunohost/__init__.py b/src/yunohost/__init__.py
index e69de29bb..dad73e2a4 100644
--- a/src/yunohost/__init__.py
+++ b/src/yunohost/__init__.py
@@ -0,0 +1,161 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+
+import os
+import sys
+
+import moulinette
+from moulinette import m18n
+from moulinette.utils.log import configure_logging
+from moulinette.interfaces.cli import colorize, get_locale
+
+
def is_installed():
    """Return True once the YunoHost postinstall has been completed."""
    # The postinstall procedure drops this flag file when it finishes
    flag = "/etc/yunohost/installed"
    return os.path.isfile(flag)
+
+
def cli(debug, quiet, output_as, timeout, args, parser):
    """Entry point for the 'yunohost' command-line interface.

    Configures logging, refuses most commands before the postinstall,
    then hands control over to moulinette and exits with its status.
    """

    init_logging(interface="cli", debug=debug, quiet=quiet)

    # Before the postinstall, only a small whitelist of commands makes sense
    if not is_installed():
        check_command_is_valid_before_postinstall(args)

    exit_code = moulinette.cli(
        args, output_as=output_as, timeout=timeout, top_parser=parser
    )
    sys.exit(exit_code)
+
+
def api(debug, host, port):
    """Entry point for the 'yunohost-api' HTTP interface.

    Configures logging, registers an extra unauthenticated route exposing
    the postinstall status, then runs the moulinette API server and exits
    with its status.
    """

    init_logging(interface="api", debug=debug)

    def installed_route():
        # Lets the webadmin know whether the postinstall was done yet
        return {"installed": is_installed()}

    # FIXME : someday, maybe find a way to disable route /postinstall if
    # postinstall already done ...

    exit_code = moulinette.api(
        host=host,
        port=port,
        routes={("GET", "/installed"): installed_route},
    )
    sys.exit(exit_code)
+
+
def check_command_is_valid_before_postinstall(args):
    """Abort (exit 1) unless the requested command may run before postinstall.

    Only a handful of commands are meaningful on a server where the
    postinstall has not been run yet.
    """

    whitelist = (
        "tools postinstall",
        "tools versions",
        "tools shell",
        "backup list",
        "backup restore",
        "log display",
    )

    # The command is identified by its first two CLI tokens (e.g. "tools shell")
    command = " ".join(args[:2]) if len(args) >= 2 else None

    if command not in whitelist:
        init_i18n()
        print(colorize(m18n.g("error"), "red") + " " + m18n.n("yunohost_not_installed"))
        sys.exit(1)
+
+
+def init(interface="cli", debug=False, quiet=False, logdir="/var/log/yunohost"):
+ """
+ This is a small util function ONLY meant to be used to initialize a Yunohost
+ context when ran from tests or from scripts.
+ """
+ init_logging(interface=interface, debug=debug, quiet=quiet, logdir=logdir)
+ init_i18n()
+ from moulinette.core import MoulinetteLock
+
+ lock = MoulinetteLock("yunohost", timeout=30)
+ lock.acquire()
+ return lock
+
+
def init_i18n():
    """Load YunoHost translations and set the locale for direct m18n usage.

    Only needed when calling m18n.n/m18n.g without going through
    moulinette.cli or moulinette.api (which do this themselves).
    """
    m18n.load_namespace("yunohost")
    m18n.set_locale(get_locale())
+
+
+def init_logging(interface="cli", debug=False, quiet=False, logdir="/var/log/yunohost"):
+
+ logfile = os.path.join(logdir, "yunohost-%s.log" % interface)
+
+ if not os.path.isdir(logdir):
+ os.makedirs(logdir, 0o750)
+
+ logging_configuration = {
+ "version": 1,
+ "disable_existing_loggers": True,
+ "formatters": {
+ "console": {
+ "format": "%(relativeCreated)-5d %(levelname)-8s %(name)s %(funcName)s - %(fmessage)s"
+ },
+ "tty-debug": {"format": "%(relativeCreated)-4d %(fmessage)s"},
+ "precise": {
+ "format": "%(asctime)-15s %(levelname)-8s %(name)s %(funcName)s - %(fmessage)s"
+ },
+ },
+ "filters": {
+ "action": {
+ "()": "moulinette.utils.log.ActionFilter",
+ },
+ },
+ "handlers": {
+ "cli": {
+ "level": "DEBUG" if debug else "INFO",
+ "class": "moulinette.interfaces.cli.TTYHandler",
+ "formatter": "tty-debug" if debug else "",
+ },
+ "api": {
+ "level": "DEBUG" if debug else "INFO",
+ "class": "moulinette.interfaces.api.APIQueueHandler",
+ },
+ "file": {
+ "class": "logging.FileHandler",
+ "formatter": "precise",
+ "filename": logfile,
+ "filters": ["action"],
+ },
+ },
+ "loggers": {
+ "yunohost": {
+ "level": "DEBUG",
+ "handlers": ["file", interface] if not quiet else ["file"],
+ "propagate": False,
+ },
+ "moulinette": {
+ "level": "DEBUG",
+ "handlers": ["file", interface] if not quiet else ["file"],
+ "propagate": False,
+ },
+ },
+ "root": {
+ "level": "DEBUG",
+ "handlers": ["file", interface] if debug else ["file"],
+ },
+ }
+
+ # Logging configuration for CLI (or any other interface than api...) #
+ if interface != "api":
+ configure_logging(logging_configuration)
+
+ # Logging configuration for API #
+ else:
+ # We use a WatchedFileHandler instead of regular FileHandler to possibly support log rotation etc
+ logging_configuration["handlers"]["file"][
+ "class"
+ ] = "logging.handlers.WatchedFileHandler"
+
+ # This is for when launching yunohost-api in debug mode, we want to display stuff in the console
+ if debug:
+ logging_configuration["loggers"]["yunohost"]["handlers"].append("cli")
+ logging_configuration["loggers"]["moulinette"]["handlers"].append("cli")
+ logging_configuration["root"]["handlers"].append("cli")
+
+ configure_logging(logging_configuration)
diff --git a/src/yunohost/app.py b/src/yunohost/app.py
index 4831f050c..0013fcd82 100644
--- a/src/yunohost/app.py
+++ b/src/yunohost/app.py
@@ -30,424 +30,388 @@ import shutil
import yaml
import time
import re
-import urlparse
import subprocess
import glob
-import pwd
-import grp
-import urllib
+import tempfile
from collections import OrderedDict
-from datetime import datetime
+from typing import List
-from moulinette import msignals, m18n, msettings
+from moulinette import Moulinette, m18n
+from moulinette.core import MoulinetteError
from moulinette.utils.log import getActionLogger
-from moulinette.utils.filesystem import read_json, read_toml
+from moulinette.utils.network import download_json
+from moulinette.utils.process import run_commands, check_output
+from moulinette.utils.filesystem import (
+ read_file,
+ read_json,
+ read_toml,
+ read_yaml,
+ write_to_file,
+ write_to_json,
+ write_to_yaml,
+ mkdir,
+)
-from yunohost.service import service_log, service_status, _run_service_command
from yunohost.utils import packages
-from yunohost.utils.error import YunohostError
+from yunohost.utils.config import (
+ ConfigPanel,
+ ask_questions_and_parse_answers,
+ Question,
+ DomainQuestion,
+ PathQuestion,
+)
+from yunohost.utils.i18n import _value_for_locale
+from yunohost.utils.error import YunohostError, YunohostValidationError
+from yunohost.utils.filesystem import free_space_in_directory
from yunohost.log import is_unit_operation, OperationLogger
-logger = getActionLogger('yunohost.app')
+logger = getActionLogger("yunohost.app")
-REPO_PATH = '/var/cache/yunohost/repo'
-APPS_PATH = '/usr/share/yunohost/apps'
-APPS_SETTING_PATH = '/etc/yunohost/apps/'
-INSTALL_TMP = '/var/cache/yunohost'
-APP_TMP_FOLDER = INSTALL_TMP + '/from_file'
-APPSLISTS_JSON = '/etc/yunohost/appslists.json'
+APPS_SETTING_PATH = "/etc/yunohost/apps/"
+APP_TMP_WORKDIRS = "/var/cache/yunohost/app_tmp_work_dirs"
-re_github_repo = re.compile(
- r'^(http[s]?://|git@)github.com[/:]'
- '(?P[\w\-_]+)/(?P[\w\-_]+)(.git)?'
- '(/tree/(?P.+))?'
-)
+APPS_CATALOG_CACHE = "/var/cache/yunohost/repo"
+APPS_CATALOG_CONF = "/etc/yunohost/apps_catalog.yml"
+APPS_CATALOG_API_VERSION = 2
+APPS_CATALOG_DEFAULT_URL = "https://app.yunohost.org/default"
re_app_instance_name = re.compile(
- r'^(?P[\w-]+?)(__(?P[1-9][0-9]*))?$'
+ r"^(?P[\w-]+?)(__(?P[1-9][0-9]*))?$"
)
-def app_listlists():
+def app_catalog(full=False, with_categories=False):
"""
- List fetched lists
-
+ Return a dict of apps available to installation from Yunohost's app catalog
"""
- # Migrate appslist system if needed
- # XXX move to a migration when those are implemented
- if _using_legacy_appslist_system():
- _migrate_appslist_system()
+ # Get app list from catalog cache
+ catalog = _load_apps_catalog()
+ installed_apps = set(_installed_apps())
- # Get the list
- appslist_list = _read_appslist_list()
+ # Trim info for apps if not using --full
+ for app, infos in catalog["apps"].items():
+ infos["installed"] = app in installed_apps
- # Convert 'lastUpdate' timestamp to datetime
- for name, infos in appslist_list.items():
- if infos["lastUpdate"] is None:
- infos["lastUpdate"] = 0
- infos["lastUpdate"] = datetime.utcfromtimestamp(infos["lastUpdate"])
+ infos["manifest"]["description"] = _value_for_locale(
+ infos["manifest"]["description"]
+ )
- return appslist_list
-
-
-def app_fetchlist(url=None, name=None):
- """
- Fetch application list(s) from app server. By default, fetch all lists.
-
- Keyword argument:
- name -- Name of the list
- url -- URL of remote JSON list
- """
- if url and not url.endswith(".json"):
- raise YunohostError("This is not a valid application list url. It should end with .json.")
-
- # If needed, create folder where actual appslists are stored
- if not os.path.exists(REPO_PATH):
- os.makedirs(REPO_PATH)
-
- # Migrate appslist system if needed
- # XXX move that to a migration once they are finished
- if _using_legacy_appslist_system():
- _migrate_appslist_system()
-
- # Read the list of appslist...
- appslists = _read_appslist_list()
-
- # Determine the list of appslist to be fetched
- appslists_to_be_fetched = []
-
- # If a url and and a name is given, try to register new list,
- # the fetch only this list
- if url is not None:
- if name:
- operation_logger = OperationLogger('app_fetchlist')
- operation_logger.start()
- _register_new_appslist(url, name)
- # Refresh the appslists dict
- appslists = _read_appslist_list()
- appslists_to_be_fetched = [name]
- operation_logger.success()
+ if not full:
+ catalog["apps"][app] = {
+ "description": infos["manifest"]["description"],
+ "level": infos["level"],
+ }
else:
- raise YunohostError('custom_appslist_name_required')
+ infos["manifest"]["arguments"] = _set_default_ask_questions(
+ infos["manifest"].get("arguments", {})
+ )
- # If a name is given, look for an appslist with that name and fetch it
- elif name is not None:
- if name not in appslists.keys():
- raise YunohostError('appslist_unknown', appslist=name)
- else:
- appslists_to_be_fetched = [name]
+ # Trim info for categories if not using --full
+ for category in catalog["categories"]:
+ category["title"] = _value_for_locale(category["title"])
+ category["description"] = _value_for_locale(category["description"])
+ for subtags in category.get("subtags", []):
+ subtags["title"] = _value_for_locale(subtags["title"])
- # Otherwise, fetch all lists
+ if not full:
+ catalog["categories"] = [
+ {"id": c["id"], "description": c["description"]}
+ for c in catalog["categories"]
+ ]
+
+ if not with_categories:
+ return {"apps": catalog["apps"]}
else:
- appslists_to_be_fetched = appslists.keys()
-
- import requests # lazy loading this module for performance reasons
- # Fetch all appslists to be fetched
- for name in appslists_to_be_fetched:
-
- url = appslists[name]["url"]
-
- logger.debug("Attempting to fetch list %s at %s" % (name, url))
-
- # Download file
- try:
- appslist_request = requests.get(url, timeout=30)
- except requests.exceptions.SSLError:
- logger.error(m18n.n('appslist_retrieve_error',
- appslist=name,
- error="SSL connection error"))
- continue
- except Exception as e:
- logger.error(m18n.n('appslist_retrieve_error',
- appslist=name,
- error=str(e)))
- continue
- if appslist_request.status_code != 200:
- logger.error(m18n.n('appslist_retrieve_error',
- appslist=name,
- error="Server returned code %s " %
- str(appslist_request.status_code)))
- continue
-
- # Validate app list format
- # TODO / Possible improvement : better validation for app list (check
- # that json fields actually look like an app list and not any json
- # file)
- appslist = appslist_request.text
- try:
- json.loads(appslist)
- except ValueError as e:
- logger.error(m18n.n('appslist_retrieve_bad_format',
- appslist=name))
- continue
-
- # Write app list to file
- list_file = '%s/%s.json' % (REPO_PATH, name)
- try:
- with open(list_file, "w") as f:
- f.write(appslist)
- except Exception as e:
- raise YunohostError("Error while writing appslist %s: %s" % (name, str(e)), raw_msg=True)
-
- now = int(time.time())
- appslists[name]["lastUpdate"] = now
-
- logger.success(m18n.n('appslist_fetched', appslist=name))
-
- # Write updated list of appslist
- _write_appslist_list(appslists)
+ return {"apps": catalog["apps"], "categories": catalog["categories"]}
-@is_unit_operation()
-def app_removelist(operation_logger, name):
+def app_search(string):
"""
- Remove list from the repositories
-
- Keyword argument:
- name -- Name of the list to remove
-
+ Return a dict of apps whose description or name match the search string
"""
- appslists = _read_appslist_list()
- # Make sure we know this appslist
- if name not in appslists.keys():
- raise YunohostError('appslist_unknown', appslist=name)
+ # Retrieve a simple dict listing all apps
+ catalog_of_apps = app_catalog()
- operation_logger.start()
-
- # Remove json
- json_path = '%s/%s.json' % (REPO_PATH, name)
- if os.path.exists(json_path):
- os.remove(json_path)
-
- # Forget about this appslist
- del appslists[name]
- _write_appslist_list(appslists)
-
- logger.success(m18n.n('appslist_removed', appslist=name))
-
-
-def app_list(filter=None, raw=False, installed=False, with_backup=False):
- """
- List apps
-
- Keyword argument:
- filter -- Name filter of app_id or app_name
- offset -- Starting number for app fetching
- limit -- Maximum number of app fetched
- raw -- Return the full app_dict
- installed -- Return only installed apps
- with_backup -- Return only apps with backup feature (force --installed filter)
-
- """
- installed = with_backup or installed
-
- app_dict = {}
- list_dict = {} if raw else []
-
- appslists = _read_appslist_list()
-
- for appslist in appslists.keys():
-
- json_path = "%s/%s.json" % (REPO_PATH, appslist)
-
- # If we don't have the json yet, try to fetch it
- if not os.path.exists(json_path):
- app_fetchlist(name=appslist)
-
- # If it now exist
- if os.path.exists(json_path):
- appslist_content = read_json(json_path)
- for app, info in appslist_content.items():
- if app not in app_dict:
- info['repository'] = appslist
- app_dict[app] = info
- else:
- logger.warning("Uh there's no data for applist '%s' ... (That should be just a temporary issue?)" % appslist)
-
- # Get app list from the app settings directory
- for app in os.listdir(APPS_SETTING_PATH):
- if app not in app_dict:
- # Handle multi-instance case like wordpress__2
- if '__' in app:
- original_app = app[:app.index('__')]
- if original_app in app_dict:
- app_dict[app] = app_dict[original_app]
- continue
- # FIXME : What if it's not !?!?
-
- manifest = _get_manifest_of_app(os.path.join(APPS_SETTING_PATH, app))
- app_dict[app] = {"manifest": manifest}
-
- app_dict[app]['repository'] = None
-
- # Sort app list
- sorted_app_list = sorted(app_dict.keys())
-
- for app_id in sorted_app_list:
-
- app_info_dict = app_dict[app_id]
-
- # Apply filter if there's one
- if (filter and
- (filter not in app_id) and
- (filter not in app_info_dict['manifest']['name'])):
- continue
-
- # Ignore non-installed app if user wants only installed apps
- app_installed = _is_installed(app_id)
- if installed and not app_installed:
- continue
-
- # Ignore apps which don't have backup/restore script if user wants
- # only apps with backup features
- if with_backup and (
- not os.path.isfile(APPS_SETTING_PATH + app_id + '/scripts/backup') or
- not os.path.isfile(APPS_SETTING_PATH + app_id + '/scripts/restore')
+ # Selecting apps according to a match in app name or description
+ matching_apps = {"apps": {}}
+ for app in catalog_of_apps["apps"].items():
+ if re.search(string, app[0], flags=re.IGNORECASE) or re.search(
+ string, app[1]["description"], flags=re.IGNORECASE
):
+ matching_apps["apps"][app[0]] = app[1]
+
+ return matching_apps
+
+
+# Old legacy function...
+def app_fetchlist():
+ logger.warning(
+ "'yunohost app fetchlist' is deprecated. Please use 'yunohost tools update --apps' instead"
+ )
+ from yunohost.tools import tools_update
+
+ tools_update(target="apps")
+
+
+def app_list(full=False, installed=False, filter=None):
+ """
+ List installed apps
+ """
+
+ # Old legacy argument ... app_list was a combination of app_list and
+ # app_catalog before 3.8 ...
+ if installed:
+ logger.warning(
+ "Argument --installed ain't needed anymore when using 'yunohost app list'. It directly returns the list of installed apps.."
+ )
+
+ # Filter is a deprecated option...
+ if filter:
+ logger.warning(
+ "Using -f $appname in 'yunohost app list' is deprecated. Just use 'yunohost app list | grep -q 'id: $appname' to check a specific app is installed"
+ )
+
+ out = []
+ for app_id in sorted(_installed_apps()):
+
+ if filter and not app_id.startswith(filter):
continue
- if raw:
- app_info_dict['installed'] = app_installed
- if app_installed:
- app_info_dict['status'] = _get_app_status(app_id)
+ try:
+ app_info_dict = app_info(app_id, full=full)
+ except Exception as e:
+ logger.error("Failed to read info for %s : %s" % (app_id, e))
+ continue
+ app_info_dict["id"] = app_id
+ out.append(app_info_dict)
- # dirty: we used to have manifest containing multi_instance value in form of a string
- # but we've switched to bool, this line ensure retrocompatibility
- app_info_dict["manifest"]["multi_instance"] = is_true(app_info_dict["manifest"].get("multi_instance", False))
-
- list_dict[app_id] = app_info_dict
-
- else:
- label = None
- if app_installed:
- app_info_dict_raw = app_info(app=app_id, raw=True)
- label = app_info_dict_raw['settings']['label']
-
- list_dict.append({
- 'id': app_id,
- 'name': app_info_dict['manifest']['name'],
- 'label': label,
- 'description': _value_for_locale(app_info_dict['manifest']['description']),
- # FIXME: Temporarly allow undefined license
- 'license': app_info_dict['manifest'].get('license', m18n.n('license_undefined')),
- 'installed': app_installed
- })
-
- return {'apps': list_dict} if not raw else list_dict
+ return {"apps": out}
-def app_info(app, show_status=False, raw=False):
+def app_info(app, full=False):
"""
- Get app info
-
- Keyword argument:
- app -- Specific app ID
- show_status -- Show app installation status
- raw -- Return the full app_dict
-
+ Get info for a specific app
"""
- if not _is_installed(app):
- raise YunohostError('app_not_installed', app=app, all_apps=_get_all_installed_apps_id())
+ from yunohost.permission import user_permission_list
- app_setting_path = APPS_SETTING_PATH + app
+ _assert_is_installed(app)
- if raw:
- ret = app_list(filter=app, raw=True)[app]
- ret['settings'] = _get_app_settings(app)
+ setting_path = os.path.join(APPS_SETTING_PATH, app)
+ local_manifest = _get_manifest_of_app(setting_path)
+ permissions = user_permission_list(full=True, absolute_urls=True, apps=[app])[
+ "permissions"
+ ]
- # Determine upgradability
- # In case there is neither update_time nor install_time, we assume the app can/has to be upgraded
- local_update_time = ret['settings'].get('update_time', ret['settings'].get('install_time', 0))
+ settings = _get_app_settings(app)
- if 'lastUpdate' not in ret or 'git' not in ret:
- upgradable = "url_required"
- elif ret['lastUpdate'] > local_update_time:
- upgradable = "yes"
- else:
- upgradable = "no"
+ ret = {
+ "description": _value_for_locale(local_manifest["description"]),
+ "name": permissions.get(app + ".main", {}).get("label", local_manifest["name"]),
+ "version": local_manifest.get("version", "-"),
+ }
- ret['upgradable'] = upgradable
- ret['change_url'] = os.path.exists(os.path.join(app_setting_path, "scripts", "change_url"))
-
- manifest = _get_manifest_of_app(os.path.join(APPS_SETTING_PATH, app))
-
- ret['version'] = manifest.get('version', '-')
+ if "domain" in settings and "path" in settings:
+ ret["domain_path"] = settings["domain"] + settings["path"]
+ if not full:
return ret
- # Retrieve manifest and status
- manifest = _get_manifest_of_app(app_setting_path)
- status = _get_app_status(app, format_date=True)
+ ret["setting_path"] = setting_path
+ ret["manifest"] = local_manifest
+ ret["manifest"]["arguments"] = _set_default_ask_questions(
+ ret["manifest"].get("arguments", {})
+ )
+ ret["settings"] = settings
- info = {
- 'name': manifest['name'],
- 'description': _value_for_locale(manifest['description']),
- # FIXME: Temporarly allow undefined license
- 'license': manifest.get('license', m18n.n('license_undefined')),
- # FIXME: Temporarly allow undefined version
- 'version': manifest.get('version', '-'),
- # TODO: Add more info
- }
- if show_status:
- info['status'] = status
- return info
+ absolute_app_name, _ = _parse_app_instance_name(app)
+ ret["from_catalog"] = _load_apps_catalog()["apps"].get(absolute_app_name, {})
+ ret["upgradable"] = _app_upgradable(ret)
+ ret["supports_change_url"] = os.path.exists(
+ os.path.join(setting_path, "scripts", "change_url")
+ )
+ ret["supports_backup_restore"] = os.path.exists(
+ os.path.join(setting_path, "scripts", "backup")
+ ) and os.path.exists(os.path.join(setting_path, "scripts", "restore"))
+ ret["supports_multi_instance"] = is_true(
+ local_manifest.get("multi_instance", False)
+ )
+ ret["supports_config_panel"] = os.path.exists(
+ os.path.join(setting_path, "config_panel.toml")
+ )
+
+ ret["permissions"] = permissions
+ ret["label"] = permissions.get(app + ".main", {}).get("label")
+
+ if not ret["label"]:
+ logger.warning("Failed to get label for app %s ?" % app)
+ return ret
+
+
+def _app_upgradable(app_infos):
+ from packaging import version
+
+ # Determine upgradability
+
+ app_in_catalog = app_infos.get("from_catalog")
+ installed_version = version.parse(app_infos.get("version", "0~ynh0"))
+ version_in_catalog = version.parse(
+ app_infos.get("from_catalog", {}).get("manifest", {}).get("version", "0~ynh0")
+ )
+
+ if not app_in_catalog:
+ return "url_required"
+
+ # Do not advertise upgrades for bad-quality apps
+ level = app_in_catalog.get("level", -1)
+ if (
+ not (isinstance(level, int) and level >= 5)
+ or app_in_catalog.get("state") != "working"
+ ):
+ return "bad_quality"
+
+ # If the app uses the standard version scheme, use it to determine
+ # upgradability
+ if "~ynh" in str(installed_version) and "~ynh" in str(version_in_catalog):
+ if installed_version < version_in_catalog:
+ return "yes"
+ else:
+ return "no"
+
+ # Legacy stuff for app with old / non-standard version numbers...
+
+ # In case there is neither update_time nor install_time, we assume the app can/has to be upgraded
+ if not app_infos["from_catalog"].get("lastUpdate") or not app_infos[
+ "from_catalog"
+ ].get("git"):
+ return "url_required"
+
+ settings = app_infos["settings"]
+ local_update_time = settings.get("update_time", settings.get("install_time", 0))
+ if app_infos["from_catalog"]["lastUpdate"] > local_update_time:
+ return "yes"
+ else:
+ return "no"
def app_map(app=None, raw=False, user=None):
"""
- List apps by domain
+ Returns a map of url <-> app id such as :
- Keyword argument:
- user -- Allowed app map for a user
- raw -- Return complete dict
- app -- Specific app to map
+ {
+ "domain.tld/foo": "foo__2",
+ "domain.tld/mail: "rainloop",
+ "other.tld/": "bar",
+ "sub.other.tld/pwet": "pwet",
+ }
+ When using "raw", the structure changes to :
+
+ {
+ "domain.tld": {
+ "/foo": {"label": "App foo", "id": "foo__2"},
+ "/mail": {"label": "Rainloop", "id: "rainloop"},
+ },
+ "other.tld": {
+ "/": {"label": "Bar", "id": "bar"},
+ },
+ "sub.other.tld": {
+ "/pwet": {"label": "Pwet", "id": "pwet"}
+ }
+ }
"""
+
from yunohost.permission import user_permission_list
- from yunohost.utils.ldap import _get_ldap_interface
apps = []
result = {}
if app is not None:
if not _is_installed(app):
- raise YunohostError('app_not_installed', app=app, all_apps=_get_all_installed_apps_id())
- apps = [app, ]
+ raise YunohostValidationError(
+ "app_not_installed", app=app, all_apps=_get_all_installed_apps_id()
+ )
+ apps = [
+ app,
+ ]
else:
- apps = os.listdir(APPS_SETTING_PATH)
+ apps = _installed_apps()
+ permissions = user_permission_list(full=True, absolute_urls=True, apps=apps)[
+ "permissions"
+ ]
for app_id in apps:
app_settings = _get_app_settings(app_id)
if not app_settings:
continue
- if 'domain' not in app_settings:
+ if "domain" not in app_settings:
continue
- if 'path' not in app_settings:
+ if "path" not in app_settings:
# we assume that an app that doesn't have a path doesn't have an HTTP api
continue
- if 'no_sso' in app_settings: # I don't think we need to check for the value here
+        # This 'no_sso' setting sounds redundant with not having $path defined ....
+        # At least from what I can see, all apps using it don't have a path defined ...
+ if (
+ "no_sso" in app_settings
+ ): # I don't think we need to check for the value here
continue
- if user is not None:
- ldap = _get_ldap_interface()
- if not ldap.search(base='ou=permission,dc=yunohost,dc=org',
- filter='(&(objectclass=permissionYnh)(cn=main.%s)(inheritPermission=uid=%s,ou=users,dc=yunohost,dc=org))' % (app_id, user),
- attrs=['cn']):
+ # Users must at least have access to the main permission to have access to extra permissions
+ if user:
+ if not app_id + ".main" in permissions:
+ logger.warning(
+ "Uhoh, no main permission was found for app %s ... sounds like an app was only partially removed due to another bug :/"
+ % app_id
+ )
+ continue
+ main_perm = permissions[app_id + ".main"]
+ if user not in main_perm["corresponding_users"]:
continue
- domain = app_settings['domain']
- path = app_settings['path']
+ this_app_perms = {
+ p: i
+ for p, i in permissions.items()
+ if p.startswith(app_id + ".") and (i["url"] or i["additional_urls"])
+ }
- if raw:
- if domain not in result:
- result[domain] = {}
- result[domain][path] = {
- 'label': app_settings['label'],
- 'id': app_settings['id']
- }
- else:
- result[domain + path] = app_settings['label']
+ for perm_name, perm_info in this_app_perms.items():
+ # If we're building the map for a specific user, check the user
+ # actually is allowed for this specific perm
+ if user and user not in perm_info["corresponding_users"]:
+ continue
+
+ perm_label = perm_info["label"]
+ perm_all_urls = (
+ []
+ + ([perm_info["url"]] if perm_info["url"] else [])
+ + perm_info["additional_urls"]
+ )
+
+ for url in perm_all_urls:
+
+ # Here, we decide to completely ignore regex-type urls ...
+ # Because :
+ # - displaying them in regular "yunohost app map" output creates
+ # a pretty big mess when there are multiple regexes for the same
+ # app ? (c.f. for example lufi)
+ # - it doesn't really make sense when checking app conflicts to
+ # compare regexes ? (Or it could in some cases but ugh ?)
+ #
+ if url.startswith("re:"):
+ continue
+
+ if not raw:
+ result[url] = perm_label
+ else:
+ if "/" in url:
+ perm_domain, perm_path = url.split("/", 1)
+ perm_path = "/" + perm_path
+ else:
+ perm_domain = url
+ perm_path = "/"
+ if perm_domain not in result:
+ result[perm_domain] = {}
+ result[perm_domain][perm_path] = {"label": perm_label, "id": app_id}
return result
@@ -464,83 +428,57 @@ def app_change_url(operation_logger, app, domain, path):
"""
from yunohost.hook import hook_exec, hook_callback
- from yunohost.domain import _normalize_domain_path, _get_conflicting_apps
- from yunohost.permission import permission_update
+ from yunohost.service import service_reload_or_restart
installed = _is_installed(app)
if not installed:
- raise YunohostError('app_not_installed', app=app, all_apps=_get_all_installed_apps_id())
+ raise YunohostValidationError(
+ "app_not_installed", app=app, all_apps=_get_all_installed_apps_id()
+ )
- if not os.path.exists(os.path.join(APPS_SETTING_PATH, app, "scripts", "change_url")):
- raise YunohostError("app_change_no_change_url_script", app_name=app)
+ if not os.path.exists(
+ os.path.join(APPS_SETTING_PATH, app, "scripts", "change_url")
+ ):
+ raise YunohostValidationError("app_change_url_no_script", app_name=app)
old_domain = app_setting(app, "domain")
old_path = app_setting(app, "path")
# Normalize path and domain format
- old_domain, old_path = _normalize_domain_path(old_domain, old_path)
- domain, path = _normalize_domain_path(domain, path)
+
+ domain = DomainQuestion.normalize(domain)
+ old_domain = DomainQuestion.normalize(old_domain)
+ path = PathQuestion.normalize(path)
+ old_path = PathQuestion.normalize(old_path)
if (domain, path) == (old_domain, old_path):
- raise YunohostError("app_change_url_identical_domains", domain=domain, path=path)
+ raise YunohostValidationError(
+ "app_change_url_identical_domains", domain=domain, path=path
+ )
# Check the url is available
- conflicts = _get_conflicting_apps(domain, path, ignore_app=app)
- if conflicts:
- apps = []
- for path, app_id, app_label in conflicts:
- apps.append(" * {domain:s}{path:s} → {app_label:s} ({app_id:s})".format(
- domain=domain,
- path=path,
- app_id=app_id,
- app_label=app_label,
- ))
- raise YunohostError('app_location_unavailable', apps="\n".join(apps))
+ _assert_no_conflicting_apps(domain, path, ignore_app=app)
- manifest = _get_manifest_of_app(os.path.join(APPS_SETTING_PATH, app))
-
- # Retrieve arguments list for change_url script
- # TODO: Allow to specify arguments
- args_odict = _parse_args_from_manifest(manifest, 'change_url')
- args_list = [ value[0] for value in args_odict.values() ]
- args_list.append(app)
+ tmp_workdir_for_app = _make_tmp_workdir_for_app(app=app)
# Prepare env. var. to pass to script
- env_dict = _make_environment_dict(args_odict)
- app_id, app_instance_nb = _parse_app_instance_name(app)
- env_dict["YNH_APP_ID"] = app_id
- env_dict["YNH_APP_INSTANCE_NAME"] = app
- env_dict["YNH_APP_INSTANCE_NUMBER"] = str(app_instance_nb)
-
+ env_dict = _make_environment_for_app_script(app)
env_dict["YNH_APP_OLD_DOMAIN"] = old_domain
env_dict["YNH_APP_OLD_PATH"] = old_path
env_dict["YNH_APP_NEW_DOMAIN"] = domain
env_dict["YNH_APP_NEW_PATH"] = path
+ env_dict["YNH_APP_BASEDIR"] = tmp_workdir_for_app
if domain != old_domain:
- operation_logger.related_to.append(('domain', old_domain))
- operation_logger.extra.update({'env': env_dict})
+ operation_logger.related_to.append(("domain", old_domain))
+ operation_logger.extra.update({"env": env_dict})
operation_logger.start()
- if os.path.exists(os.path.join(APP_TMP_FOLDER, "scripts")):
- shutil.rmtree(os.path.join(APP_TMP_FOLDER, "scripts"))
-
- shutil.copytree(os.path.join(APPS_SETTING_PATH, app, "scripts"),
- os.path.join(APP_TMP_FOLDER, "scripts"))
-
- if os.path.exists(os.path.join(APP_TMP_FOLDER, "conf")):
- shutil.rmtree(os.path.join(APP_TMP_FOLDER, "conf"))
-
- shutil.copytree(os.path.join(APPS_SETTING_PATH, app, "conf"),
- os.path.join(APP_TMP_FOLDER, "conf"))
+ change_url_script = os.path.join(tmp_workdir_for_app, "scripts/change_url")
# Execute App change_url script
- os.system('chown -R admin: %s' % INSTALL_TMP)
- os.system('chmod +x %s' % os.path.join(os.path.join(APP_TMP_FOLDER, "scripts")))
- os.system('chmod +x %s' % os.path.join(os.path.join(APP_TMP_FOLDER, "scripts", "change_url")))
-
- if hook_exec(os.path.join(APP_TMP_FOLDER, 'scripts/change_url'),
- args=args_list, env=env_dict)[0] != 0:
+ ret = hook_exec(change_url_script, env=env_dict)[0]
+ if ret != 0:
msg = "Failed to change '%s' url." % app
logger.error(msg)
operation_logger.error(msg)
@@ -550,31 +488,22 @@ def app_change_url(operation_logger, app, domain, path):
app_setting(app, "domain", value=old_domain)
app_setting(app, "path", value=old_path)
return
+ shutil.rmtree(tmp_workdir_for_app)
# this should idealy be done in the change_url script but let's avoid common mistakes
- app_setting(app, 'domain', value=domain)
- app_setting(app, 'path', value=path)
+ app_setting(app, "domain", value=domain)
+ app_setting(app, "path", value=path)
- permission_update(app, permission="main", add_url=[domain+path], remove_url=[old_domain+old_path], sync_perm=True)
+ app_ssowatconf()
- # avoid common mistakes
- if _run_service_command("reload", "nginx") is False:
- # grab nginx errors
- # the "exit 0" is here to avoid check_output to fail because 'nginx -t'
- # will return != 0 since we are in a failed state
- nginx_errors = subprocess.check_output("nginx -t; exit 0",
- stderr=subprocess.STDOUT,
- shell=True).rstrip()
+ service_reload_or_restart("nginx")
- raise YunohostError("app_change_url_failed_nginx_reload", nginx_errors=nginx_errors)
+ logger.success(m18n.n("app_change_url_success", app=app, domain=domain, path=path))
- logger.success(m18n.n("app_change_url_success",
- app=app, domain=domain, path=path))
-
- hook_callback('post_app_change_url', args=args_list, env=env_dict)
+ hook_callback("post_app_change_url", env=env_dict)
-def app_upgrade(app=[], url=None, file=None):
+def app_upgrade(app=[], url=None, file=None, force=False, no_safety_backup=False):
"""
Upgrade app
@@ -582,146 +511,280 @@ def app_upgrade(app=[], url=None, file=None):
file -- Folder or tarball for upgrade
app -- App(s) to upgrade (default all)
url -- Git url to fetch for upgrade
+ no_safety_backup -- Disable the safety backup during upgrade
"""
- if packages.dpkg_is_broken():
- raise YunohostError("dpkg_is_broken")
-
- from yunohost.hook import hook_add, hook_remove, hook_exec, hook_callback
+ from packaging import version
+ from yunohost.hook import (
+ hook_add,
+ hook_remove,
+ hook_callback,
+ hook_exec_with_script_debug_if_failure,
+ )
from yunohost.permission import permission_sync_to_user
-
- # Retrieve interface
- is_api = msettings.get('interface') == 'api'
-
- try:
- app_list()
- except YunohostError:
- raise YunohostError('app_no_upgrade')
-
- not_upgraded_apps = []
+ from yunohost.regenconf import manually_modified_files
apps = app
+ # Check if disk space available
+ if free_space_in_directory("/") <= 512 * 1000 * 1000:
+ raise YunohostValidationError("disk_space_not_sufficient_update")
# If no app is specified, upgrade all apps
if not apps:
# FIXME : not sure what's supposed to happen if there is a url and a file but no apps...
if not url and not file:
- apps = [app["id"] for app in app_list(installed=True)["apps"]]
+ apps = _installed_apps()
elif not isinstance(app, list):
apps = [app]
# Remove possible duplicates
- apps = [app for i,app in enumerate(apps) if apps not in apps[:i]]
+ apps = [app_ for i, app_ in enumerate(apps) if app_ not in apps[:i]]
# Abort if any of those app is in fact not installed..
- for app in [app for app in apps if not _is_installed(app)]:
- raise YunohostError('app_not_installed', app=app, all_apps=_get_all_installed_apps_id())
+ for app_ in apps:
+ _assert_is_installed(app_)
if len(apps) == 0:
- raise YunohostError('app_no_upgrade')
+ raise YunohostValidationError("apps_already_up_to_date")
if len(apps) > 1:
logger.info(m18n.n("app_upgrade_several_apps", apps=", ".join(apps)))
- for app_instance_name in apps:
- logger.info(m18n.n('app_upgrade_app_name', app=app_instance_name))
+ for number, app_instance_name in enumerate(apps):
+ logger.info(m18n.n("app_upgrade_app_name", app=app_instance_name))
- app_dict = app_info(app_instance_name, raw=True)
+ app_dict = app_info(app_instance_name, full=True)
- if file:
+ if file and isinstance(file, dict):
+ # We use this dirty hack to test chained upgrades in unit/functional tests
+ manifest, extracted_app_folder = _extract_app_from_file(
+ file[app_instance_name]
+ )
+ elif file:
manifest, extracted_app_folder = _extract_app_from_file(file)
elif url:
manifest, extracted_app_folder = _fetch_app_from_git(url)
elif app_dict["upgradable"] == "url_required":
- logger.warning(m18n.n('custom_app_url_required', app=app_instance_name))
+ logger.warning(m18n.n("custom_app_url_required", app=app_instance_name))
continue
- elif app_dict["upgradable"] == "yes":
+ elif app_dict["upgradable"] == "yes" or force:
manifest, extracted_app_folder = _fetch_app_from_git(app_instance_name)
else:
- logger.success(m18n.n('app_already_up_to_date', app=app_instance_name))
+ logger.success(m18n.n("app_already_up_to_date", app=app_instance_name))
continue
+ # Manage upgrade type and avoid any upgrade if there is nothing to do
+ upgrade_type = "UNKNOWN"
+ # Get current_version and new version
+ app_new_version = version.parse(manifest.get("version", "?"))
+ app_current_version = version.parse(app_dict.get("version", "?"))
+ if "~ynh" in str(app_current_version) and "~ynh" in str(app_new_version):
+ if app_current_version >= app_new_version and not force:
+ # In case of upgrade from file or custom repository
+ # No new version available
+ logger.success(m18n.n("app_already_up_to_date", app=app_instance_name))
+ # Save update time
+ now = int(time.time())
+ app_setting(app_instance_name, "update_time", now)
+ app_setting(
+ app_instance_name,
+ "current_revision",
+ manifest.get("remote", {}).get("revision", "?"),
+ )
+ continue
+ elif app_current_version > app_new_version:
+ upgrade_type = "DOWNGRADE_FORCED"
+ elif app_current_version == app_new_version:
+ upgrade_type = "UPGRADE_FORCED"
+ else:
+ app_current_version_upstream, app_current_version_pkg = str(
+ app_current_version
+ ).split("~ynh")
+ app_new_version_upstream, app_new_version_pkg = str(
+ app_new_version
+ ).split("~ynh")
+ if app_current_version_upstream == app_new_version_upstream:
+ upgrade_type = "UPGRADE_PACKAGE"
+ elif app_current_version_pkg == app_new_version_pkg:
+ upgrade_type = "UPGRADE_APP"
+ else:
+ upgrade_type = "UPGRADE_FULL"
+
# Check requirements
_check_manifest_requirements(manifest, app_instance_name=app_instance_name)
- _check_services_status_for_app(manifest.get("services", []))
+ _assert_system_is_sane_for_app(manifest, "pre")
- app_setting_path = APPS_SETTING_PATH + '/' + app_instance_name
-
- # Retrieve current app status
- status = _get_app_status(app_instance_name)
- status['remote'] = manifest.get('remote', None)
-
- # Retrieve arguments list for upgrade script
- # TODO: Allow to specify arguments
- args_odict = _parse_args_from_manifest(manifest, 'upgrade')
- args_list = [ value[0] for value in args_odict.values() ]
- args_list.append(app_instance_name)
+ app_setting_path = os.path.join(APPS_SETTING_PATH, app_instance_name)
# Prepare env. var. to pass to script
- env_dict = _make_environment_dict(args_odict)
- app_id, app_instance_nb = _parse_app_instance_name(app_instance_name)
- env_dict["YNH_APP_ID"] = app_id
- env_dict["YNH_APP_INSTANCE_NAME"] = app_instance_name
- env_dict["YNH_APP_INSTANCE_NUMBER"] = str(app_instance_nb)
+ env_dict = _make_environment_for_app_script(app_instance_name)
+ env_dict["YNH_APP_UPGRADE_TYPE"] = upgrade_type
+ env_dict["YNH_APP_MANIFEST_VERSION"] = str(app_new_version)
+ env_dict["YNH_APP_CURRENT_VERSION"] = str(app_current_version)
+ env_dict["NO_BACKUP_UPGRADE"] = "1" if no_safety_backup else "0"
+ env_dict["YNH_APP_BASEDIR"] = extracted_app_folder
- # Start register change on system
- related_to = [('app', app_instance_name)]
- operation_logger = OperationLogger('app_upgrade', related_to, env=env_dict)
- operation_logger.start()
+ # We'll check that the app didn't brutally edit some system configuration
+ manually_modified_files_before_install = manually_modified_files()
+
+ # Attempt to patch legacy helpers ...
+ _patch_legacy_helpers(extracted_app_folder)
# Apply dirty patch to make php5 apps compatible with php7
- _patch_php5(extracted_app_folder)
+ _patch_legacy_php_versions(extracted_app_folder)
- # Execute App upgrade script
- os.system('chown -hR admin: %s' % INSTALL_TMP)
- if hook_exec(extracted_app_folder + '/scripts/upgrade',
- args=args_list, env=env_dict)[0] != 0:
- msg = m18n.n('app_upgrade_failed', app=app_instance_name)
- not_upgraded_apps.append(app_instance_name)
- logger.error(msg)
- operation_logger.error(msg)
- else:
+ # Start register change on system
+ related_to = [("app", app_instance_name)]
+ operation_logger = OperationLogger("app_upgrade", related_to, env=env_dict)
+ operation_logger.start()
+
+ # Execute the app upgrade script
+ upgrade_failed = True
+ try:
+ (
+ upgrade_failed,
+ failure_message_with_debug_instructions,
+ ) = hook_exec_with_script_debug_if_failure(
+ extracted_app_folder + "/scripts/upgrade",
+ env=env_dict,
+ operation_logger=operation_logger,
+ error_message_if_script_failed=m18n.n("app_upgrade_script_failed"),
+ error_message_if_failed=lambda e: m18n.n(
+ "app_upgrade_failed", app=app_instance_name, error=e
+ ),
+ )
+ finally:
+            # Whatever happened (upgrade success or failure) we check if it broke the system
+ # and warn the user about it
+ try:
+ broke_the_system = False
+ _assert_system_is_sane_for_app(manifest, "post")
+ except Exception as e:
+ broke_the_system = True
+ logger.error(
+ m18n.n("app_upgrade_failed", app=app_instance_name, error=str(e))
+ )
+ failure_message_with_debug_instructions = operation_logger.error(str(e))
+
+ # We'll check that the app didn't brutally edit some system configuration
+ manually_modified_files_after_install = manually_modified_files()
+ manually_modified_files_by_app = set(
+ manually_modified_files_after_install
+ ) - set(manually_modified_files_before_install)
+ if manually_modified_files_by_app:
+ logger.error(
+ "Packagers /!\\ This app manually modified some system configuration files! This should not happen! If you need to do so, you should implement a proper conf_regen hook. Those configuration were affected:\n - "
+ + "\n -".join(manually_modified_files_by_app)
+ )
+
+ # If upgrade failed or broke the system,
+ # raise an error and interrupt all other pending upgrades
+ if upgrade_failed or broke_the_system:
+
+ # display this if there are remaining apps
+ if apps[number + 1 :]:
+ not_upgraded_apps = apps[number:]
+ logger.error(
+ m18n.n(
+ "app_not_upgraded",
+ failed_app=app_instance_name,
+ apps=", ".join(not_upgraded_apps),
+ )
+ )
+
+ raise YunohostError(
+ failure_message_with_debug_instructions, raw_msg=True
+ )
+
+ # Otherwise we're good and keep going !
now = int(time.time())
- # TODO: Move install_time away from app_setting
- app_setting(app_instance_name, 'update_time', now)
- status['upgraded_at'] = now
+ app_setting(app_instance_name, "update_time", now)
+ app_setting(
+ app_instance_name,
+ "current_revision",
+ manifest.get("remote", {}).get("revision", "?"),
+ )
# Clean hooks and add new ones
hook_remove(app_instance_name)
- if 'hooks' in os.listdir(extracted_app_folder):
- for hook in os.listdir(extracted_app_folder + '/hooks'):
- hook_add(app_instance_name, extracted_app_folder + '/hooks/' + hook)
-
- # Store app status
- with open(app_setting_path + '/status.json', 'w+') as f:
- json.dump(status, f)
+ if "hooks" in os.listdir(extracted_app_folder):
+ for hook in os.listdir(extracted_app_folder + "/hooks"):
+ hook_add(app_instance_name, extracted_app_folder + "/hooks/" + hook)
# Replace scripts and manifest and conf (if exists)
- os.system('rm -rf "%s/scripts" "%s/manifest.toml %s/manifest.json %s/conf"' % (app_setting_path, app_setting_path, app_setting_path))
+ os.system(
+ 'rm -rf "%s/scripts" "%s/manifest.toml %s/manifest.json %s/conf"'
+ % (
+ app_setting_path,
+ app_setting_path,
+ app_setting_path,
+ app_setting_path,
+ )
+ )
if os.path.exists(os.path.join(extracted_app_folder, "manifest.json")):
- os.system('mv "%s/manifest.json" "%s/scripts" %s' % (extracted_app_folder, extracted_app_folder, app_setting_path))
+ os.system(
+ 'mv "%s/manifest.json" "%s/scripts" %s'
+ % (extracted_app_folder, extracted_app_folder, app_setting_path)
+ )
if os.path.exists(os.path.join(extracted_app_folder, "manifest.toml")):
- os.system('mv "%s/manifest.toml" "%s/scripts" %s' % (extracted_app_folder, extracted_app_folder, app_setting_path))
+ os.system(
+ 'mv "%s/manifest.toml" "%s/scripts" %s'
+ % (extracted_app_folder, extracted_app_folder, app_setting_path)
+ )
- for file_to_copy in ["actions.json", "actions.toml", "config_panel.json", "config_panel.toml", "conf"]:
+ for file_to_copy in [
+ "actions.json",
+ "actions.toml",
+ "config_panel.toml",
+ "conf",
+ ]:
if os.path.exists(os.path.join(extracted_app_folder, file_to_copy)):
- os.system('cp -R %s/%s %s' % (extracted_app_folder, file_to_copy, app_setting_path))
+ os.system(
+ "cp -R %s/%s %s"
+ % (extracted_app_folder, file_to_copy, app_setting_path)
+ )
+
+ # Clean and set permissions
+ shutil.rmtree(extracted_app_folder)
+ os.system("chmod 600 %s" % app_setting_path)
+ os.system("chmod 400 %s/settings.yml" % app_setting_path)
+ os.system("chown -R root: %s" % app_setting_path)
# So much win
- logger.success(m18n.n('app_upgraded', app=app_instance_name))
+ logger.success(m18n.n("app_upgraded", app=app_instance_name))
- hook_callback('post_app_upgrade', args=args_list, env=env_dict)
+ hook_callback("post_app_upgrade", env=env_dict)
operation_logger.success()
- if not_upgraded_apps:
- raise YunohostError('app_not_upgraded', apps=', '.join(not_upgraded_apps))
-
permission_sync_to_user()
- logger.success(m18n.n('upgrade_complete'))
+ logger.success(m18n.n("upgrade_complete"))
+
+
+def app_manifest(app):
+
+ raw_app_list = _load_apps_catalog()["apps"]
+
+ if app in raw_app_list or ("@" in app) or ("http://" in app) or ("https://" in app):
+ manifest, extracted_app_folder = _fetch_app_from_git(app)
+ elif os.path.exists(app):
+ manifest, extracted_app_folder = _extract_app_from_file(app)
+ else:
+ raise YunohostValidationError("app_unknown")
+
+ shutil.rmtree(extracted_app_folder)
+
+ return manifest
@is_unit_operation()
-def app_install(operation_logger, app, label=None, args=None, no_remove_on_failure=False, force=False):
+def app_install(
+ operation_logger,
+ app,
+ label=None,
+ args=None,
+ no_remove_on_failure=False,
+ force=False,
+):
"""
Install apps
@@ -732,47 +795,70 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu
no_remove_on_failure -- Debug option to avoid removing the app on a failed installation
force -- Do not ask for confirmation when installing experimental / low-quality apps
"""
- if packages.dpkg_is_broken():
- raise YunohostError("dpkg_is_broken")
- from yunohost.utils.ldap import _get_ldap_interface
- from yunohost.hook import hook_add, hook_remove, hook_exec, hook_callback
+ from yunohost.hook import (
+ hook_add,
+ hook_remove,
+ hook_callback,
+ hook_exec,
+ hook_exec_with_script_debug_if_failure,
+ )
from yunohost.log import OperationLogger
- from yunohost.permission import permission_add, permission_update, permission_remove, permission_sync_to_user
- ldap = _get_ldap_interface()
-
- # Fetch or extract sources
- if not os.path.exists(INSTALL_TMP):
- os.makedirs(INSTALL_TMP)
-
- status = {
- 'installed_at': int(time.time()),
- 'upgraded_at': None,
- 'remote': {
- 'type': None,
- },
- }
+ from yunohost.permission import (
+ user_permission_list,
+ permission_create,
+ permission_delete,
+ permission_sync_to_user,
+ )
+ from yunohost.regenconf import manually_modified_files
def confirm_install(confirm):
# Ignore if there's nothing for confirm (good quality app), if --force is used
# or if request on the API (confirm already implemented on the API side)
- if confirm is None or force or msettings.get('interface') == 'api':
+ if confirm is None or force or Moulinette.interface.type == "api":
return
- answer = msignals.prompt(m18n.n('confirm_app_install_' + confirm,
- answers='Y/N'))
- if answer.upper() != "Y":
- raise YunohostError("aborting")
+ # i18n: confirm_app_install_warning
+ # i18n: confirm_app_install_danger
+ # i18n: confirm_app_install_thirdparty
- raw_app_list = app_list(raw=True)
+ if confirm in ["danger", "thirdparty"]:
+ answer = Moulinette.prompt(
+ m18n.n("confirm_app_install_" + confirm, answers="Yes, I understand"),
+ color="red",
+ )
+ if answer != "Yes, I understand":
+ raise YunohostError("aborting")
- if app in raw_app_list or ('@' in app) or ('http://' in app) or ('https://' in app):
+ else:
+ answer = Moulinette.prompt(
+ m18n.n("confirm_app_install_" + confirm, answers="Y/N"), color="yellow"
+ )
+ if answer.upper() != "Y":
+ raise YunohostError("aborting")
+
+ raw_app_list = _load_apps_catalog()["apps"]
+
+ if app in raw_app_list or ("@" in app) or ("http://" in app) or ("https://" in app):
+
+        # If we got an app name directly (e.g. just "wordpress"), we will test this name
if app in raw_app_list:
- state = raw_app_list[app].get("state", "notworking")
- level = raw_app_list[app].get("level", None)
+ app_name_to_test = app
+        # If we got a URL like "https://github.com/foo/bar_ynh", we want to
+ # extract "bar" and test if we know this app
+ elif ("http://" in app) or ("https://" in app):
+ app_name_to_test = app.strip("/").split("/")[-1].replace("_ynh", "")
+ else:
+            # FIXME : what to do if '@' in app ?
+ app_name_to_test = None
+
+ if app_name_to_test in raw_app_list:
+
+ state = raw_app_list[app_name_to_test].get("state", "notworking")
+ level = raw_app_list[app_name_to_test].get("level", None)
confirm = "danger"
if state in ["working", "validated"]:
- if isinstance(level, int) and level >= 3:
+ if isinstance(level, int) and level >= 5:
confirm = None
elif isinstance(level, int) and level > 0:
confirm = "warning"
@@ -786,54 +872,59 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu
confirm_install("thirdparty")
manifest, extracted_app_folder = _extract_app_from_file(app)
else:
- raise YunohostError('app_unknown')
- status['remote'] = manifest.get('remote', {})
+ raise YunohostValidationError("app_unknown")
+
+ # Check if disk space available
+ if free_space_in_directory("/") <= 512 * 1000 * 1000:
+ raise YunohostValidationError("disk_space_not_sufficient_install")
# Check ID
- if 'id' not in manifest or '__' in manifest['id']:
- raise YunohostError('app_id_invalid')
+ if "id" not in manifest or "__" in manifest["id"] or "." in manifest["id"]:
+ raise YunohostValidationError("app_id_invalid")
- app_id = manifest['id']
+ app_id = manifest["id"]
+ label = label if label else manifest["name"]
# Check requirements
_check_manifest_requirements(manifest, app_id)
- _check_services_status_for_app(manifest.get("services", []))
+ _assert_system_is_sane_for_app(manifest, "pre")
# Check if app can be forked
instance_number = _installed_instance_number(app_id, last=True) + 1
if instance_number > 1:
- if 'multi_instance' not in manifest or not is_true(manifest['multi_instance']):
- raise YunohostError('app_already_installed', app=app_id)
+ if "multi_instance" not in manifest or not is_true(manifest["multi_instance"]):
+ raise YunohostValidationError("app_already_installed", app=app_id)
# Change app_id to the forked app id
- app_instance_name = app_id + '__' + str(instance_number)
+ app_instance_name = app_id + "__" + str(instance_number)
else:
app_instance_name = app_id
# Retrieve arguments list for install script
- args_dict = {} if not args else \
- dict(urlparse.parse_qsl(args, keep_blank_values=True))
- args_odict = _parse_args_from_manifest(manifest, 'install', args=args_dict)
- args_list = [ value[0] for value in args_odict.values() ]
- args_list.append(app_instance_name)
+ raw_questions = manifest.get("arguments", {}).get("install", {})
+ questions = ask_questions_and_parse_answers(raw_questions, prefilled_answers=args)
+ args = {
+ question.name: question.value
+ for question in questions
+ if question.value is not None
+ }
- # Prepare env. var. to pass to script
- env_dict = _make_environment_dict(args_odict)
- env_dict["YNH_APP_ID"] = app_id
- env_dict["YNH_APP_INSTANCE_NAME"] = app_instance_name
- env_dict["YNH_APP_INSTANCE_NUMBER"] = str(instance_number)
+ # Validate domain / path availability for webapps
+ path_requirement = _guess_webapp_path_requirement(questions, extracted_app_folder)
+ _validate_webpath_requirement(questions, path_requirement)
- # Start register change on system
- operation_logger.extra.update({'env': env_dict})
+ # Attempt to patch legacy helpers ...
+ _patch_legacy_helpers(extracted_app_folder)
- # Tell the operation_logger to redact all password-type args
- # Also redact the % escaped version of the password that might appear in
- # the 'args' section of metadata (relevant for password with non-alphanumeric char)
- data_to_redact = [ value[0] for value in args_odict.values() if value[1] == "password" ]
- data_to_redact += [ urllib.quote(data) for data in data_to_redact if urllib.quote(data) != data ]
- operation_logger.data_to_redact.extend(data_to_redact)
+ # Apply dirty patch to make php5 apps compatible with php7
+ _patch_legacy_php_versions(extracted_app_folder)
- operation_logger.related_to = [s for s in operation_logger.related_to if s[0] != "app"]
+ # We'll check that the app didn't brutally edit some system configuration
+ manually_modified_files_before_install = manually_modified_files()
+
+ operation_logger.related_to = [
+ s for s in operation_logger.related_to if s[0] != "app"
+ ]
operation_logger.related_to.append(("app", app_id))
operation_logger.start()
@@ -847,80 +938,155 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu
# Set initial app settings
app_settings = {
- 'id': app_instance_name,
- 'label': label if label else manifest['name'],
+ "id": app_instance_name,
+ "install_time": int(time.time()),
+ "current_revision": manifest.get("remote", {}).get("revision", "?"),
}
- # TODO: Move install_time away from app settings
- app_settings['install_time'] = status['installed_at']
_set_app_settings(app_instance_name, app_settings)
- # Apply dirty patch to make php5 apps compatible with php7
- _patch_php5(extracted_app_folder)
-
- os.system('chown -R admin: ' + extracted_app_folder)
-
- # Execute App install script
- os.system('chown -hR admin: %s' % INSTALL_TMP)
# Move scripts and manifest to the right place
if os.path.exists(os.path.join(extracted_app_folder, "manifest.json")):
- os.system('cp %s/manifest.json %s' % (extracted_app_folder, app_setting_path))
+ os.system("cp %s/manifest.json %s" % (extracted_app_folder, app_setting_path))
if os.path.exists(os.path.join(extracted_app_folder, "manifest.toml")):
- os.system('cp %s/manifest.toml %s' % (extracted_app_folder, app_setting_path))
- os.system('cp -R %s/scripts %s' % (extracted_app_folder, app_setting_path))
+ os.system("cp %s/manifest.toml %s" % (extracted_app_folder, app_setting_path))
+ os.system("cp -R %s/scripts %s" % (extracted_app_folder, app_setting_path))
- for file_to_copy in ["actions.json", "actions.toml", "config_panel.json", "config_panel.toml", "conf"]:
+ for file_to_copy in [
+ "actions.json",
+ "actions.toml",
+ "config_panel.toml",
+ "conf",
+ ]:
if os.path.exists(os.path.join(extracted_app_folder, file_to_copy)):
- os.system('cp -R %s/%s %s' % (extracted_app_folder, file_to_copy, app_setting_path))
+ os.system(
+ "cp -R %s/%s %s"
+ % (extracted_app_folder, file_to_copy, app_setting_path)
+ )
- # Create permission before the install (useful if the install script redefine the permission)
- # Note that sync_perm is disabled to avoid triggering a whole bunch of code and messages
- # can't be sure that we don't have one case when it's needed
- permission_add(app=app_instance_name, permission="main", sync_perm=False)
+ # Initialize the main permission for the app
+ # The permission is initialized with no url associated, and with tile disabled
+    # For web apps, the root path of the app will be added as the URL and the tile
+ # will be enabled during the app install. C.f. 'app_register_url()' below.
+ permission_create(
+ app_instance_name + ".main",
+ allowed=["all_users"],
+ label=label,
+ show_tile=False,
+ protected=False,
+ )
+
+ # Prepare env. var. to pass to script
+ env_dict = _make_environment_for_app_script(app_instance_name, args=args)
+ env_dict["YNH_APP_BASEDIR"] = extracted_app_folder
+
+ env_dict_for_logging = env_dict.copy()
+ for question in questions:
+ # Or should it be more generally question.redact ?
+ if question.type == "password":
+ del env_dict_for_logging["YNH_APP_ARG_%s" % question.name.upper()]
+
+ operation_logger.extra.update({"env": env_dict_for_logging})
# Execute the app install script
- install_retcode = 1
+ install_failed = True
try:
- install_retcode = hook_exec(
- os.path.join(extracted_app_folder, 'scripts/install'),
- args=args_list, env=env_dict
- )[0]
- except (KeyboardInterrupt, EOFError):
- install_retcode = -1
- except Exception:
- import traceback
- logger.exception(m18n.n('unexpected_error', error=u"\n" + traceback.format_exc()))
+ (
+ install_failed,
+ failure_message_with_debug_instructions,
+ ) = hook_exec_with_script_debug_if_failure(
+ os.path.join(extracted_app_folder, "scripts/install"),
+ env=env_dict,
+ operation_logger=operation_logger,
+ error_message_if_script_failed=m18n.n("app_install_script_failed"),
+ error_message_if_failed=lambda e: m18n.n(
+ "app_install_failed", app=app_id, error=e
+ ),
+ )
finally:
- if install_retcode != 0:
- error_msg = operation_logger.error(m18n.n('unexpected_error', error='shell command return code: %s' % install_retcode))
- if not no_remove_on_failure:
- # Setup environment for remove script
- env_dict_remove = {}
- env_dict_remove["YNH_APP_ID"] = app_id
- env_dict_remove["YNH_APP_INSTANCE_NAME"] = app_instance_name
- env_dict_remove["YNH_APP_INSTANCE_NUMBER"] = str(instance_number)
+ # If success so far, validate that app didn't break important stuff
+ if not install_failed:
+ try:
+ broke_the_system = False
+ _assert_system_is_sane_for_app(manifest, "post")
+ except Exception as e:
+ broke_the_system = True
+ logger.error(m18n.n("app_install_failed", app=app_id, error=str(e)))
+ failure_message_with_debug_instructions = operation_logger.error(str(e))
- # Execute remove script
- operation_logger_remove = OperationLogger('remove_on_failed_install',
- [('app', app_instance_name)],
- env=env_dict_remove)
- operation_logger_remove.start()
+ # We'll check that the app didn't brutally edit some system configuration
+ manually_modified_files_after_install = manually_modified_files()
+ manually_modified_files_by_app = set(
+ manually_modified_files_after_install
+ ) - set(manually_modified_files_before_install)
+ if manually_modified_files_by_app:
+ logger.error(
+ "Packagers /!\\ This app manually modified some system configuration files! This should not happen! If you need to do so, you should implement a proper conf_regen hook. Those configuration were affected:\n - "
+ + "\n -".join(manually_modified_files_by_app)
+ )
+ # If the install failed or broke the system, we remove it
+ if install_failed or broke_the_system:
+
+ # This option is meant for packagers to debug their apps more easily
+ if no_remove_on_failure:
+ raise YunohostError(
+ "The installation of %s failed, but was not cleaned up as requested by --no-remove-on-failure."
+ % app_id,
+ raw_msg=True,
+ )
+ else:
+ logger.warning(m18n.n("app_remove_after_failed_install"))
+
+ # Setup environment for remove script
+ env_dict_remove = {}
+ env_dict_remove["YNH_APP_ID"] = app_id
+ env_dict_remove["YNH_APP_INSTANCE_NAME"] = app_instance_name
+ env_dict_remove["YNH_APP_INSTANCE_NUMBER"] = str(instance_number)
+ env_dict_remove["YNH_APP_MANIFEST_VERSION"] = manifest.get("version", "?")
+ env_dict_remove["YNH_APP_BASEDIR"] = extracted_app_folder
+
+ # Execute remove script
+ operation_logger_remove = OperationLogger(
+ "remove_on_failed_install",
+ [("app", app_instance_name)],
+ env=env_dict_remove,
+ )
+ operation_logger_remove.start()
+
+ # Try to remove the app
+ try:
remove_retcode = hook_exec(
- os.path.join(extracted_app_folder, 'scripts/remove'),
- args=[app_instance_name], env=env_dict_remove
+ os.path.join(extracted_app_folder, "scripts/remove"),
+ args=[app_instance_name],
+ env=env_dict_remove,
)[0]
- # Remove all permission in LDAP
- result = ldap.search(base='ou=permission,dc=yunohost,dc=org',
- filter='(&(objectclass=permissionYnh)(cn=*.%s))' % app_instance_name, attrs=['cn'])
- permission_list = [p['cn'][0] for p in result]
- for l in permission_list:
- permission_remove(app_instance_name, l.split('.')[0], force=True)
- if remove_retcode != 0:
- msg = m18n.n('app_not_properly_removed',
- app=app_instance_name)
- logger.warning(msg)
- operation_logger_remove.error(msg)
+ # Here again, calling hook_exec could fail miserably, or get
+ # manually interrupted (by mistake or because script was stuck)
+ # In that case we still want to proceed with the rest of the
+ # removal (permissions, /etc/yunohost/apps/{app} ...)
+ except (KeyboardInterrupt, EOFError, Exception):
+ remove_retcode = -1
+ import traceback
+
+ logger.error(
+ m18n.n("unexpected_error", error="\n" + traceback.format_exc())
+ )
+
+ # Remove all permission in LDAP
+ for permission_name in user_permission_list()["permissions"].keys():
+ if permission_name.startswith(app_instance_name + "."):
+ permission_delete(permission_name, force=True, sync_perm=False)
+
+ if remove_retcode != 0:
+ msg = m18n.n("app_not_properly_removed", app=app_instance_name)
+ logger.warning(msg)
+ operation_logger_remove.error(msg)
+ else:
+ try:
+ _assert_system_is_sane_for_app(manifest, "post")
+ except Exception as e:
+ operation_logger_remove.error(e)
else:
operation_logger_remove.success()
@@ -928,119 +1094,112 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu
shutil.rmtree(app_setting_path)
shutil.rmtree(extracted_app_folder)
- app_ssowatconf()
+ permission_sync_to_user()
- if packages.dpkg_is_broken():
- logger.error(m18n.n("this_action_broke_dpkg"))
-
- if install_retcode == -1:
- msg = m18n.n('operation_interrupted') + " " + error_msg
- raise YunohostError(msg, raw_msg=True)
- msg = error_msg
- raise YunohostError(msg, raw_msg=True)
+ raise YunohostError(failure_message_with_debug_instructions, raw_msg=True)
# Clean hooks and add new ones
hook_remove(app_instance_name)
- if 'hooks' in os.listdir(extracted_app_folder):
- for file in os.listdir(extracted_app_folder + '/hooks'):
- hook_add(app_instance_name, extracted_app_folder + '/hooks/' + file)
-
- # Store app status
- with open(app_setting_path + '/status.json', 'w+') as f:
- json.dump(status, f)
+ if "hooks" in os.listdir(extracted_app_folder):
+ for file in os.listdir(extracted_app_folder + "/hooks"):
+ hook_add(app_instance_name, extracted_app_folder + "/hooks/" + file)
# Clean and set permissions
shutil.rmtree(extracted_app_folder)
- os.system('chmod -R 400 %s' % app_setting_path)
- os.system('chown -R root: %s' % app_setting_path)
- os.system('chown -R admin: %s/scripts' % app_setting_path)
+ os.system("chmod 600 %s" % app_setting_path)
+ os.system("chmod 400 %s/settings.yml" % app_setting_path)
+ os.system("chown -R root: %s" % app_setting_path)
- # Add path in permission if it's defined in the app install script
- app_settings = _get_app_settings(app_instance_name)
- domain = app_settings.get('domain', None)
- path = app_settings.get('path', None)
- if domain and path:
- permission_update(app_instance_name, permission="main", add_url=[domain+path], sync_perm=False)
+ logger.success(m18n.n("installation_complete"))
- permission_sync_to_user()
-
- logger.success(m18n.n('installation_complete'))
-
- hook_callback('post_app_install', args=args_list, env=env_dict)
+ hook_callback("post_app_install", env=env_dict)
@is_unit_operation()
-def app_remove(operation_logger, app):
+def app_remove(operation_logger, app, purge=False):
"""
Remove app
- Keyword argument:
+ Keyword arguments:
app -- App(s) to delete
+ purge -- Remove with all app data
"""
- from yunohost.utils.ldap import _get_ldap_interface
from yunohost.hook import hook_exec, hook_remove, hook_callback
- from yunohost.permission import permission_remove, permission_sync_to_user
+ from yunohost.permission import (
+ user_permission_list,
+ permission_delete,
+ permission_sync_to_user,
+ )
+
if not _is_installed(app):
- raise YunohostError('app_not_installed', app=app, all_apps=_get_all_installed_apps_id())
+ raise YunohostValidationError(
+ "app_not_installed", app=app, all_apps=_get_all_installed_apps_id()
+ )
operation_logger.start()
logger.info(m18n.n("app_start_remove", app=app))
- app_setting_path = APPS_SETTING_PATH + app
+ app_setting_path = os.path.join(APPS_SETTING_PATH, app)
- # TODO: display fail messages from script
- try:
- shutil.rmtree('/tmp/yunohost_remove')
- except:
- pass
+ # Attempt to patch legacy helpers ...
+ _patch_legacy_helpers(app_setting_path)
# Apply dirty patch to make php5 apps compatible with php7 (e.g. the remove
# script might date back from jessie install)
- _patch_php5(app_setting_path)
+ _patch_legacy_php_versions(app_setting_path)
- os.system('cp -a %s /tmp/yunohost_remove && chown -hR admin: /tmp/yunohost_remove' % app_setting_path)
- os.system('chown -R admin: /tmp/yunohost_remove')
- os.system('chmod -R u+rX /tmp/yunohost_remove')
-
- args_list = [app]
+ manifest = _get_manifest_of_app(app_setting_path)
+ tmp_workdir_for_app = _make_tmp_workdir_for_app(app=app)
+ remove_script = f"{tmp_workdir_for_app}/scripts/remove"
env_dict = {}
app_id, app_instance_nb = _parse_app_instance_name(app)
env_dict["YNH_APP_ID"] = app_id
env_dict["YNH_APP_INSTANCE_NAME"] = app
env_dict["YNH_APP_INSTANCE_NUMBER"] = str(app_instance_nb)
- operation_logger.extra.update({'env': env_dict})
+ env_dict["YNH_APP_MANIFEST_VERSION"] = manifest.get("version", "?")
+ env_dict["YNH_APP_PURGE"] = str(purge)
+ env_dict["YNH_APP_BASEDIR"] = tmp_workdir_for_app
+
+ operation_logger.extra.update({"env": env_dict})
operation_logger.flush()
- if hook_exec('/tmp/yunohost_remove/scripts/remove', args=args_list,
- env=env_dict)[0] == 0:
- logger.success(m18n.n('app_removed', app=app))
+ try:
+ ret = hook_exec(remove_script, env=env_dict)[0]
+ # Here again, calling hook_exec could fail miserably, or get
+ # manually interrupted (by mistake or because script was stuck)
+ # In that case we still want to proceed with the rest of the
+ # removal (permissions, /etc/yunohost/apps/{app} ...)
+ except (KeyboardInterrupt, EOFError, Exception):
+ ret = -1
+ import traceback
- hook_callback('post_app_remove', args=args_list, env=env_dict)
+ logger.error(m18n.n("unexpected_error", error="\n" + traceback.format_exc()))
+ finally:
+ shutil.rmtree(tmp_workdir_for_app)
+
+ if ret == 0:
+ logger.success(m18n.n("app_removed", app=app))
+ hook_callback("post_app_remove", env=env_dict)
+ else:
+ logger.warning(m18n.n("app_not_properly_removed", app=app))
+
+ # Remove all permission in LDAP
+ for permission_name in user_permission_list(apps=[app])["permissions"].keys():
+ permission_delete(permission_name, force=True, sync_perm=False)
if os.path.exists(app_setting_path):
shutil.rmtree(app_setting_path)
- shutil.rmtree('/tmp/yunohost_remove')
+
hook_remove(app)
- # Remove all permission in LDAP
- ldap = _get_ldap_interface()
- result = ldap.search(base='ou=permission,dc=yunohost,dc=org',
- filter='(&(objectclass=permissionYnh)(cn=*.%s))' % app, attrs=['cn'])
- permission_list = [p['cn'][0] for p in result]
- for l in permission_list:
- permission_remove(app, l.split('.')[0], force=True, sync_perm=False)
-
permission_sync_to_user()
-
- if packages.dpkg_is_broken():
- raise YunohostError("this_action_broke_dpkg")
+ _assert_system_is_sane_for_app(manifest, "post")
-@is_unit_operation(['permission','app'])
-def app_addaccess(operation_logger, apps, users=[]):
+def app_addaccess(apps, users=[]):
"""
Grant access right to users (everyone by default)
@@ -1051,15 +1210,17 @@ def app_addaccess(operation_logger, apps, users=[]):
"""
from yunohost.permission import user_permission_update
- permission = user_permission_update(operation_logger, app=apps, permission="main", add_username=users)
+ output = {}
+ for app in apps:
+ permission = user_permission_update(
+ app + ".main", add=users, remove="all_users"
+ )
+ output[app] = permission["corresponding_users"]
- result = {p : v['main']['allowed_users'] for p, v in permission['permissions'].items()}
-
- return {'allowed_users': result}
+ return {"allowed_users": output}
-@is_unit_operation(['permission','app'])
-def app_removeaccess(operation_logger, apps, users=[]):
+def app_removeaccess(apps, users=[]):
"""
Revoke access right to users (everyone by default)
@@ -1070,15 +1231,15 @@ def app_removeaccess(operation_logger, apps, users=[]):
"""
from yunohost.permission import user_permission_update
- permission = user_permission_update(operation_logger, app=apps, permission="main", del_username=users)
+ output = {}
+ for app in apps:
+ permission = user_permission_update(app + ".main", remove=users)
+ output[app] = permission["corresponding_users"]
- result = {p : v['main']['allowed_users'] for p, v in permission['permissions'].items()}
-
- return {'allowed_users': result}
+ return {"allowed_users": output}
-@is_unit_operation(['permission','app'])
-def app_clearaccess(operation_logger, apps):
+def app_clearaccess(apps):
"""
Reset access rights for the app
@@ -1086,34 +1247,14 @@ def app_clearaccess(operation_logger, apps):
apps
"""
- from yunohost.permission import user_permission_clear
+ from yunohost.permission import user_permission_reset
- permission = user_permission_clear(operation_logger, app=apps, permission="main")
+ output = {}
+ for app in apps:
+ permission = user_permission_reset(app + ".main")
+ output[app] = permission["corresponding_users"]
- result = {p : v['main']['allowed_users'] for p, v in permission['permissions'].items()}
-
- return {'allowed_users': result}
-
-def app_debug(app):
- """
- Display debug informations for an app
-
- Keyword argument:
- app
- """
- manifest = _get_manifest_of_app(os.path.join(APPS_SETTING_PATH, app))
-
- return {
- 'name': manifest['id'],
- 'label': manifest['name'],
- 'services': [{
- "name": x,
- "logs": [{
- "file_name": y,
- "file_content": "\n".join(z),
- } for (y, z) in sorted(service_log(x).items(), key=lambda x: x[0])],
- } for x in sorted(manifest.get("services", []))]
- }
+ return {"allowed_users": output}
@is_unit_operation()
@@ -1126,45 +1267,49 @@ def app_makedefault(operation_logger, app, domain=None):
domain
"""
- from yunohost.domain import domain_list
+ from yunohost.domain import _assert_domain_exists
app_settings = _get_app_settings(app)
- app_domain = app_settings['domain']
- app_path = app_settings['path']
+ app_domain = app_settings["domain"]
+ app_path = app_settings["path"]
if domain is None:
domain = app_domain
- operation_logger.related_to.append(('domain', domain))
- elif domain not in domain_list()['domains']:
- raise YunohostError('domain_unknown')
+
+ _assert_domain_exists(domain)
+
+ operation_logger.related_to.append(("domain", domain))
+
+ if "/" in app_map(raw=True)[domain]:
+ raise YunohostValidationError(
+ "app_make_default_location_already_used",
+ app=app,
+ domain=app_domain,
+ other_app=app_map(raw=True)[domain]["/"]["id"],
+ )
operation_logger.start()
- if '/' in app_map(raw=True)[domain]:
- raise YunohostError('app_make_default_location_already_used', app=app, domain=app_domain,
- other_app=app_map(raw=True)[domain]["/"]["id"])
- try:
- with open('/etc/ssowat/conf.json.persistent') as json_conf:
- ssowat_conf = json.loads(str(json_conf.read()))
- except ValueError as e:
- raise YunohostError('ssowat_persistent_conf_read_error', error=e)
- except IOError:
+    # TODO / FIXME : current trick is to add this to conf.json.persistent
+ # This is really not robust and should be improved
+ # e.g. have a flag in /etc/yunohost/apps/$app/ to say that this is the
+ # default app or idk...
+ if not os.path.exists("/etc/ssowat/conf.json.persistent"):
ssowat_conf = {}
+ else:
+ ssowat_conf = read_json("/etc/ssowat/conf.json.persistent")
- if 'redirected_urls' not in ssowat_conf:
- ssowat_conf['redirected_urls'] = {}
+ if "redirected_urls" not in ssowat_conf:
+ ssowat_conf["redirected_urls"] = {}
- ssowat_conf['redirected_urls'][domain + '/'] = app_domain + app_path
+ ssowat_conf["redirected_urls"][domain + "/"] = app_domain + app_path
- try:
- with open('/etc/ssowat/conf.json.persistent', 'w+') as f:
- json.dump(ssowat_conf, f, sort_keys=True, indent=4)
- except IOError as e:
- raise YunohostError('ssowat_persistent_conf_write_error', error=e)
+ write_to_json(
+ "/etc/ssowat/conf.json.persistent", ssowat_conf, sort_keys=True, indent=4
+ )
+ os.system("chmod 644 /etc/ssowat/conf.json.persistent")
- os.system('chmod 644 /etc/ssowat/conf.json.persistent')
-
- logger.success(m18n.n('ssowat_conf_updated'))
+ logger.success(m18n.n("ssowat_conf_updated"))
def app_setting(app, key, value=None, delete=False):
@@ -1180,39 +1325,137 @@ def app_setting(app, key, value=None, delete=False):
"""
app_settings = _get_app_settings(app) or {}
- if value is None and not delete:
- try:
- return app_settings[key]
- except Exception as e:
- logger.debug("cannot get app setting '%s' for '%s' (%s)", key, app, e)
- return None
- else:
- if delete and key in app_settings:
- del app_settings[key]
+ #
+ # Legacy permission setting management
+ # (unprotected, protected, skipped_uri/regex)
+ #
+
+ is_legacy_permission_setting = any(
+ key.startswith(word + "_") for word in ["unprotected", "protected", "skipped"]
+ )
+
+ if is_legacy_permission_setting:
+
+ from yunohost.permission import (
+ user_permission_list,
+ user_permission_update,
+ permission_create,
+ permission_delete,
+ permission_url,
+ )
+
+ permissions = user_permission_list(full=True, apps=[app])["permissions"]
+ permission_name = "%s.legacy_%s_uris" % (app, key.split("_")[0])
+ permission = permissions.get(permission_name)
+
+ # GET
+ if value is None and not delete:
+ return (
+ ",".join(permission.get("uris", []) + permission["additional_urls"])
+ if permission
+ else None
+ )
+
+ # DELETE
+ if delete:
+ # If 'is_public' setting still exists, we interpret this as
+ # coming from a legacy app (because new apps shouldn't manage the
+ # is_public state themselves anymore...)
+ #
+ # In that case, we interpret the request for "deleting
+ # unprotected/skipped" setting as willing to make the app
+ # private
+ if (
+ "is_public" in app_settings
+ and "visitors" in permissions[app + ".main"]["allowed"]
+ ):
+ if key.startswith("unprotected_") or key.startswith("skipped_"):
+ user_permission_update(app + ".main", remove="visitors")
+
+ if permission:
+ permission_delete(permission_name)
+
+ # SET
else:
- # FIXME: Allow multiple values for some keys?
- if key in ['redirected_urls', 'redirected_regex']:
- value = yaml.load(value)
- app_settings[key] = value
- _set_app_settings(app, app_settings)
+ urls = value
+            # If the request is about the root of the app (/) — the vast majority of cases —
+ # we interpret this as a change for the main permission
+ # (i.e. allowing/disallowing visitors)
+ if urls == "/":
+ if key.startswith("unprotected_") or key.startswith("skipped_"):
+ permission_url(app + ".main", url="/", sync_perm=False)
+ user_permission_update(app + ".main", add="visitors")
+ else:
+ user_permission_update(app + ".main", remove="visitors")
+ else:
-def app_checkport(port):
- """
- Check availability of a local port
+ urls = urls.split(",")
+ if key.endswith("_regex"):
+ urls = ["re:" + url for url in urls]
- Keyword argument:
- port -- Port to check
+ if permission:
+                    # If setting a new regex, keep the existing plain urls so they can
+                    # be re-added to additional_urls; for new urls, keep the regexes instead
+ if key.endswith("_regex"):
+ # List of urls to save
+ current_urls_or_regex = [
+ url
+ for url in permission["additional_urls"]
+ if not url.startswith("re:")
+ ]
+ else:
+ # List of regex to save
+ current_urls_or_regex = [
+ url
+ for url in permission["additional_urls"]
+ if url.startswith("re:")
+ ]
- """
+ new_urls = urls + current_urls_or_regex
+ # We need to clear urls because in the old setting the new setting override the old one and dont just add some urls
+ permission_url(permission_name, clear_urls=True, sync_perm=False)
+ permission_url(permission_name, add_url=new_urls)
+ else:
+ from yunohost.utils.legacy import legacy_permission_label
- # This import cannot be moved on top of file because it create a recursive
- # import...
- from yunohost.tools import tools_port_available
- if tools_port_available(port):
- logger.success(m18n.n('port_available', port=int(port)))
+ # Let's create a "special" permission for the legacy settings
+ permission_create(
+ permission=permission_name,
+                    # FIXME find a way to limit this to only the users allowed on the main permission
+ allowed=["all_users"]
+ if key.startswith("protected_")
+ else ["all_users", "visitors"],
+ url=None,
+ additional_urls=urls,
+ auth_header=not key.startswith("skipped_"),
+ label=legacy_permission_label(app, key.split("_")[0]),
+ show_tile=False,
+ protected=True,
+ )
+
+ return
+
+ #
+ # Regular setting management
+ #
+
+ # GET
+ if value is None and not delete:
+ return app_settings.get(key, None)
+
+ # DELETE
+ if delete:
+ if key in app_settings:
+ del app_settings[key]
+
+ # SET
else:
- raise YunohostError('port_unavailable', port=int(port))
+ if key in ["redirected_urls", "redirected_regex"]:
+ value = yaml.safe_load(value)
+ app_settings[key] = value
+
+ _set_app_settings(app, app_settings)
def app_register_url(app, domain, path):
@@ -1224,125 +1467,38 @@ def app_register_url(app, domain, path):
domain -- The domain on which the app should be registered (e.g. your.domain.tld)
path -- The path to be registered (e.g. /coffee)
"""
+ from yunohost.permission import (
+ permission_url,
+ user_permission_update,
+ permission_sync_to_user,
+ )
- # This line can't be moved on top of file, otherwise it creates an infinite
- # loop of import with tools.py...
- from .domain import _get_conflicting_apps, _normalize_domain_path
-
- domain, path = _normalize_domain_path(domain, path)
+ domain = DomainQuestion.normalize(domain)
+ path = PathQuestion.normalize(path)
# We cannot change the url of an app already installed simply by changing
# the settings...
- installed = app in app_list(installed=True, raw=True).keys()
- if installed:
+ if _is_installed(app):
settings = _get_app_settings(app)
if "path" in settings.keys() and "domain" in settings.keys():
- raise YunohostError('app_already_installed_cant_change_url')
+ raise YunohostValidationError("app_already_installed_cant_change_url")
# Check the url is available
- conflicts = _get_conflicting_apps(domain, path)
- if conflicts:
- apps = []
- for path, app_id, app_label in conflicts:
- apps.append(" * {domain:s}{path:s} → {app_label:s} ({app_id:s})".format(
- domain=domain,
- path=path,
- app_id=app_id,
- app_label=app_label,
- ))
+ _assert_no_conflicting_apps(domain, path)
- raise YunohostError('app_location_unavailable', apps="\n".join(apps))
+ app_setting(app, "domain", value=domain)
+ app_setting(app, "path", value=path)
- app_setting(app, 'domain', value=domain)
- app_setting(app, 'path', value=path)
-
-
-def app_checkurl(url, app=None):
- """
- Check availability of a web path
-
- Keyword argument:
- url -- Url to check
- app -- Write domain & path to app settings for further checks
-
- """
-
- logger.error("Packagers /!\\ : 'app checkurl' is deprecated ! Please use the helper 'ynh_webpath_register' instead !")
-
- from yunohost.domain import domain_list, _normalize_domain_path
-
- if "https://" == url[:8]:
- url = url[8:]
- elif "http://" == url[:7]:
- url = url[7:]
-
- if url[-1:] != '/':
- url = url + '/'
-
- domain = url[:url.index('/')]
- path = url[url.index('/'):]
- installed = False
-
- domain, path = _normalize_domain_path(domain, path)
-
- apps_map = app_map(raw=True)
-
- if domain not in domain_list()['domains']:
- raise YunohostError('domain_unknown')
-
- if domain in apps_map:
- # Loop through apps
- for p, a in apps_map[domain].items():
- # Skip requested app checking
- if app is not None and a['id'] == app:
- installed = True
- continue
- if path == p:
- raise YunohostError('app_location_already_used', app=a["id"], path=path)
- # can't install "/a/b/" if "/a/" exists
- elif path.startswith(p) or p.startswith(path):
- raise YunohostError('app_location_install_failed', other_path=p, other_app=a['id'])
-
- if app is not None and not installed:
- app_setting(app, 'domain', value=domain)
- app_setting(app, 'path', value=path)
-
-
-def app_initdb(user, password=None, db=None, sql=None):
- """
- Create database and initialize it with optionnal attached script
-
- Keyword argument:
- db -- DB name (user unless set)
- user -- Name of the DB user
- password -- Password of the DB (generated unless set)
- sql -- Initial SQL file
-
- """
-
- logger.error("Packagers /!\\ : 'app initdb' is deprecated ! Please use the helper 'ynh_mysql_setup_db' instead !")
-
- if db is None:
- db = user
-
- return_pwd = False
- if password is None:
- password = random_password(12)
- return_pwd = True
-
- mysql_root_pwd = open('/etc/yunohost/mysql').read().rstrip()
- mysql_command = 'mysql -u root -p%s -e "CREATE DATABASE %s ; GRANT ALL PRIVILEGES ON %s.* TO \'%s\'@localhost IDENTIFIED BY \'%s\';"' % (mysql_root_pwd, db, db, user, password)
- if os.system(mysql_command) != 0:
- raise YunohostError('mysql_db_creation_failed')
- if sql is not None:
- if os.system('mysql -u %s -p%s %s < %s' % (user, password, db, sql)) != 0:
- raise YunohostError('mysql_db_init_failed')
-
- if return_pwd:
- return password
-
- logger.success(m18n.n('mysql_db_initialized'))
+ # Initially, the .main permission is created with no url at all associated
+    # When the app registers/books its web url, we also add the url '/'
+    # (meaning the root of the app, domain.tld/path/)
+    # and enable the tile in the SSO; together this should match 95% of apps
+ # For more specific cases, the app is free to change / add urls or disable
+ # the tile using the permission helpers.
+ permission_url(app + ".main", url="/", sync_perm=False)
+ user_permission_update(app + ".main", show_tile=True, sync_perm=False)
+ permission_sync_to_user()
def app_ssowatconf():
@@ -1352,119 +1508,109 @@ def app_ssowatconf():
"""
from yunohost.domain import domain_list, _get_maindomain
- from yunohost.user import user_list
from yunohost.permission import user_permission_list
main_domain = _get_maindomain()
- domains = domain_list()['domains']
+ domains = domain_list()["domains"]
+ all_permissions = user_permission_list(
+ full=True, ignore_system_perms=True, absolute_urls=True
+ )["permissions"]
- skipped_urls = []
- skipped_regex = []
- unprotected_urls = []
- unprotected_regex = []
- protected_urls = []
- protected_regex = []
- redirected_regex = {main_domain + '/yunohost[\/]?$': 'https://' + main_domain + '/yunohost/sso/'}
+ permissions = {
+ "core_skipped": {
+ "users": [],
+ "label": "Core permissions - skipped",
+ "show_tile": False,
+ "auth_header": False,
+ "public": True,
+ "uris": [domain + "/yunohost/admin" for domain in domains]
+ + [domain + "/yunohost/api" for domain in domains]
+ + [
+ "re:^[^/]*/%.well%-known/ynh%-diagnosis/.*$",
+ "re:^[^/]*/%.well%-known/acme%-challenge/.*$",
+ "re:^[^/]*/%.well%-known/autoconfig/mail/config%-v1%.1%.xml.*$",
+ ],
+ }
+ }
+ redirected_regex = {
+ main_domain + r"/yunohost[\/]?$": "https://" + main_domain + "/yunohost/sso/"
+ }
redirected_urls = {}
- try:
- apps_list = app_list(installed=True)['apps']
- except Exception as e:
- logger.debug("cannot get installed app list because %s", e)
- apps_list = []
+ for app in _installed_apps():
- def _get_setting(settings, name):
- s = settings.get(name, None)
- return s.split(',') if s else []
+ app_settings = read_yaml(APPS_SETTING_PATH + app + "/settings.yml")
- for app in apps_list:
- with open(APPS_SETTING_PATH + app['id'] + '/settings.yml') as f:
- app_settings = yaml.load(f)
+ # Redirected
+ redirected_urls.update(app_settings.get("redirected_urls", {}))
+ redirected_regex.update(app_settings.get("redirected_regex", {}))
- if 'no_sso' in app_settings:
- continue
+ # New permission system
+ for perm_name, perm_info in all_permissions.items():
- for item in _get_setting(app_settings, 'skipped_uris'):
- if item[-1:] == '/':
- item = item[:-1]
- skipped_urls.append(app_settings['domain'] + app_settings['path'].rstrip('/') + item)
- for item in _get_setting(app_settings, 'skipped_regex'):
- skipped_regex.append(item)
- for item in _get_setting(app_settings, 'unprotected_uris'):
- if item[-1:] == '/':
- item = item[:-1]
- unprotected_urls.append(app_settings['domain'] + app_settings['path'].rstrip('/') + item)
- for item in _get_setting(app_settings, 'unprotected_regex'):
- unprotected_regex.append(item)
- for item in _get_setting(app_settings, 'protected_uris'):
- if item[-1:] == '/':
- item = item[:-1]
- protected_urls.append(app_settings['domain'] + app_settings['path'].rstrip('/') + item)
- for item in _get_setting(app_settings, 'protected_regex'):
- protected_regex.append(item)
- if 'redirected_urls' in app_settings:
- redirected_urls.update(app_settings['redirected_urls'])
- if 'redirected_regex' in app_settings:
- redirected_regex.update(app_settings['redirected_regex'])
+ uris = (
+ []
+ + ([perm_info["url"]] if perm_info["url"] else [])
+ + perm_info["additional_urls"]
+ )
- for domain in domains:
- skipped_urls.extend([domain + '/yunohost/admin', domain + '/yunohost/api'])
+ # Ignore permissions for which there's no url defined
+ if not uris:
+ continue
- # Authorize ACME challenge url
- skipped_regex.append("^[^/]*/%.well%-known/acme%-challenge/.*$")
- skipped_regex.append("^[^/]*/%.well%-known/autoconfig/mail/config%-v1%.1%.xml.*$")
-
- permission = {}
- for a in user_permission_list()['permissions'].values():
- for p in a.values():
- if 'URL' in p:
- for u in p['URL']:
- permission[u] = p['allowed_users']
+ permissions[perm_name] = {
+ "users": perm_info["corresponding_users"],
+ "label": perm_info["label"],
+ "show_tile": perm_info["show_tile"]
+ and perm_info["url"]
+ and (not perm_info["url"].startswith("re:")),
+ "auth_header": perm_info["auth_header"],
+ "public": "visitors" in perm_info["allowed"],
+ "uris": uris,
+ }
conf_dict = {
- 'portal_domain': main_domain,
- 'portal_path': '/yunohost/sso/',
- 'additional_headers': {
- 'Auth-User': 'uid',
- 'Remote-User': 'uid',
- 'Name': 'cn',
- 'Email': 'mail'
+ "portal_domain": main_domain,
+ "portal_path": "/yunohost/sso/",
+ "additional_headers": {
+ "Auth-User": "uid",
+ "Remote-User": "uid",
+ "Name": "cn",
+ "Email": "mail",
},
- 'domains': domains,
- 'skipped_urls': skipped_urls,
- 'unprotected_urls': unprotected_urls,
- 'protected_urls': protected_urls,
- 'skipped_regex': skipped_regex,
- 'unprotected_regex': unprotected_regex,
- 'protected_regex': protected_regex,
- 'redirected_urls': redirected_urls,
- 'redirected_regex': redirected_regex,
- 'users': {username: app_map(user=username)
- for username in user_list()['users'].keys()},
- 'permission': permission,
+ "domains": domains,
+ "redirected_urls": redirected_urls,
+ "redirected_regex": redirected_regex,
+ "permissions": permissions,
}
- with open('/etc/ssowat/conf.json', 'w+') as f:
- json.dump(conf_dict, f, sort_keys=True, indent=4)
+ write_to_json("/etc/ssowat/conf.json", conf_dict, sort_keys=True, indent=4)
- logger.debug(m18n.n('ssowat_conf_generated'))
+ from .utils.legacy import translate_legacy_rules_in_ssowant_conf_json_persistent
+
+ translate_legacy_rules_in_ssowant_conf_json_persistent()
+
+ logger.debug(m18n.n("ssowat_conf_generated"))
def app_change_label(app, new_label):
+ from yunohost.permission import user_permission_update
+
installed = _is_installed(app)
if not installed:
- raise YunohostError('app_not_installed', app=app, all_apps=_get_all_installed_apps_id())
-
- app_setting(app, "label", value=new_label)
-
- app_ssowatconf()
+ raise YunohostValidationError(
+ "app_not_installed", app=app, all_apps=_get_all_installed_apps_id()
+ )
+ logger.warning(m18n.n("app_label_deprecated"))
+ user_permission_update(app + ".main", label=new_label)
# actions todo list:
# * docstring
+
def app_action_list(app):
- logger.warning(m18n.n('experimental_feature'))
+ logger.warning(m18n.n("experimental_feature"))
# this will take care of checking if the app is installed
app_info_dict = app_info(app)
@@ -1472,211 +1618,183 @@ def app_action_list(app):
return {
"app": app,
"app_name": app_info_dict["name"],
- "actions": _get_app_actions(app)
+ "actions": _get_app_actions(app),
}
@is_unit_operation()
def app_action_run(operation_logger, app, action, args=None):
- logger.warning(m18n.n('experimental_feature'))
+ logger.warning(m18n.n("experimental_feature"))
from yunohost.hook import hook_exec
- import tempfile
# will raise if action doesn't exist
actions = app_action_list(app)["actions"]
actions = {x["id"]: x for x in actions}
if action not in actions:
- raise YunohostError("action '%s' not available for app '%s', available actions are: %s" % (action, app, ", ".join(actions.keys())), raw_msg=True)
+ raise YunohostValidationError(
+ "action '%s' not available for app '%s', available actions are: %s"
+ % (action, app, ", ".join(actions.keys())),
+ raw_msg=True,
+ )
operation_logger.start()
action_declaration = actions[action]
# Retrieve arguments list for install script
- args_dict = dict(urlparse.parse_qsl(args, keep_blank_values=True)) if args else {}
- args_odict = _parse_args_for_action(actions[action], args=args_dict)
- args_list = [value[0] for value in args_odict.values()]
+ raw_questions = actions[action].get("arguments", {})
+ questions = ask_questions_and_parse_answers(raw_questions, prefilled_answers=args)
+ args = {
+ question.name: question.value
+ for question in questions
+ if question.value is not None
+ }
- app_id, app_instance_nb = _parse_app_instance_name(app)
+ tmp_workdir_for_app = _make_tmp_workdir_for_app(app=app)
- env_dict = _make_environment_dict(args_odict, prefix="ACTION_")
- env_dict["YNH_APP_ID"] = app_id
- env_dict["YNH_APP_INSTANCE_NAME"] = app
- env_dict["YNH_APP_INSTANCE_NUMBER"] = str(app_instance_nb)
+ env_dict = _make_environment_for_app_script(app, args=args, args_prefix="ACTION_")
env_dict["YNH_ACTION"] = action
+ env_dict["YNH_APP_BASEDIR"] = tmp_workdir_for_app
- _, path = tempfile.mkstemp()
+ _, action_script = tempfile.mkstemp(dir=tmp_workdir_for_app)
- with open(path, "w") as script:
+ with open(action_script, "w") as script:
script.write(action_declaration["command"])
- os.chmod(path, 700)
-
if action_declaration.get("cwd"):
- cwd = action_declaration["cwd"].replace("$app", app_id)
+ cwd = action_declaration["cwd"].replace("$app", app)
else:
- cwd = "/etc/yunohost/apps/" + app
+ cwd = tmp_workdir_for_app
- retcode = hook_exec(
- path,
- args=args_list,
- env=env_dict,
- chdir=cwd,
- user=action_declaration.get("user", "root"),
- )[0]
+ try:
+ retcode = hook_exec(
+ action_script,
+ env=env_dict,
+ chdir=cwd,
+ user=action_declaration.get("user", "root"),
+ )[0]
+ # Calling hook_exec could fail miserably, or get
+ # manually interrupted (by mistake or because script was stuck)
+ # In that case we still want to delete the tmp work dir
+ except (KeyboardInterrupt, EOFError, Exception):
+ retcode = -1
+ import traceback
+
+ logger.error(m18n.n("unexpected_error", error="\n" + traceback.format_exc()))
+ finally:
+ shutil.rmtree(tmp_workdir_for_app)
if retcode not in action_declaration.get("accepted_return_codes", [0]):
- msg = "Error while executing action '%s' of app '%s': return code %s" % (action, app, retcode)
+ msg = "Error while executing action '%s' of app '%s': return code %s" % (
+ action,
+ app,
+ retcode,
+ )
operation_logger.error(msg)
raise YunohostError(msg, raw_msg=True)
- os.remove(path)
-
operation_logger.success()
return logger.success("Action successed!")
-# Config panel todo list:
-# * docstrings
-# * merge translations on the json once the workflow is in place
-@is_unit_operation()
-def app_config_show_panel(operation_logger, app):
- logger.warning(m18n.n('experimental_feature'))
+def app_config_get(app, key="", full=False, export=False):
+ """
+ Display an app configuration in classic, full or export mode
+ """
+ if full and export:
+ raise YunohostValidationError(
+ "You can't use --full and --export together.", raw_msg=True
+ )
- from yunohost.hook import hook_exec
+ if full:
+ mode = "full"
+ elif export:
+ mode = "export"
+ else:
+ mode = "classic"
- # this will take care of checking if the app is installed
- app_info_dict = app_info(app)
-
- operation_logger.start()
- config_panel = _get_app_config_panel(app)
- config_script = os.path.join(APPS_SETTING_PATH, app, 'scripts', 'config')
-
- app_id, app_instance_nb = _parse_app_instance_name(app)
-
- if not config_panel or not os.path.exists(config_script):
- return {
- "app_id": app_id,
- "app": app,
- "app_name": app_info_dict["name"],
- "config_panel": [],
- }
-
- env = {
- "YNH_APP_ID": app_id,
- "YNH_APP_INSTANCE_NAME": app,
- "YNH_APP_INSTANCE_NUMBER": str(app_instance_nb),
- }
-
- return_code, parsed_values = hook_exec(config_script,
- args=["show"],
- env=env,
- return_format="plain_dict"
- )
-
- if return_code != 0:
- raise Exception("script/config show return value code: %s (considered as an error)", return_code)
-
- logger.debug("Generating global variables:")
- for tab in config_panel.get("panel", []):
- tab_id = tab["id"] # this makes things easier to debug on crash
- for section in tab.get("sections", []):
- section_id = section["id"]
- for option in section.get("options", []):
- option_name = option["name"]
- generated_name = ("YNH_CONFIG_%s_%s_%s" % (tab_id, section_id, option_name)).upper()
- option["name"] = generated_name
- logger.debug(" * '%s'.'%s'.'%s' -> %s", tab.get("name"), section.get("name"), option.get("name"), generated_name)
-
- if generated_name in parsed_values:
- # code is not adapted for that so we have to mock expected format :/
- if option.get("type") == "boolean":
- if parsed_values[generated_name].lower() in ("true", "1", "y"):
- option["default"] = parsed_values[generated_name]
- else:
- del option["default"]
- else:
- option["default"] = parsed_values[generated_name]
-
- args_dict = _parse_args_in_yunohost_format(
- [{option["name"]: parsed_values[generated_name]}],
- [option]
- )
- option["default"] = args_dict[option["name"]][0]
- else:
- logger.debug("Variable '%s' is not declared by config script, using default", generated_name)
- # do nothing, we'll use the default if present
-
- return {
- "app_id": app_id,
- "app": app,
- "app_name": app_info_dict["name"],
- "config_panel": config_panel,
- "logs": operation_logger.success(),
- }
+ config_ = AppConfigPanel(app)
+ return config_.get(key, mode)
@is_unit_operation()
-def app_config_apply(operation_logger, app, args):
- logger.warning(m18n.n('experimental_feature'))
+def app_config_set(
+ operation_logger, app, key=None, value=None, args=None, args_file=None
+):
+ """
+ Apply a new app configuration
+ """
- from yunohost.hook import hook_exec
+ config_ = AppConfigPanel(app)
- installed = _is_installed(app)
- if not installed:
- raise YunohostError('app_not_installed', app=app, all_apps=_get_all_installed_apps_id())
+ return config_.set(key, value, args, args_file, operation_logger=operation_logger)
- config_panel = _get_app_config_panel(app)
- config_script = os.path.join(APPS_SETTING_PATH, app, 'scripts', 'config')
- if not config_panel or not os.path.exists(config_script):
- # XXX real exception
- raise Exception("Not config-panel.json nor scripts/config")
+class AppConfigPanel(ConfigPanel):
+ def __init__(self, app):
- operation_logger.start()
- app_id, app_instance_nb = _parse_app_instance_name(app)
- env = {
- "YNH_APP_ID": app_id,
- "YNH_APP_INSTANCE_NAME": app,
- "YNH_APP_INSTANCE_NUMBER": str(app_instance_nb),
- }
- args = dict(urlparse.parse_qsl(args, keep_blank_values=True)) if args else {}
+ # Check app is installed
+ _assert_is_installed(app)
- for tab in config_panel.get("panel", []):
- tab_id = tab["id"] # this makes things easier to debug on crash
- for section in tab.get("sections", []):
- section_id = section["id"]
- for option in section.get("options", []):
- option_name = option["name"]
- generated_name = ("YNH_CONFIG_%s_%s_%s" % (tab_id, section_id, option_name)).upper()
+ self.app = app
+ config_path = os.path.join(APPS_SETTING_PATH, app, "config_panel.toml")
+ super().__init__(config_path=config_path)
- if generated_name in args:
- logger.debug("include into env %s=%s", generated_name, args[generated_name])
- env[generated_name] = args[generated_name]
- else:
- logger.debug("no value for key id %s", generated_name)
+ def _load_current_values(self):
+ self.values = self._call_config_script("show")
- # for debug purpose
- for key in args:
- if key not in env:
- logger.warning("Ignore key '%s' from arguments because it is not in the config", key)
+ def _apply(self):
+ env = {key: str(value) for key, value in self.new_values.items()}
+ return_content = self._call_config_script("apply", env=env)
- return_code = hook_exec(config_script,
- args=["apply"],
- env=env,
- )[0]
+ # If the script returned validation error
+ # raise a ValidationError exception using
+ # the first key
+ if return_content:
+ for key, message in return_content.get("validation_errors").items():
+ raise YunohostValidationError(
+ "app_argument_invalid",
+ name=key,
+ error=message,
+ )
- if return_code != 0:
- msg = "'script/config apply' return value code: %s (considered as an error)" % return_code
- operation_logger.error(msg)
- raise Exception(msg)
+ def _call_config_script(self, action, env={}):
+ from yunohost.hook import hook_exec
- logger.success("Config updated as expected")
- return {
- "logs": operation_logger.success(),
- }
+ # Add default config script if needed
+ config_script = os.path.join(APPS_SETTING_PATH, self.app, "scripts", "config")
+ if not os.path.exists(config_script):
+ logger.debug("Adding a default config script")
+ default_script = """#!/bin/bash
+source /usr/share/yunohost/helpers
+ynh_abort_if_errors
+ynh_app_config_run $1
+"""
+ write_to_file(config_script, default_script)
+
+ # Call config script to extract current values
+ logger.debug(f"Calling '{action}' action from config script")
+ app_id, app_instance_nb = _parse_app_instance_name(self.app)
+ settings = _get_app_settings(app_id)
+ env.update(
+ {
+ "app_id": app_id,
+ "app": self.app,
+ "app_instance_nb": str(app_instance_nb),
+ "final_path": settings.get("final_path", ""),
+ "YNH_APP_BASEDIR": os.path.join(APPS_SETTING_PATH, self.app),
+ }
+ )
+
+ ret, values = hook_exec(config_script, args=[action], env=env)
+ if ret != 0:
+ if action == "show":
+ raise YunohostError("app_config_unable_to_read")
+ else:
+ raise YunohostError("app_config_unable_to_apply")
+ return values
def _get_all_installed_apps_id():
@@ -1687,8 +1805,7 @@ def _get_all_installed_apps_id():
* ...'
"""
- all_apps_ids = [x["id"] for x in app_list(installed=True)["apps"]]
- all_apps_ids = sorted(all_apps_ids)
+ all_apps_ids = sorted(_installed_apps())
all_apps_ids_formatted = "\n * ".join(all_apps_ids)
all_apps_ids_formatted = "\n * " + all_apps_ids_formatted
@@ -1698,8 +1815,8 @@ def _get_all_installed_apps_id():
def _get_app_actions(app_id):
"Get app config panel stored in json or in toml"
- actions_toml_path = os.path.join(APPS_SETTING_PATH, app_id, 'actions.toml')
- actions_json_path = os.path.join(APPS_SETTING_PATH, app_id, 'actions.json')
+ actions_toml_path = os.path.join(APPS_SETTING_PATH, app_id, "actions.toml")
+ actions_json_path = os.path.join(APPS_SETTING_PATH, app_id, "actions.json")
# sample data to get an idea of what is going on
# this toml extract:
@@ -1781,132 +1898,6 @@ def _get_app_actions(app_id):
return None
-def _get_app_config_panel(app_id):
- "Get app config panel stored in json or in toml"
- config_panel_toml_path = os.path.join(APPS_SETTING_PATH, app_id, 'config_panel.toml')
- config_panel_json_path = os.path.join(APPS_SETTING_PATH, app_id, 'config_panel.json')
-
- # sample data to get an idea of what is going on
- # this toml extract:
- #
- # version = "0.1"
- # name = "Unattended-upgrades configuration panel"
- #
- # [main]
- # name = "Unattended-upgrades configuration"
- #
- # [main.unattended_configuration]
- # name = "50unattended-upgrades configuration file"
- #
- # [main.unattended_configuration.upgrade_level]
- # name = "Choose the sources of packages to automatically upgrade."
- # default = "Security only"
- # type = "text"
- # help = "We can't use a choices field for now. In the meantime please choose between one of this values:
Security only, Security and updates."
- # # choices = ["Security only", "Security and updates"]
-
- # [main.unattended_configuration.ynh_update]
- # name = "Would you like to update YunoHost packages automatically ?"
- # type = "bool"
- # default = true
- #
- # will be parsed into this:
- #
- # OrderedDict([(u'version', u'0.1'),
- # (u'name', u'Unattended-upgrades configuration panel'),
- # (u'main',
- # OrderedDict([(u'name', u'Unattended-upgrades configuration'),
- # (u'unattended_configuration',
- # OrderedDict([(u'name',
- # u'50unattended-upgrades configuration file'),
- # (u'upgrade_level',
- # OrderedDict([(u'name',
- # u'Choose the sources of packages to automatically upgrade.'),
- # (u'default',
- # u'Security only'),
- # (u'type', u'text'),
- # (u'help',
- # u"We can't use a choices field for now. In the meantime please choose between one of this values:
Security only, Security and updates.")])),
- # (u'ynh_update',
- # OrderedDict([(u'name',
- # u'Would you like to update YunoHost packages automatically ?'),
- # (u'type', u'bool'),
- # (u'default', True)])),
- #
- # and needs to be converted into this:
- #
- # {u'name': u'Unattended-upgrades configuration panel',
- # u'panel': [{u'id': u'main',
- # u'name': u'Unattended-upgrades configuration',
- # u'sections': [{u'id': u'unattended_configuration',
- # u'name': u'50unattended-upgrades configuration file',
- # u'options': [{u'//': u'"choices" : ["Security only", "Security and updates"]',
- # u'default': u'Security only',
- # u'help': u"We can't use a choices field for now. In the meantime please choose between one of this values:
Security only, Security and updates.",
- # u'id': u'upgrade_level',
- # u'name': u'Choose the sources of packages to automatically upgrade.',
- # u'type': u'text'},
- # {u'default': True,
- # u'id': u'ynh_update',
- # u'name': u'Would you like to update YunoHost packages automatically ?',
- # u'type': u'bool'},
-
- if os.path.exists(config_panel_toml_path):
- toml_config_panel = toml.load(open(config_panel_toml_path, "r"), _dict=OrderedDict)
-
- # transform toml format into json format
- config_panel = {
- "name": toml_config_panel["name"],
- "version": toml_config_panel["version"],
- "panel": [],
- }
-
- panels = filter(lambda (key, value): key not in ("name", "version")
- and isinstance(value, OrderedDict),
- toml_config_panel.items())
-
- for key, value in panels:
- panel = {
- "id": key,
- "name": value["name"],
- "sections": [],
- }
-
- sections = filter(lambda (k, v): k not in ("name",)
- and isinstance(v, OrderedDict),
- value.items())
-
- for section_key, section_value in sections:
- section = {
- "id": section_key,
- "name": section_value["name"],
- "options": [],
- }
-
- options = filter(lambda (k, v): k not in ("name",)
- and isinstance(v, OrderedDict),
- section_value.items())
-
- for option_key, option_value in options:
- option = dict(option_value)
- option["name"] = option_key
- option["ask"] = {"en": option["ask"]}
- if "help" in option:
- option["help"] = {"en": option["help"]}
- section["options"].append(option)
-
- panel["sections"].append(section)
-
- config_panel["panel"].append(panel)
-
- return config_panel
-
- elif os.path.exists(config_panel_json_path):
- return json.load(open(config_panel_json_path))
-
- return None
-
-
def _get_app_settings(app_id):
"""
Get settings of an installed app
@@ -1916,16 +1907,34 @@ def _get_app_settings(app_id):
"""
if not _is_installed(app_id):
- raise YunohostError('app_not_installed', app=app_id, all_apps=_get_all_installed_apps_id())
+ raise YunohostValidationError(
+ "app_not_installed", app=app_id, all_apps=_get_all_installed_apps_id()
+ )
try:
- with open(os.path.join(
- APPS_SETTING_PATH, app_id, 'settings.yml')) as f:
- settings = yaml.load(f)
- if app_id == settings['id']:
+ with open(os.path.join(APPS_SETTING_PATH, app_id, "settings.yml")) as f:
+ settings = yaml.safe_load(f)
+ # If label contains unicode char, this may later trigger issues when building strings...
+ # FIXME: this should be propagated to read_yaml so that this fix applies everywhere I think...
+ settings = {k: v for k, v in settings.items()}
+
+ # Stupid fix for legacy bullshit
+ # In the past, some setups did not have proper normalization for app domain/path
+ # Meaning some setups (as of January 2021) still have path=/foobar/ (with a trailing slash)
+ # resulting in stupid issue unless apps using ynh_app_normalize_path_stuff
+ # So we yolofix the settings if such an issue is found >_>
+ # A simple call to `yunohost app list` (which happens quite often) should be enough
+ # to migrate all app settings ... so this can probably be removed once we're past Bullseye...
+ if settings.get("path") != "/" and (
+ settings.get("path", "").endswith("/")
+ or not settings.get("path", "/").startswith("/")
+ ):
+ settings["path"] = "/" + settings["path"].strip("/")
+ _set_app_settings(app_id, settings)
+
+ if app_id == settings["id"]:
return settings
except (IOError, TypeError, KeyError):
- logger.exception(m18n.n('app_not_correctly_installed',
- app=app_id))
+ logger.error(m18n.n("app_not_correctly_installed", app=app_id))
return {}
@@ -1938,110 +1947,56 @@ def _set_app_settings(app_id, settings):
settings -- Dict with app settings
"""
- with open(os.path.join(
- APPS_SETTING_PATH, app_id, 'settings.yml'), 'w') as f:
+ with open(os.path.join(APPS_SETTING_PATH, app_id, "settings.yml"), "w") as f:
yaml.safe_dump(settings, f, default_flow_style=False)
-def _get_app_status(app_id, format_date=False):
+def _extract_app_from_file(path):
"""
- Get app status or create it if needed
-
- Keyword arguments:
- app_id -- The app id
- format_date -- Format date fields
-
- """
- app_setting_path = APPS_SETTING_PATH + app_id
- if not os.path.isdir(app_setting_path):
- raise YunohostError('app_unknown')
- status = {}
-
- regen_status = True
- try:
- with open(app_setting_path + '/status.json') as f:
- status = json.loads(str(f.read()))
- regen_status = False
- except IOError:
- logger.debug("status file not found for '%s'", app_id,
- exc_info=1)
- except Exception as e:
- logger.warning("could not open or decode %s : %s ... regenerating.", app_setting_path + '/status.json', str(e))
-
- if regen_status:
- # Create app status
- status = {
- 'installed_at': app_setting(app_id, 'install_time'),
- 'upgraded_at': app_setting(app_id, 'update_time'),
- 'remote': {'type': None},
- }
- with open(app_setting_path + '/status.json', 'w+') as f:
- json.dump(status, f)
-
- if format_date:
- for f in ['installed_at', 'upgraded_at']:
- v = status.get(f, None)
- if not v:
- status[f] = '-'
- else:
- status[f] = datetime.utcfromtimestamp(v)
- return status
-
-
-def _extract_app_from_file(path, remove=False):
- """
- Unzip or untar application tarball in APP_TMP_FOLDER, or copy it from a directory
+ Unzip / untar / copy application tarball or directory to a tmp work directory
Keyword arguments:
path -- Path of the tarball or directory
- remove -- Remove the tarball after extraction
-
- Returns:
- Dict manifest
-
"""
- logger.debug(m18n.n('extracting'))
-
- if os.path.exists(APP_TMP_FOLDER):
- shutil.rmtree(APP_TMP_FOLDER)
- os.makedirs(APP_TMP_FOLDER)
+ logger.debug(m18n.n("extracting"))
path = os.path.abspath(path)
+ extracted_app_folder = _make_tmp_workdir_for_app()
+
if ".zip" in path:
- extract_result = os.system('unzip %s -d %s > /dev/null 2>&1' % (path, APP_TMP_FOLDER))
- if remove:
- os.remove(path)
+ extract_result = os.system(
+ f"unzip '{path}' -d {extracted_app_folder} > /dev/null 2>&1"
+ )
elif ".tar" in path:
- extract_result = os.system('tar -xf %s -C %s > /dev/null 2>&1' % (path, APP_TMP_FOLDER))
- if remove:
- os.remove(path)
+ extract_result = os.system(
+ f"tar -xf '{path}' -C {extracted_app_folder} > /dev/null 2>&1"
+ )
elif os.path.isdir(path):
- shutil.rmtree(APP_TMP_FOLDER)
- if path[-1] != '/':
- path = path + '/'
- extract_result = os.system('cp -a "%s" %s' % (path, APP_TMP_FOLDER))
+ shutil.rmtree(extracted_app_folder)
+ if path[-1] != "/":
+ path = path + "/"
+ extract_result = os.system(f"cp -a '{path}' {extracted_app_folder}")
else:
extract_result = 1
if extract_result != 0:
- raise YunohostError('app_extraction_failed')
+ raise YunohostError("app_extraction_failed")
try:
- extracted_app_folder = APP_TMP_FOLDER
if len(os.listdir(extracted_app_folder)) == 1:
for folder in os.listdir(extracted_app_folder):
- extracted_app_folder = extracted_app_folder + '/' + folder
+ extracted_app_folder = extracted_app_folder + "/" + folder
manifest = _get_manifest_of_app(extracted_app_folder)
- manifest['lastUpdate'] = int(time.time())
+ manifest["lastUpdate"] = int(time.time())
except IOError:
- raise YunohostError('app_install_files_invalid')
+ raise YunohostError("app_install_files_invalid")
except ValueError as e:
- raise YunohostError('app_manifest_invalid', error=e)
+ raise YunohostError("app_manifest_invalid", error=e)
- logger.debug(m18n.n('done'))
+ logger.debug(m18n.n("done"))
- manifest['remote'] = {'type': 'file', 'path': path}
+ manifest["remote"] = {"type": "file", "path": path}
return manifest, extracted_app_folder
@@ -2156,14 +2111,10 @@ def _get_manifest_of_app(path):
manifest = manifest_toml.copy()
- if "arguments" not in manifest:
- return manifest
-
- if "install" not in manifest["arguments"]:
- return manifest
-
install_arguments = []
- for name, values in manifest_toml.get("arguments", {}).get("install", {}).items():
+ for name, values in (
+ manifest_toml.get("arguments", {}).get("install", {}).items()
+ ):
args = values.copy()
args["name"] = name
@@ -2171,14 +2122,80 @@ def _get_manifest_of_app(path):
manifest["arguments"]["install"] = install_arguments
- return manifest
elif os.path.exists(os.path.join(path, "manifest.json")):
- return read_json(os.path.join(path, "manifest.json"))
+ manifest = read_json(os.path.join(path, "manifest.json"))
else:
- return None
+ raise YunohostError(
+ "There doesn't seem to be any manifest file in %s ... It looks like an app was not correctly installed/removed."
+ % path,
+ raw_msg=True,
+ )
+
+ manifest["arguments"] = _set_default_ask_questions(manifest.get("arguments", {}))
+ return manifest
-def _get_git_last_commit_hash(repository, reference='HEAD'):
+def _set_default_ask_questions(arguments):
+
+ # arguments is something like
+ # { "install": [
+ # { "name": "domain",
+ # "type": "domain",
+ # ....
+ # },
+ # { "name": "path",
+ # "type": "path"
+ # ...
+ # },
+ # ...
+ # ],
+ # "upgrade": [ ... ]
+ # }
+
+ # We set a default for any question with these matching (type, name)
+ # type name
+ # N.B. : this is only for install script ... should be reworked for other
+ # scripts if we supports args for other scripts in the future...
+ questions_with_default = [
+ ("domain", "domain"), # i18n: app_manifest_install_ask_domain
+ ("path", "path"), # i18n: app_manifest_install_ask_path
+ ("password", "password"), # i18n: app_manifest_install_ask_password
+ ("user", "admin"), # i18n: app_manifest_install_ask_admin
+ ("boolean", "is_public"),
+ ] # i18n: app_manifest_install_ask_is_public
+
+ for script_name, arg_list in arguments.items():
+
+ # We only support default questions for the install script so far; skip other scripts
+ if script_name != "install":
+ continue
+
+ for arg in arg_list:
+
+ # Do not override 'ask' field if provided by app ?... Or shall we ?
+ # if "ask" in arg:
+ # continue
+
+ # If this arg corresponds to a question with default ask message...
+ if any(
+ (arg.get("type"), arg["name"]) == question
+ for question in questions_with_default
+ ):
+ # The key is for example "app_manifest_install_ask_domain"
+ key = "app_manifest_%s_ask_%s" % (script_name, arg["name"])
+ arg["ask"] = m18n.n(key)
+
+ # Also, it doesn't make sense for any of these questions to have an example value or a default value...
+ if arg.get("type") in ["domain", "user", "password"]:
+ if "example" in arg:
+ del arg["example"]
+ if "default" in arg:
+ del arg["default"]
+
+ return arguments
+
+
+def _get_git_last_commit_hash(repository, reference="HEAD"):
"""
Attempt to retrieve the last commit hash of a git repository
@@ -2187,12 +2204,12 @@ def _get_git_last_commit_hash(repository, reference='HEAD'):
"""
try:
- commit = subprocess.check_output(
- "git ls-remote --exit-code {0} {1} | awk '{{print $1}}'".format(
- repository, reference),
- shell=True)
+ cmd = "git ls-remote --exit-code {0} {1} | awk '{{print $1}}'".format(
+ repository, reference
+ )
+ commit = check_output(cmd)
except subprocess.CalledProcessError:
- logger.exception("unable to get last commit from %s", repository)
+ logger.error("unable to get last commit from %s", repository)
raise ValueError("Unable to get last commit with git")
else:
return commit.strip()
@@ -2200,131 +2217,76 @@ def _get_git_last_commit_hash(repository, reference='HEAD'):
def _fetch_app_from_git(app):
"""
- Unzip or untar application tarball in APP_TMP_FOLDER
+ Unzip or untar application tarball to a tmp directory
Keyword arguments:
app -- App_id or git repo URL
-
- Returns:
- Dict manifest
-
"""
- extracted_app_folder = APP_TMP_FOLDER
- app_tmp_archive = '{0}.zip'.format(extracted_app_folder)
- if os.path.exists(extracted_app_folder):
- shutil.rmtree(extracted_app_folder)
- if os.path.exists(app_tmp_archive):
- os.remove(app_tmp_archive)
-
- logger.debug(m18n.n('downloading'))
-
- if ('@' in app) or ('http://' in app) or ('https://' in app):
+ # Extract URL, branch and revision to download
+ if ("@" in app) or ("http://" in app) or ("https://" in app):
url = app
- branch = 'master'
- github_repo = re_github_repo.match(app)
- if github_repo:
- if github_repo.group('tree'):
- branch = github_repo.group('tree')
- url = "https://github.com/{owner}/{repo}".format(
- owner=github_repo.group('owner'),
- repo=github_repo.group('repo'),
- )
- tarball_url = "{url}/archive/{tree}.zip".format(
- url=url, tree=branch
- )
- try:
- subprocess.check_call([
- 'wget', '-qO', app_tmp_archive, tarball_url])
- except subprocess.CalledProcessError:
- logger.exception('unable to download %s', tarball_url)
- raise YunohostError('app_sources_fetch_failed')
- else:
- manifest, extracted_app_folder = _extract_app_from_file(
- app_tmp_archive, remove=True)
- else:
- tree_index = url.rfind('/tree/')
- if tree_index > 0:
- url = url[:tree_index]
- branch = app[tree_index + 6:]
- try:
- # We use currently git 2.1 so we can't use --shallow-submodules
- # option. When git will be in 2.9 (with the new debian version)
- # we will be able to use it. Without this option all the history
- # of the submodules repo is downloaded.
- subprocess.check_call([
- 'git', 'clone', '-b', branch, '--single-branch', '--recursive', '--depth=1', url,
- extracted_app_folder])
- subprocess.check_call([
- 'git', 'reset', '--hard', branch
- ], cwd=extracted_app_folder)
- manifest = _get_manifest_of_app(extracted_app_folder)
- except subprocess.CalledProcessError:
- raise YunohostError('app_sources_fetch_failed')
- except ValueError as e:
- raise YunohostError('app_manifest_invalid', error=e)
- else:
- logger.debug(m18n.n('done'))
+ branch = "master"
+ if "/tree/" in url:
+ url, branch = url.split("/tree/", 1)
+ revision = "HEAD"
+ else:
+ app_dict = _load_apps_catalog()["apps"]
- # Store remote repository info into the returned manifest
- manifest['remote'] = {'type': 'git', 'url': url, 'branch': branch}
+ app_id, _ = _parse_app_instance_name(app)
+
+ if app_id not in app_dict:
+ raise YunohostValidationError("app_unknown")
+ elif "git" not in app_dict[app_id]:
+ raise YunohostValidationError("app_unsupported_remote_type")
+
+ app_info = app_dict[app_id]
+ url = app_info["git"]["url"]
+ branch = app_info["git"]["branch"]
+ revision = str(app_info["git"]["revision"])
+
+ extracted_app_folder = _make_tmp_workdir_for_app()
+
+ logger.debug(m18n.n("downloading"))
+
+ # Download only this commit
+ try:
+ # We don't use git clone because, git clone can't download
+ # a specific revision only
+ run_commands([["git", "init", extracted_app_folder]], shell=False)
+ run_commands(
+ [
+ ["git", "remote", "add", "origin", url],
+ [
+ "git",
+ "fetch",
+ "--depth=1",
+ "origin",
+ branch if revision == "HEAD" else revision,
+ ],
+ ["git", "reset", "--hard", "FETCH_HEAD"],
+ ],
+ cwd=extracted_app_folder,
+ shell=False,
+ )
+ manifest = _get_manifest_of_app(extracted_app_folder)
+ except subprocess.CalledProcessError:
+ raise YunohostError("app_sources_fetch_failed")
+ except ValueError as e:
+ raise YunohostError("app_manifest_invalid", error=e)
+ else:
+ logger.debug(m18n.n("done"))
+
+ # Store remote repository info into the returned manifest
+ manifest["remote"] = {"type": "git", "url": url, "branch": branch}
+ if revision == "HEAD":
try:
- revision = _get_git_last_commit_hash(url, branch)
+ manifest["remote"]["revision"] = _get_git_last_commit_hash(url, branch)
except Exception as e:
logger.debug("cannot get last commit hash because: %s ", e)
- else:
- manifest['remote']['revision'] = revision
else:
- app_dict = app_list(raw=True)
-
- if app in app_dict:
- app_info = app_dict[app]
- app_info['manifest']['lastUpdate'] = app_info['lastUpdate']
- manifest = app_info['manifest']
- else:
- raise YunohostError('app_unknown')
-
- if 'git' not in app_info:
- raise YunohostError('app_unsupported_remote_type')
- url = app_info['git']['url']
-
- if 'github.com' in url:
- tarball_url = "{url}/archive/{tree}.zip".format(
- url=url, tree=app_info['git']['revision']
- )
- try:
- subprocess.check_call([
- 'wget', '-qO', app_tmp_archive, tarball_url])
- except subprocess.CalledProcessError:
- logger.exception('unable to download %s', tarball_url)
- raise YunohostError('app_sources_fetch_failed')
- else:
- manifest, extracted_app_folder = _extract_app_from_file(
- app_tmp_archive, remove=True)
- else:
- try:
- subprocess.check_call([
- 'git', 'clone', app_info['git']['url'],
- '-b', app_info['git']['branch'], extracted_app_folder])
- subprocess.check_call([
- 'git', 'reset', '--hard',
- str(app_info['git']['revision'])
- ], cwd=extracted_app_folder)
- manifest = _get_manifest_of_app(extracted_app_folder)
- except subprocess.CalledProcessError:
- raise YunohostError('app_sources_fetch_failed')
- except ValueError as e:
- raise YunohostError('app_manifest_invalid', error=e)
- else:
- logger.debug(m18n.n('done'))
-
- # Store remote repository info into the returned manifest
- manifest['remote'] = {
- 'type': 'git',
- 'url': url,
- 'branch': app_info['git']['branch'],
- 'revision': app_info['git']['revision'],
- }
+ manifest["remote"]["revision"] = revision
+ manifest["lastUpdate"] = app_info["lastUpdate"]
return manifest, extracted_app_folder
@@ -2352,10 +2314,10 @@ def _installed_instance_number(app, last=False):
for installed_app in installed_apps:
if number == 0 and app == installed_app:
number = 1
- elif '__' in installed_app:
- if app == installed_app[:installed_app.index('__')]:
- if int(installed_app[installed_app.index('__') + 2:]) > number:
- number = int(installed_app[installed_app.index('__') + 2:])
+ elif "__" in installed_app:
+ if app == installed_app[: installed_app.index("__")]:
+ if int(installed_app[installed_app.index("__") + 2 :]) > number:
+ number = int(installed_app[installed_app.index("__") + 2 :])
return number
@@ -2364,7 +2326,7 @@ def _installed_instance_number(app, last=False):
instances_dict = app_map(app=app, raw=True)
for key, domain in instances_dict.items():
for key, path in domain.items():
- instance_number_list.append(path['instance'])
+ instance_number_list.append(path["instance"])
return sorted(instance_number_list)
@@ -2383,295 +2345,175 @@ def _is_installed(app):
return os.path.isdir(APPS_SETTING_PATH + app)
-def _value_for_locale(values):
- """
- Return proper value for current locale
-
- Keyword arguments:
- values -- A dict of values associated to their locale
-
- Returns:
- An utf-8 encoded string
-
- """
- if not isinstance(values, dict):
- return values
-
- for lang in [m18n.locale, m18n.default_locale]:
- try:
- return _encode_string(values[lang])
- except KeyError:
- continue
-
- # Fallback to first value
- return _encode_string(values.values()[0])
+def _assert_is_installed(app):
+ if not _is_installed(app):
+ raise YunohostValidationError(
+ "app_not_installed", app=app, all_apps=_get_all_installed_apps_id()
+ )
-def _encode_string(value):
- """
- Return the string encoded in utf-8 if needed
- """
- if isinstance(value, unicode):
- return value.encode('utf8')
- return value
+def _installed_apps():
+ return os.listdir(APPS_SETTING_PATH)
def _check_manifest_requirements(manifest, app_instance_name):
"""Check if required packages are met from the manifest"""
- requirements = manifest.get('requirements', dict())
- # FIXME: Deprecate min_version key
- if 'min_version' in manifest:
- requirements['yunohost'] = '>> {0}'.format(manifest['min_version'])
- logger.debug("the manifest key 'min_version' is deprecated, "
- "use 'requirements' instead.")
+ packaging_format = int(manifest.get("packaging_format", 0))
+ if packaging_format not in [0, 1]:
+ raise YunohostValidationError("app_packaging_format_not_supported")
- # Validate multi-instance app
- if is_true(manifest.get('multi_instance', False)):
- # Handle backward-incompatible change introduced in yunohost >= 2.3.6
- # See https://github.com/YunoHost/issues/issues/156
- yunohost_req = requirements.get('yunohost', None)
- if (not yunohost_req or
- not packages.SpecifierSet(yunohost_req) & '>= 2.3.6'):
- raise YunohostError('{0}{1}'.format(
- m18n.g('colon', m18n.n('app_incompatible'), app=app_instance_name),
- m18n.n('app_package_need_update', app=app_instance_name)))
- elif not requirements:
+ requirements = manifest.get("requirements", dict())
+
+ if not requirements:
return
- logger.debug(m18n.n('app_requirements_checking', app=app_instance_name))
-
- # Retrieve versions of each required package
- try:
- versions = packages.get_installed_version(
- *requirements.keys(), strict=True, as_dict=True)
- except packages.PackageException as e:
- raise YunohostError('app_requirements_failed', error=str(e), app=app_instance_name)
+ logger.debug(m18n.n("app_requirements_checking", app=app_instance_name))
# Iterate over requirements
for pkgname, spec in requirements.items():
- version = versions[pkgname]
- if version not in packages.SpecifierSet(spec):
- raise YunohostError('app_requirements_unmeet',
- pkgname=pkgname, version=version,
- spec=spec, app=app_instance_name)
+ if not packages.meets_version_specifier(pkgname, spec):
+ version = packages.ynh_packages_version()[pkgname]["version"]
+ raise YunohostValidationError(
+ "app_requirements_unmeet",
+ pkgname=pkgname,
+ version=version,
+ spec=spec,
+ app=app_instance_name,
+ )
-def _parse_args_from_manifest(manifest, action, args={}):
- """Parse arguments needed for an action from the manifest
-
- Retrieve specified arguments for the action from the manifest, and parse
- given args according to that. If some required arguments are not provided,
- its values will be asked if interaction is possible.
- Parsed arguments will be returned as an OrderedDict
-
- Keyword arguments:
- manifest -- The app manifest to use
- action -- The action to retrieve arguments for
- args -- A dictionnary of arguments to parse
-
- """
- if action not in manifest['arguments']:
- logger.debug("no arguments found for '%s' in manifest", action)
- return OrderedDict()
-
- action_args = manifest['arguments'][action]
- return _parse_args_in_yunohost_format(args, action_args)
-
-
-def _parse_args_for_action(action, args={}):
- """Parse arguments needed for an action from the actions list
-
- Retrieve specified arguments for the action from the manifest, and parse
- given args according to that. If some required arguments are not provided,
- its values will be asked if interaction is possible.
- Parsed arguments will be returned as an OrderedDict
-
- Keyword arguments:
- action -- The action
- args -- A dictionnary of arguments to parse
-
- """
- args_dict = OrderedDict()
-
- if 'arguments' not in action:
- logger.debug("no arguments found for '%s' in manifest", action)
- return args_dict
-
- action_args = action['arguments']
-
- return _parse_args_in_yunohost_format(args, action_args)
-
-
-def _parse_args_in_yunohost_format(args, action_args):
- """Parse arguments store in either manifest.json or actions.json
- """
- from yunohost.domain import (domain_list, _get_maindomain,
- _get_conflicting_apps, _normalize_domain_path)
- from yunohost.user import user_info, user_list
-
- args_dict = OrderedDict()
-
- for arg in action_args:
- arg_name = arg['name']
- arg_type = arg.get('type', 'string')
- arg_default = arg.get('default', None)
- arg_choices = arg.get('choices', [])
- arg_value = None
-
- # Transpose default value for boolean type and set it to
- # false if not defined.
- if arg_type == 'boolean':
- arg_default = 1 if arg_default else 0
-
- # do not print for webadmin
- if arg_type == 'display_text' and msettings.get('interface') != 'api':
- print(_value_for_locale(arg['ask']))
- continue
-
- # Attempt to retrieve argument value
- if arg_name in args:
- arg_value = args[arg_name]
- else:
- if 'ask' in arg:
- # Retrieve proper ask string
- ask_string = _value_for_locale(arg['ask'])
-
- # Append extra strings
- if arg_type == 'boolean':
- ask_string += ' [yes | no]'
- elif arg_choices:
- ask_string += ' [{0}]'.format(' | '.join(arg_choices))
-
- if arg_default is not None:
- if arg_type == 'boolean':
- ask_string += ' (default: {0})'.format("yes" if arg_default == 1 else "no")
- else:
- ask_string += ' (default: {0})'.format(arg_default)
-
- # Check for a password argument
- is_password = True if arg_type == 'password' else False
-
- if arg_type == 'domain':
- arg_default = _get_maindomain()
- ask_string += ' (default: {0})'.format(arg_default)
- msignals.display(m18n.n('domains_available'))
- for domain in domain_list()['domains']:
- msignals.display("- {}".format(domain))
-
- elif arg_type == 'user':
- msignals.display(m18n.n('users_available'))
- for user in user_list()['users'].keys():
- msignals.display("- {}".format(user))
-
- elif arg_type == 'password':
- msignals.display(m18n.n('good_practices_about_user_password'))
-
- try:
- input_string = msignals.prompt(ask_string, is_password)
- except NotImplementedError:
- input_string = None
- if (input_string == '' or input_string is None) \
- and arg_default is not None:
- arg_value = arg_default
- else:
- arg_value = input_string
- elif arg_default is not None:
- arg_value = arg_default
-
- # If the value is empty (none or '')
- # then check if arg is optional or not
- if arg_value is None or arg_value == '':
- if arg.get("optional", False):
- # Argument is optional, keep an empty value
- # and that's all for this arg !
- args_dict[arg_name] = ('', arg_type)
- continue
- else:
- # The argument is required !
- raise YunohostError('app_argument_required', name=arg_name)
-
- # Validate argument choice
- if arg_choices and arg_value not in arg_choices:
- raise YunohostError('app_argument_choice_invalid', name=arg_name, choices=', '.join(arg_choices))
-
- # Validate argument type
- if arg_type == 'domain':
- if arg_value not in domain_list()['domains']:
- raise YunohostError('app_argument_invalid', name=arg_name, error=m18n.n('domain_unknown'))
- elif arg_type == 'user':
- try:
- user_info(arg_value)
- except YunohostError as e:
- raise YunohostError('app_argument_invalid', name=arg_name, error=e)
- elif arg_type == 'app':
- if not _is_installed(arg_value):
- raise YunohostError('app_argument_invalid', name=arg_name, error=m18n.n('app_unknown'))
- elif arg_type == 'boolean':
- if isinstance(arg_value, bool):
- arg_value = 1 if arg_value else 0
- else:
- if str(arg_value).lower() in ["1", "yes", "y"]:
- arg_value = 1
- elif str(arg_value).lower() in ["0", "no", "n"]:
- arg_value = 0
- else:
- raise YunohostError('app_argument_choice_invalid', name=arg_name, choices='yes, no, y, n, 1, 0')
- elif arg_type == 'password':
- forbidden_chars = "{}"
- if any(char in arg_value for char in forbidden_chars):
- raise YunohostError('pattern_password_app', forbidden_chars=forbidden_chars)
- from yunohost.utils.password import assert_password_is_strong_enough
- assert_password_is_strong_enough('user', arg_value)
- args_dict[arg_name] = (arg_value, arg_type)
-
- # END loop over action_args...
+def _guess_webapp_path_requirement(questions: List[Question], app_folder: str) -> str:
# If there's only one "domain" and "path", validate that domain/path
# is an available url and normalize the path.
- domain_args = [ (name, value[0]) for name, value in args_dict.items() if value[1] == "domain" ]
- path_args = [ (name, value[0]) for name, value in args_dict.items() if value[1] == "path" ]
+ domain_questions = [question for question in questions if question.type == "domain"]
+ path_questions = [question for question in questions if question.type == "path"]
- if len(domain_args) == 1 and len(path_args) == 1:
+ if len(domain_questions) == 0 and len(path_questions) == 0:
+ return ""
+ if len(domain_questions) == 1 and len(path_questions) == 1:
+ return "domain_and_path"
+ if len(domain_questions) == 1 and len(path_questions) == 0:
+ # This is likely to be a full-domain app...
- domain = domain_args[0][1]
- path = path_args[0][1]
- domain, path = _normalize_domain_path(domain, path)
+ # Confirm that this is a full-domain app This should cover most cases
+ # ... though anyway the proper solution is to implement some mechanism
+ # in the manifest for app to declare that they require a full domain
+ # (among other thing) so that we can dynamically check/display this
+ # requirement on the webadmin form and not miserably fail at submit time
- # Check the url is available
- conflicts = _get_conflicting_apps(domain, path)
- if conflicts:
- apps = []
- for path, app_id, app_label in conflicts:
- apps.append(" * {domain:s}{path:s} → {app_label:s} ({app_id:s})".format(
+ # Full-domain apps typically declare something like path_url="/" or path=/
+ # and use ynh_webpath_register or yunohost_app_checkurl inside the install script
+ install_script_content = read_file(os.path.join(app_folder, "scripts/install"))
+
+ if re.search(
+ r"\npath(_url)?=[\"']?/[\"']?", install_script_content
+ ) and re.search(r"ynh_webpath_register", install_script_content):
+ return "full_domain"
+
+ return "?"
+
+
+def _validate_webpath_requirement(
+ questions: List[Question], path_requirement: str
+) -> None:
+
+ domain_questions = [question for question in questions if question.type == "domain"]
+ path_questions = [question for question in questions if question.type == "path"]
+
+ if path_requirement == "domain_and_path":
+
+ domain = domain_questions[0].value
+ path = path_questions[0].value
+ _assert_no_conflicting_apps(domain, path, full_domain=True)
+
+ elif path_requirement == "full_domain":
+
+ domain = domain_questions[0].value
+ _assert_no_conflicting_apps(domain, "/", full_domain=True)
+
+
+def _get_conflicting_apps(domain, path, ignore_app=None):
+ """
+ Return a list of all conflicting apps with a domain/path (it can be empty)
+
+ Keyword argument:
+ domain -- The domain for the web path (e.g. your.domain.tld)
+ path -- The path to check (e.g. /coffee)
+ ignore_app -- An optional app id to ignore (c.f. the change_url usecase)
+ """
+
+ from yunohost.domain import _assert_domain_exists
+
+ domain = DomainQuestion.normalize(domain)
+ path = PathQuestion.normalize(path)
+
+ # Abort if domain is unknown
+ _assert_domain_exists(domain)
+
+ # Fetch apps map
+ apps_map = app_map(raw=True)
+
+ # Loop through all apps to check if path is taken by one of them
+ conflicts = []
+ if domain in apps_map:
+ # Loop through apps
+ for p, a in apps_map[domain].items():
+ if a["id"] == ignore_app:
+ continue
+ if path == p:
+ conflicts.append((p, a["id"], a["label"]))
+ # We also don't want conflicts with other apps starting with
+ # same name
+ elif path.startswith(p) or p.startswith(path):
+ conflicts.append((p, a["id"], a["label"]))
+
+ return conflicts
+
+
+def _assert_no_conflicting_apps(domain, path, ignore_app=None, full_domain=False):
+
+ conflicts = _get_conflicting_apps(domain, path, ignore_app)
+
+ if conflicts:
+ apps = []
+ for path, app_id, app_label in conflicts:
+ apps.append(
+ " * {domain:s}{path:s} → {app_label:s} ({app_id:s})".format(
domain=domain,
path=path,
app_id=app_id,
app_label=app_label,
- ))
+ )
+ )
- raise YunohostError('app_location_unavailable', apps="\n".join(apps))
-
- # (We save this normalized path so that the install script have a
- # standard path format to deal with no matter what the user inputted)
- args_dict[path_args[0][0]] = (path, "path")
-
- return args_dict
+ if full_domain:
+ raise YunohostValidationError("app_full_domain_unavailable", domain=domain)
+ else:
+ raise YunohostValidationError(
+ "app_location_unavailable", apps="\n".join(apps)
+ )
-def _make_environment_dict(args_dict, prefix="APP_ARG_"):
- """
- Convert a dictionnary containing manifest arguments
- to a dictionnary of env. var. to be passed to scripts
+def _make_environment_for_app_script(app, args={}, args_prefix="APP_ARG_"):
- Keyword arguments:
- arg -- A key/value dictionnary of manifest arguments
+ app_setting_path = os.path.join(APPS_SETTING_PATH, app)
+
+ manifest = _get_manifest_of_app(app_setting_path)
+ app_id, app_instance_nb = _parse_app_instance_name(app)
+
+ env_dict = {
+ "YNH_APP_ID": app_id,
+ "YNH_APP_INSTANCE_NAME": app,
+ "YNH_APP_INSTANCE_NUMBER": str(app_instance_nb),
+ "YNH_APP_MANIFEST_VERSION": manifest.get("version", "?"),
+ }
+
+ for arg_name, arg_value in args.items():
+ env_dict["YNH_%s%s" % (args_prefix, arg_name.upper())] = str(arg_value)
- """
- env_dict = {}
- for arg_name, arg_value_and_type in args_dict.items():
- env_dict["YNH_%s%s" % (prefix, arg_name.upper())] = arg_value_and_type[0]
return env_dict
@@ -2699,156 +2541,212 @@ def _parse_app_instance_name(app_instance_name):
"""
match = re_app_instance_name.match(app_instance_name)
assert match, "Could not parse app instance name : %s" % app_instance_name
- appid = match.groupdict().get('appid')
- app_instance_nb = int(match.groupdict().get('appinstancenb')) if match.groupdict().get('appinstancenb') is not None else 1
+ appid = match.groupdict().get("appid")
+ app_instance_nb = (
+ int(match.groupdict().get("appinstancenb"))
+ if match.groupdict().get("appinstancenb") is not None
+ else 1
+ )
return (appid, app_instance_nb)
-def _using_legacy_appslist_system():
+#
+# ############################### #
+# Applications list management #
+# ############################### #
+#
+
+
+def _initialize_apps_catalog_system():
"""
- Return True if we're using the old fetchlist scheme.
- This is determined by the presence of some cron job yunohost-applist-foo
+ This function is meant to initialize the apps_catalog system with YunoHost's default app catalog.
"""
- return glob.glob("/etc/cron.d/yunohost-applist-*") != []
+ default_apps_catalog_list = [{"id": "default", "url": APPS_CATALOG_DEFAULT_URL}]
-
-def _migrate_appslist_system():
- """
- Migrate from the legacy fetchlist system to the new one
- """
- legacy_crons = glob.glob("/etc/cron.d/yunohost-applist-*")
-
- for cron_path in legacy_crons:
- appslist_name = os.path.basename(cron_path).replace("yunohost-applist-", "")
- logger.debug(m18n.n('appslist_migrating', appslist=appslist_name))
-
- # Parse appslist url in cron
- cron_file_content = open(cron_path).read().strip()
- appslist_url_parse = re.search("-u (https?://[^ ]+)", cron_file_content)
-
- # Abort if we did not find an url
- if not appslist_url_parse or not appslist_url_parse.groups():
- # Bkp the old cron job somewhere else
- bkp_file = "/etc/yunohost/%s.oldlist.bkp" % appslist_name
- os.rename(cron_path, bkp_file)
- # Notice the user
- logger.warning(m18n.n('appslist_could_not_migrate',
- appslist=appslist_name,
- bkp_file=bkp_file))
- # Otherwise, register the list and remove the legacy cron
- else:
- appslist_url = appslist_url_parse.groups()[0]
- try:
- _register_new_appslist(appslist_url, appslist_name)
- # Might get an exception if two legacy cron jobs conflict
- # in terms of url...
- except Exception as e:
- logger.error(str(e))
- # Bkp the old cron job somewhere else
- bkp_file = "/etc/yunohost/%s.oldlist.bkp" % appslist_name
- os.rename(cron_path, bkp_file)
- # Notice the user
- logger.warning(m18n.n('appslist_could_not_migrate',
- appslist=appslist_name,
- bkp_file=bkp_file))
- else:
- os.remove(cron_path)
-
-
-def _install_appslist_fetch_cron():
-
- cron_job_file = "/etc/cron.daily/yunohost-fetch-appslists"
-
- logger.debug("Installing appslist fetch cron job")
-
- cron_job = []
- cron_job.append("#!/bin/bash")
- # We add a random delay between 0 and 60 min to avoid every instance fetching
- # the appslist at the same time every night
- cron_job.append("(sleep $((RANDOM%3600));")
- cron_job.append("yunohost app fetchlist > /dev/null 2>&1) &")
-
- with open(cron_job_file, "w") as f:
- f.write('\n'.join(cron_job))
-
- _set_permissions(cron_job_file, "root", "root", 0o755)
-
-
-# FIXME - Duplicate from certificate.py, should be moved into a common helper
-# thing...
-def _set_permissions(path, user, group, permissions):
- uid = pwd.getpwnam(user).pw_uid
- gid = grp.getgrnam(group).gr_gid
-
- os.chown(path, uid, gid)
- os.chmod(path, permissions)
-
-
-def _read_appslist_list():
- """
- Read the json corresponding to the list of appslists
- """
-
- # If file does not exists yet, return empty dict
- if not os.path.exists(APPSLISTS_JSON):
- return {}
-
- # Read file content
- with open(APPSLISTS_JSON, "r") as f:
- appslists_json = f.read()
-
- # Parse json, throw exception if what we got from file is not a valid json
try:
- appslists = json.loads(appslists_json)
- except ValueError:
- raise YunohostError('appslist_corrupted_json', filename=APPSLISTS_JSON)
-
- return appslists
-
-
-def _write_appslist_list(appslist_lists):
- """
- Update the json containing list of appslists
- """
-
- # Write appslist list
- try:
- with open(APPSLISTS_JSON, "w") as f:
- json.dump(appslist_lists, f)
+ logger.debug(
+ "Initializing apps catalog system with YunoHost's default app list"
+ )
+ write_to_yaml(APPS_CATALOG_CONF, default_apps_catalog_list)
except Exception as e:
- raise YunohostError("Error while writing list of appslist %s: %s" %
- (APPSLISTS_JSON, str(e)), raw_msg=True)
+ raise YunohostError(
+ "Could not initialize the apps catalog system... : %s" % str(e)
+ )
+
+ logger.success(m18n.n("apps_catalog_init_success"))
-def _register_new_appslist(url, name):
+def _read_apps_catalog_list():
"""
- Add a new appslist to be fetched regularly.
- Raise an exception if url or name conflicts with an existing list.
+ Read the json corresponding to the list of apps catalogs
"""
- appslist_list = _read_appslist_list()
+ try:
+ list_ = read_yaml(APPS_CATALOG_CONF)
+ # Support the case where file exists but is empty
+ # by returning [] if list_ is None
+ return list_ if list_ else []
+ except Exception as e:
+ raise YunohostError("Could not read the apps_catalog list ... : %s" % str(e))
- # Check if name conflicts with an existing list
- if name in appslist_list:
- raise YunohostError('appslist_name_already_tracked', name=name)
- # Check if url conflicts with an existing list
- known_appslist_urls = [appslist["url"] for _, appslist in appslist_list.items()]
+def _actual_apps_catalog_api_url(base_url):
- if url in known_appslist_urls:
- raise YunohostError('appslist_url_already_tracked', url=url)
+ return "{base_url}/v{version}/apps.json".format(
+ base_url=base_url, version=APPS_CATALOG_API_VERSION
+ )
- logger.debug("Registering new appslist %s at %s" % (name, url))
- appslist_list[name] = {
- "url": url,
- "lastUpdate": None
- }
+def _update_apps_catalog():
+ """
+ Fetches the json for each apps_catalog and update the cache
- _write_appslist_list(appslist_list)
+ apps_catalog_list is for example :
+ [ {"id": "default", "url": "https://app.yunohost.org/default/"} ]
- _install_appslist_fetch_cron()
+ Then for each apps_catalog, the actual json URL to be fetched is like :
+ https://app.yunohost.org/default/vX/apps.json
+
+ And store it in :
+ /var/cache/yunohost/repo/default.json
+ """
+
+ apps_catalog_list = _read_apps_catalog_list()
+
+ logger.info(m18n.n("apps_catalog_updating"))
+
+ # Create cache folder if needed
+ if not os.path.exists(APPS_CATALOG_CACHE):
+ logger.debug("Initialize folder for apps catalog cache")
+ mkdir(APPS_CATALOG_CACHE, mode=0o750, parents=True, uid="root")
+
+ for apps_catalog in apps_catalog_list:
+ apps_catalog_id = apps_catalog["id"]
+ actual_api_url = _actual_apps_catalog_api_url(apps_catalog["url"])
+
+ # Fetch the json
+ try:
+ apps_catalog_content = download_json(actual_api_url)
+ except Exception as e:
+ raise YunohostError(
+ "apps_catalog_failed_to_download",
+ apps_catalog=apps_catalog_id,
+ error=str(e),
+ )
+
+ # Remember the apps_catalog api version for later
+ apps_catalog_content["from_api_version"] = APPS_CATALOG_API_VERSION
+
+ # Save the apps_catalog data in the cache
+ cache_file = "{cache_folder}/{list}.json".format(
+ cache_folder=APPS_CATALOG_CACHE, list=apps_catalog_id
+ )
+ try:
+ write_to_json(cache_file, apps_catalog_content)
+ except Exception as e:
+ raise YunohostError(
+ "Unable to write cache data for %s apps_catalog : %s"
+ % (apps_catalog_id, str(e))
+ )
+
+ logger.success(m18n.n("apps_catalog_update_success"))
+
+
+def _load_apps_catalog():
+ """
+ Read all the apps catalog cache files and build a single dict (merged_catalog)
+ corresponding to all known apps and categories
+ """
+
+ merged_catalog = {"apps": {}, "categories": []}
+
+ for apps_catalog_id in [L["id"] for L in _read_apps_catalog_list()]:
+
+ # Let's load the json from cache for this catalog
+ cache_file = "{cache_folder}/{list}.json".format(
+ cache_folder=APPS_CATALOG_CACHE, list=apps_catalog_id
+ )
+
+ try:
+ apps_catalog_content = (
+ read_json(cache_file) if os.path.exists(cache_file) else None
+ )
+ except Exception as e:
+ raise YunohostError(
+ "Unable to read cache for apps_catalog %s : %s" % (cache_file, e),
+ raw_msg=True,
+ )
+
+ # Check that the version of the data matches version ....
+ # ... otherwise it means we updated yunohost in the meantime
+ # and need to update the cache for everything to be consistent
+ if (
+ not apps_catalog_content
+ or apps_catalog_content.get("from_api_version") != APPS_CATALOG_API_VERSION
+ ):
+ logger.info(m18n.n("apps_catalog_obsolete_cache"))
+ _update_apps_catalog()
+ apps_catalog_content = read_json(cache_file)
+
+ del apps_catalog_content["from_api_version"]
+
+ # Add apps from this catalog to the output
+ for app, info in apps_catalog_content["apps"].items():
+
+ # (N.B. : there's a small edge case where multiple apps catalogs could be listing the same apps ...
+ # in which case we keep only the first one found)
+ if app in merged_catalog["apps"]:
+ logger.warning(
+ "Duplicate app %s found between apps catalog %s and %s"
+ % (app, apps_catalog_id, merged_catalog["apps"][app]["repository"])
+ )
+ continue
+
+ info["repository"] = apps_catalog_id
+ merged_catalog["apps"][app] = info
+
+ # Annnnd categories
+ merged_catalog["categories"] += apps_catalog_content["categories"]
+
+ return merged_catalog
+
+
+#
+# ############################### #
+# Small utilities #
+# ############################### #
+#
+
+
+def _make_tmp_workdir_for_app(app=None):
+
+ # Create parent dir if it doesn't exists yet
+ if not os.path.exists(APP_TMP_WORKDIRS):
+ os.makedirs(APP_TMP_WORKDIRS)
+
+ now = int(time.time())
+
+ # Cleanup old dirs (if any)
+ for dir_ in os.listdir(APP_TMP_WORKDIRS):
+ path = os.path.join(APP_TMP_WORKDIRS, dir_)
+ # We only delete folders older than an arbitrary 12 hours
+ # This is to cover the stupid case of upgrades
+ # Where many apps will call 'yunohost backup create'
+ # from the upgrade script itself,
+ # which will also call this function while the upgrade
+ # script itself is running in one of those dir...
+ # It could be that there are other edge cases
+ # such as app-install-during-app-install
+ if os.stat(path).st_mtime < now - 12 * 3600:
+ shutil.rmtree(path)
+ tmpdir = tempfile.mkdtemp(prefix="app_", dir=APP_TMP_WORKDIRS)
+
+ # Copy existing app scripts, conf, ... if an app arg was provided
+ if app:
+ os.system(f"cp -a {APPS_SETTING_PATH}/{app}/* {tmpdir}")
+
+ return tmpdir
def is_true(arg):
@@ -2864,78 +2762,114 @@ def is_true(arg):
"""
if isinstance(arg, bool):
return arg
- elif isinstance(arg, basestring):
- true_list = ['yes', 'Yes', 'true', 'True']
- for string in true_list:
- if arg == string:
- return True
- return False
+ elif isinstance(arg, str):
+ return arg.lower() in ["yes", "true", "on"]
else:
- logger.debug('arg should be a boolean or a string, got %r', arg)
+ logger.debug("arg should be a boolean or a string, got %r", arg)
return True if arg else False
-def random_password(length=8):
- """
- Generate a random string
-
- Keyword arguments:
- length -- The string length to generate
-
- """
- import string
- import random
-
- char_set = string.ascii_uppercase + string.digits + string.ascii_lowercase
- return ''.join([random.SystemRandom().choice(char_set) for x in range(length)])
-
-
def unstable_apps():
- raw_app_installed = app_list(installed=True, raw=True)
output = []
- for app, infos in raw_app_installed.items():
+ for infos in app_list(full=True)["apps"]:
- repo = infos.get("repository", None)
- state = infos.get("state", None)
-
- if repo is None or state in ["inprogress", "notworking"]:
- output.append(app)
+ if not infos.get("from_catalog") or infos.get("from_catalog").get("state") in [
+ "inprogress",
+ "notworking",
+ ]:
+ output.append(infos["id"])
return output
-def _check_services_status_for_app(services):
+def _assert_system_is_sane_for_app(manifest, when):
+
+ from yunohost.service import service_status
logger.debug("Checking that required services are up and running...")
+ services = manifest.get("services", [])
+
# Some apps use php-fpm or php5-fpm which is now php7.0-fpm
def replace_alias(service):
- if service in ["php-fpm", "php5-fpm"]:
- return "php7.0-fpm"
+ if service in ["php-fpm", "php5-fpm", "php7.0-fpm"]:
+ return "php7.3-fpm"
else:
return service
+
services = [replace_alias(s) for s in services]
# We only check those, mostly to ignore "custom" services
# (added by apps) and because those are the most popular
# services
- service_filter = ["nginx", "php7.0-fpm", "mysql", "postfix"]
+ service_filter = ["nginx", "php7.3-fpm", "mysql", "postfix"]
services = [str(s) for s in services if s in service_filter]
+ if "nginx" not in services:
+ services = ["nginx"] + services
+ if "fail2ban" not in services:
+ services.append("fail2ban")
+
+ # Wait if a service is reloading
+ test_nb = 0
+ while test_nb < 16:
+ if not any(s for s in services if service_status(s)["status"] == "reloading"):
+ break
+ time.sleep(0.5)
+ test_nb += 1
+
# List services currently down and raise an exception if any are found
- faulty_services = [s for s in services if service_status(s)["active"] != "active"]
+ services_status = {s: service_status(s) for s in services}
+ faulty_services = [
+ f"{s} ({status['status']})"
+ for s, status in services_status.items()
+ if status["status"] != "running"
+ ]
+
if faulty_services:
- raise YunohostError('app_action_cannot_be_ran_because_required_services_down',
- services=', '.join(faulty_services))
+ if when == "pre":
+ raise YunohostValidationError(
+ "app_action_cannot_be_ran_because_required_services_down",
+ services=", ".join(faulty_services),
+ )
+ elif when == "post":
+ raise YunohostError(
+ "app_action_broke_system", services=", ".join(faulty_services)
+ )
+
+ if packages.dpkg_is_broken():
+ if when == "pre":
+ raise YunohostValidationError("dpkg_is_broken")
+ elif when == "post":
+ raise YunohostError("this_action_broke_dpkg")
-def _patch_php5(app_folder):
+LEGACY_PHP_VERSION_REPLACEMENTS = [
+ ("/etc/php5", "/etc/php/7.3"),
+ ("/etc/php/7.0", "/etc/php/7.3"),
+ ("/var/run/php5-fpm", "/var/run/php/php7.3-fpm"),
+ ("/var/run/php/php7.0-fpm", "/var/run/php/php7.3-fpm"),
+ ("php5", "php7.3"),
+ ("php7.0", "php7.3"),
+ (
+ 'phpversion="${phpversion:-7.0}"',
+ 'phpversion="${phpversion:-7.3}"',
+ ), # Many helpers like the composer ones use 7.0 by default ...
+ (
+ '"$phpversion" == "7.0"',
+ '$(bc <<< "$phpversion >= 7.3") -eq 1',
+ ), # patch ynh_install_php to refuse installing/removing php <= 7.3
+]
+
+
+def _patch_legacy_php_versions(app_folder):
files_to_patch = []
files_to_patch.extend(glob.glob("%s/conf/*" % app_folder))
files_to_patch.extend(glob.glob("%s/scripts/*" % app_folder))
+ files_to_patch.extend(glob.glob("%s/scripts/*/*" % app_folder))
files_to_patch.extend(glob.glob("%s/scripts/.*" % app_folder))
files_to_patch.append("%s/manifest.json" % app_folder)
files_to_patch.append("%s/manifest.toml" % app_folder)
@@ -2946,8 +2880,177 @@ def _patch_php5(app_folder):
if not os.path.isfile(filename):
continue
- c = "sed -i -e 's@/etc/php5@/etc/php/7.0@g' " \
- "-e 's@/var/run/php5-fpm@/var/run/php/php7.0-fpm@g' " \
- "-e 's@php5@php7.0@g' " \
- "%s" % filename
+ c = (
+ "sed -i "
+ + "".join(
+ "-e 's@{pattern}@{replace}@g' ".format(pattern=p, replace=r)
+ for p, r in LEGACY_PHP_VERSION_REPLACEMENTS
+ )
+ + "%s" % filename
+ )
os.system(c)
+
+
+def _patch_legacy_php_versions_in_settings(app_folder):
+
+ settings = read_yaml(os.path.join(app_folder, "settings.yml"))
+
+ if settings.get("fpm_config_dir") == "/etc/php/7.0/fpm":
+ settings["fpm_config_dir"] = "/etc/php/7.3/fpm"
+ if settings.get("fpm_service") == "php7.0-fpm":
+ settings["fpm_service"] = "php7.3-fpm"
+ if settings.get("phpversion") == "7.0":
+ settings["phpversion"] = "7.3"
+
+ # We delete these checksums otherwise the file will appear as manually modified
+ list_to_remove = ["checksum__etc_php_7.0_fpm_pool", "checksum__etc_nginx_conf.d"]
+ settings = {
+ k: v
+ for k, v in settings.items()
+ if not any(k.startswith(to_remove) for to_remove in list_to_remove)
+ }
+
+ write_to_yaml(app_folder + "/settings.yml", settings)
+
+
+def _patch_legacy_helpers(app_folder):
+
+ files_to_patch = []
+ files_to_patch.extend(glob.glob("%s/scripts/*" % app_folder))
+ files_to_patch.extend(glob.glob("%s/scripts/.*" % app_folder))
+
+ stuff_to_replace = {
+ # Replace
+ # sudo yunohost app initdb $db_user -p $db_pwd
+ # by
+ # ynh_mysql_setup_db --db_user=$db_user --db_name=$db_user --db_pwd=$db_pwd
+ "yunohost app initdb": {
+ "pattern": r"(sudo )?yunohost app initdb \"?(\$\{?\w+\}?)\"?\s+-p\s\"?(\$\{?\w+\}?)\"?",
+ "replace": r"ynh_mysql_setup_db --db_user=\2 --db_name=\2 --db_pwd=\3",
+ "important": True,
+ },
+ # Replace
+ # sudo yunohost app checkport whaterver
+ # by
+ # ynh_port_available whatever
+ "yunohost app checkport": {
+ "pattern": r"(sudo )?yunohost app checkport",
+ "replace": r"ynh_port_available",
+ "important": True,
+ },
+ # We can't migrate easily port-available
+ # .. but at the time of writing this code, only two non-working apps are using it.
+ "yunohost tools port-available": {"important": True},
+ # Replace
+ # yunohost app checkurl "${domain}${path_url}" -a "${app}"
+ # by
+ # ynh_webpath_register --app=${app} --domain=${domain} --path_url=${path_url}
+ "yunohost app checkurl": {
+ "pattern": r"(sudo )?yunohost app checkurl \"?(\$\{?\w+\}?)\/?(\$\{?\w+\}?)\"?\s+-a\s\"?(\$\{?\w+\}?)\"?",
+ "replace": r"ynh_webpath_register --app=\4 --domain=\2 --path_url=\3",
+ "important": True,
+ },
+ # Remove
+ # Automatic diagnosis data from YunoHost
+ # __PRE_TAG1__$(yunohost tools diagnosis | ...)__PRE_TAG2__"
+ #
+ "yunohost tools diagnosis": {
+ "pattern": r"(Automatic diagnosis data from YunoHost( *\n)*)? *(__\w+__)? *\$\(yunohost tools diagnosis.*\)(__\w+__)?",
+ "replace": r"",
+ "important": False,
+ },
+ # Old $1, $2 in backup/restore scripts...
+ "app=$2": {
+ "only_for": ["scripts/backup", "scripts/restore"],
+ "pattern": r"app=\$2",
+ "replace": r"app=$YNH_APP_INSTANCE_NAME",
+ "important": True,
+ },
+ # Old $1, $2 in backup/restore scripts...
+ "backup_dir=$1": {
+ "only_for": ["scripts/backup", "scripts/restore"],
+ "pattern": r"backup_dir=\$1",
+ "replace": r"backup_dir=.",
+ "important": True,
+ },
+ # Old $1, $2 in backup/restore scripts...
+ "restore_dir=$1": {
+ "only_for": ["scripts/restore"],
+ "pattern": r"restore_dir=\$1",
+ "replace": r"restore_dir=.",
+ "important": True,
+ },
+ # Old $1, $2 in install scripts...
+ # We ain't patching that shit because it ain't trivial to patch all args...
+ "domain=$1": {"only_for": ["scripts/install"], "important": True},
+ }
+
+ for helper, infos in stuff_to_replace.items():
+ infos["pattern"] = (
+ re.compile(infos["pattern"]) if infos.get("pattern") else None
+ )
+ infos["replace"] = infos.get("replace")
+
+ for filename in files_to_patch:
+
+ # Ignore non-regular files
+ if not os.path.isfile(filename):
+ continue
+
+ try:
+ content = read_file(filename)
+ except MoulinetteError:
+ continue
+
+ replaced_stuff = False
+ show_warning = False
+
+ for helper, infos in stuff_to_replace.items():
+
+ # Ignore if not relevant for this file
+ if infos.get("only_for") and not any(
+ filename.endswith(f) for f in infos["only_for"]
+ ):
+ continue
+
+ # If helper is used, attempt to patch the file
+ if helper in content and infos["pattern"]:
+ content = infos["pattern"].sub(infos["replace"], content)
+ replaced_stuff = True
+ if infos["important"]:
+ show_warning = True
+
+ # If the helper is *still* in the content, it means that we
+ # couldn't patch the deprecated helper in the previous lines. In
+ # that case, abort the install or whichever step is performed
+ if helper in content and infos["important"]:
+ raise YunohostValidationError(
+ "This app is likely pretty old and uses deprecated / outdated helpers that can't be migrated easily. It can't be installed anymore.",
+ raw_msg=True,
+ )
+
+ if replaced_stuff:
+
+ # Check that the app does load the helper
+ # If it doesn't, add the instruction ourselves (making sure it's after the #!/bin/bash if it's there...)
+ if filename.split("/")[-1] in [
+ "install",
+ "remove",
+ "upgrade",
+ "backup",
+ "restore",
+ ]:
+ source_helpers = "source /usr/share/yunohost/helpers"
+ if source_helpers not in content:
+ content.replace("#!/bin/bash", "#!/bin/bash\n" + source_helpers)
+ if source_helpers not in content:
+ content = source_helpers + "\n" + content
+
+ # Actually write the new content in the file
+ write_to_file(filename, content)
+
+ if show_warning:
+ # And complain about those damn deprecated helpers
+ logger.error(
+ r"/!\ Packagers ! This app uses a very old deprecated helpers ... Yunohost automatically patched the helpers to use the new recommended practice, but please do consider fixing the upstream code right now ..."
+ )
diff --git a/src/yunohost/authenticators/ldap_admin.py b/src/yunohost/authenticators/ldap_admin.py
new file mode 100644
index 000000000..94d68a8db
--- /dev/null
+++ b/src/yunohost/authenticators/ldap_admin.py
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+
+import os
+import logging
+import ldap
+import ldap.sasl
+import time
+
+from moulinette import m18n
+from moulinette.authentication import BaseAuthenticator
+from yunohost.utils.error import YunohostError
+
+logger = logging.getLogger("yunohost.authenticators.ldap_admin")
+
+
+class Authenticator(BaseAuthenticator):
+
+ name = "ldap_admin"
+
+ def __init__(self, *args, **kwargs):
+ self.uri = "ldap://localhost:389"
+ self.basedn = "dc=yunohost,dc=org"
+ self.admindn = "cn=admin,dc=yunohost,dc=org"
+
+ def _authenticate_credentials(self, credentials=None):
+
+ # TODO : change authentication format
+ # to support another dn to support multi-admins
+
+ def _reconnect():
+ con = ldap.ldapobject.ReconnectLDAPObject(
+ self.uri, retry_max=10, retry_delay=0.5
+ )
+ con.simple_bind_s(self.admindn, credentials)
+ return con
+
+ try:
+ con = _reconnect()
+ except ldap.INVALID_CREDENTIALS:
+ raise YunohostError("invalid_password")
+ except ldap.SERVER_DOWN:
+ # ldap is down, attempt to restart it before really failing
+ logger.warning(m18n.n("ldap_server_is_down_restart_it"))
+ os.system("systemctl restart slapd")
+ time.sleep(10) # waits 10 seconds so we are sure that slapd has restarted
+
+ try:
+ con = _reconnect()
+ except ldap.SERVER_DOWN:
+ raise YunohostError("ldap_server_down")
+
+ # Check that we are indeed logged in with the expected identity
+ try:
+ # whoami_s returns "dn:...", so strip the first 3 characters
+ who = con.whoami_s()[3:]
+ except Exception as e:
+ logger.warning("Error during ldap authentication process: %s", e)
+ raise
+ else:
+ if who != self.admindn:
+ raise YunohostError(
+ f"Not logged with the appropriate identity ? Found {who}, expected {self.admindn} !?",
+ raw_msg=True,
+ )
+ finally:
+ # Free the connection, we don't really need it to keep it open as the point is only to check authentication...
+ if con:
+ con.unbind_s()
diff --git a/src/yunohost/backup.py b/src/yunohost/backup.py
index 8f256491d..b02b23966 100644
--- a/src/yunohost/backup.py
+++ b/src/yunohost/backup.py
@@ -34,34 +34,55 @@ import tempfile
from datetime import datetime
from glob import glob
from collections import OrderedDict
+from functools import reduce
+from packaging import version
-from moulinette import msignals, m18n
-from yunohost.utils.error import YunohostError
+from moulinette import Moulinette, m18n
from moulinette.utils import filesystem
from moulinette.core import MoulinetteError
from moulinette.utils.log import getActionLogger
-from moulinette.utils.filesystem import read_file, mkdir
+from moulinette.utils.filesystem import read_file, mkdir, write_to_yaml, read_yaml
+from moulinette.utils.process import check_output
+import yunohost.domain
from yunohost.app import (
- app_info, _is_installed, _parse_app_instance_name, _patch_php5
+ app_info,
+ _is_installed,
+ _make_environment_for_app_script,
+ _patch_legacy_helpers,
+ _patch_legacy_php_versions,
+ _patch_legacy_php_versions_in_settings,
+ LEGACY_PHP_VERSION_REPLACEMENTS,
+ _make_tmp_workdir_for_app,
)
from yunohost.hook import (
- hook_list, hook_info, hook_callback, hook_exec, CUSTOM_HOOK_FOLDER
+ hook_list,
+ hook_info,
+ hook_callback,
+ hook_exec,
+ hook_exec_with_script_debug_if_failure,
+ CUSTOM_HOOK_FOLDER,
+)
+from yunohost.tools import (
+ tools_postinstall,
+ _tools_migrations_run_after_system_restore,
+ _tools_migrations_run_before_app_restore,
)
-from yunohost.monitor import binary_to_human
-from yunohost.tools import tools_postinstall
from yunohost.regenconf import regen_conf
-from yunohost.log import OperationLogger
+from yunohost.log import OperationLogger, is_unit_operation
from yunohost.repository import BackupRepository
-from functools import reduce
+from yunohost.utils.error import YunohostError, YunohostValidationError
+from yunohost.utils.packages import ynh_packages_version
+from yunohost.utils.filesystem import free_space_in_directory
+from yunohost.settings import settings_get
-BACKUP_PATH = '/home/yunohost.backup'
-ARCHIVES_PATH = '%s/archives' % BACKUP_PATH
+BACKUP_PATH = "/home/yunohost.backup"
+ARCHIVES_PATH = "%s/archives" % BACKUP_PATH
APP_MARGIN_SPACE_SIZE = 100 # In MB
CONF_MARGIN_SPACE_SIZE = 10 # IN MB
POSTINSTALL_ESTIMATE_SPACE_SIZE = 5 # In MB
MB_ALLOWED_TO_ORGANIZE = 10
-logger = getActionLogger('yunohost.backup')
+logger = getActionLogger("yunohost.backup")
class BackupRestoreTargetsManager(object):
@@ -73,10 +94,7 @@ class BackupRestoreTargetsManager(object):
def __init__(self):
self.targets = {}
- self.results = {
- "system": {},
- "apps": {}
- }
+ self.results = {"system": {}, "apps": {}}
def set_result(self, category, element, value):
"""
@@ -99,14 +117,18 @@ class BackupRestoreTargetsManager(object):
self.results[category][element] = value
else:
currentValue = self.results[category][element]
- if (levels.index(currentValue) > levels.index(value)):
+ if levels.index(currentValue) > levels.index(value):
return
else:
self.results[category][element] = value
- def set_wanted(self, category,
- wanted_targets, available_targets,
- error_if_wanted_target_is_unavailable):
+ def set_wanted(
+ self,
+ category,
+ wanted_targets,
+ available_targets,
+ error_if_wanted_target_is_unavailable,
+ ):
"""
Define and validate targets to be backuped or to be restored (list of
system parts, apps..). The wanted targets are compared and filtered
@@ -138,13 +160,15 @@ class BackupRestoreTargetsManager(object):
# If the user manually specified which targets to backup, we need to
# validate that each target is actually available
else:
- self.targets[category] = [part for part in wanted_targets
- if part in available_targets]
+ self.targets[category] = [
+ part for part in wanted_targets if part in available_targets
+ ]
# Display an error for each target asked by the user but which is
# unknown
- unavailable_targets = [part for part in wanted_targets
- if part not in available_targets]
+ unavailable_targets = [
+ part for part in wanted_targets if part not in available_targets
+ ]
for target in unavailable_targets:
self.set_result(category, target, "Skipped")
@@ -165,19 +189,26 @@ class BackupRestoreTargetsManager(object):
with respect to the current 'result' of the target.
"""
- assert (include and isinstance(include, list) and not exclude) \
- or (exclude and isinstance(exclude, list) and not include)
+ assert (include and isinstance(include, list) and not exclude) or (
+ exclude and isinstance(exclude, list) and not include
+ )
if include:
- return [target.encode("Utf-8") for target in self.targets[category]
- if self.results[category][target] in include]
+ return [
+ target
+ for target in self.targets[category]
+ if self.results[category][target] in include
+ ]
if exclude:
- return [target.encode("Utf-8") for target in self.targets[category]
- if self.results[category][target] not in exclude]
+ return [
+ target
+ for target in self.targets[category]
+ if self.results[category][target] not in exclude
+ ]
-class BackupManager():
+class BackupManager:
"""
This class collect files to backup in a list and apply one or several
@@ -219,8 +250,8 @@ class BackupManager():
backup_manager = BackupManager(name="mybackup", description="bkp things")
# Add backup method to apply
- backup_manager.add(BackupMethod.create('copy','/mnt/local_fs'))
- backup_manager.add(BackupMethod.create('tar','/mnt/remote_fs'))
+ backup_manager.add('copy', output_directory='/mnt/local_fs')
+ backup_manager.add('tar', output_directory='/mnt/remote_fs')
# Define targets to be backuped
backup_manager.set_system_targets(["data"])
@@ -233,7 +264,7 @@ class BackupManager():
backup_manager.backup()
"""
- def __init__(self, name=None, description='', work_dir=None):
+ def __init__(self, name=None, description="", methods=[], work_dir=None):
"""
BackupManager constructor
@@ -247,16 +278,12 @@ class BackupManager():
work_dir -- (None|string) A path where prepare the archive. If None,
temporary work_dir will be created (default: None)
"""
- self.description = description or ''
+ self.description = description or ""
self.created_at = int(time.time())
self.apps_return = {}
self.system_return = {}
- self.methods = []
self.paths_to_backup = []
- self.size_details = {
- 'system': {},
- 'apps': {}
- }
+ self.size_details = {"system": {}, "apps": {}}
self.targets = BackupRestoreTargetsManager()
# Define backup name if needed
@@ -267,9 +294,14 @@ class BackupManager():
# Define working directory if needed and initialize it
self.work_dir = work_dir
if self.work_dir is None:
- self.work_dir = os.path.join(BACKUP_PATH, 'tmp', name)
+ self.work_dir = os.path.join(BACKUP_PATH, "tmp", name)
self._init_work_dir()
+ # Initialize backup methods
+ self.methods = [
+ BackupMethod.create(method, self, repo=work_dir) for method in methods
+ ]
+
#
# Misc helpers #
#
@@ -278,19 +310,20 @@ class BackupManager():
def info(self):
"""(Getter) Dict containing info about the archive being created"""
return {
- 'description': self.description,
- 'created_at': self.created_at,
- 'size': self.size,
- 'size_details': self.size_details,
- 'apps': self.apps_return,
- 'system': self.system_return
+ "description": self.description,
+ "created_at": self.created_at,
+ "size": self.size,
+ "size_details": self.size_details,
+ "apps": self.apps_return,
+ "system": self.system_return,
+ "from_yunohost_version": ynh_packages_version()["yunohost"]["version"],
}
@property
def is_tmp_work_dir(self):
"""(Getter) Return true if the working directory is temporary and should
be clean at the end of the backup"""
- return self.work_dir == os.path.join(BACKUP_PATH, 'tmp', self.name)
+ return self.work_dir == os.path.join(BACKUP_PATH, "tmp", self.name)
def __repr__(self):
return json.dumps(self.info)
@@ -302,43 +335,34 @@ class BackupManager():
(string) A backup name created from current date 'YYMMDD-HHMMSS'
"""
# FIXME: case where this name already exist
- return time.strftime('%Y%m%d-%H%M%S', time.gmtime())
+ return time.strftime("%Y%m%d-%H%M%S", time.gmtime())
def _init_work_dir(self):
"""Initialize preparation directory
Ensure the working directory exists and is empty
-
- exception:
- backup_output_directory_not_empty -- (YunohostError) Raised if the
- directory was given by the user and isn't empty
-
- (TODO) backup_cant_clean_tmp_working_directory -- (YunohostError)
- Raised if the working directory isn't empty, is temporary and can't
- be automaticcaly cleaned
-
- (TODO) backup_cant_create_working_directory -- (YunohostError) Raised
- if iyunohost can't create the working directory
"""
# FIXME replace isdir by exists ? manage better the case where the path
# exists
if not os.path.isdir(self.work_dir):
- filesystem.mkdir(self.work_dir, 0o750, parents=True, uid='admin')
+ filesystem.mkdir(self.work_dir, 0o750, parents=True, uid="admin")
elif self.is_tmp_work_dir:
- logger.debug("temporary directory for backup '%s' already exists... attempting to clean it",
- self.work_dir)
+ logger.debug(
+ "temporary directory for backup '%s' already exists... attempting to clean it",
+ self.work_dir,
+ )
# Try to recursively unmount stuff (from a previously failed backup ?)
if not _recursive_umount(self.work_dir):
- raise YunohostError('backup_output_directory_not_empty')
+ raise YunohostValidationError("backup_output_directory_not_empty")
else:
# If umount succeeded, remove the directory (we checked that
# we're in /home/yunohost.backup/tmp so that should be okay...
# c.f. method clean() which also does this)
filesystem.rm(self.work_dir, recursive=True, force=True)
- filesystem.mkdir(self.work_dir, 0o750, parents=True, uid='admin')
+ filesystem.mkdir(self.work_dir, 0o750, parents=True, uid="admin")
#
# Backup target management #
@@ -353,12 +377,13 @@ class BackupManager():
If empty list, all system will be backuped. If None,
no system parts will be backuped.
"""
- def unknown_error(part):
- logger.error(m18n.n('backup_hook_unknown', hook=part))
- self.targets.set_wanted("system",
- system_parts, hook_list('backup')["hooks"],
- unknown_error)
+ def unknown_error(part):
+ logger.error(m18n.n("backup_hook_unknown", hook=part))
+
+ self.targets.set_wanted(
+ "system", system_parts, hook_list("backup")["hooks"], unknown_error
+ )
def set_apps_targets(self, apps=[]):
"""
@@ -369,12 +394,13 @@ class BackupManager():
list, all apps will be backuped. If given None, no apps will be
backuped.
"""
- def unknown_error(app):
- logger.error(m18n.n('unbackup_app', app=app))
- target_list = self.targets.set_wanted("apps", apps,
- os.listdir('/etc/yunohost/apps'),
- unknown_error)
+ def unknown_error(app):
+ logger.error(m18n.n("unbackup_app", app=app))
+
+ target_list = self.targets.set_wanted(
+ "apps", apps, os.listdir("/etc/yunohost/apps"), unknown_error
+ )
# Additionnaly, we need to check that each targetted app has a
# backup and restore scripts
@@ -385,11 +411,11 @@ class BackupManager():
restore_script_path = os.path.join(app_script_folder, "restore")
if not os.path.isfile(backup_script_path):
- logger.warning(m18n.n('backup_with_no_backup_script_for_app', app=app))
+ logger.warning(m18n.n("backup_with_no_backup_script_for_app", app=app))
self.targets.set_result("apps", app, "Skipped")
elif not os.path.isfile(restore_script_path):
- logger.warning(m18n.n('backup_with_no_restore_script_for_app', app=app))
+ logger.warning(m18n.n("backup_with_no_restore_script_for_app", app=app))
self.targets.set_result("apps", app, "Warning")
#
@@ -434,7 +460,7 @@ class BackupManager():
source = os.path.join(self.work_dir, source)
if dest.endswith("/"):
dest = os.path.join(dest, os.path.basename(source))
- self.paths_to_backup.append({'source': source, 'dest': dest})
+ self.paths_to_backup.append({"source": source, "dest": dest})
def _write_csv(self):
"""
@@ -461,20 +487,21 @@ class BackupManager():
backup_csv_creation_failed -- Raised if the CSV couldn't be created
backup_csv_addition_failed -- Raised if we can't write in the CSV
"""
- self.csv_path = os.path.join(self.work_dir, 'backup.csv')
+ self.csv_path = os.path.join(self.work_dir, "backup.csv")
try:
- self.csv_file = open(self.csv_path, 'a')
- self.fieldnames = ['source', 'dest']
- self.csv = csv.DictWriter(self.csv_file, fieldnames=self.fieldnames,
- quoting=csv.QUOTE_ALL)
+ self.csv_file = open(self.csv_path, "a")
+ self.fieldnames = ["source", "dest"]
+ self.csv = csv.DictWriter(
+ self.csv_file, fieldnames=self.fieldnames, quoting=csv.QUOTE_ALL
+ )
except (IOError, OSError, csv.Error):
- logger.error(m18n.n('backup_csv_creation_failed'))
+ logger.error(m18n.n("backup_csv_creation_failed"))
for row in self.paths_to_backup:
try:
self.csv.writerow(row)
except csv.Error:
- logger.error(m18n.n('backup_csv_addition_failed'))
+ logger.error(m18n.n("backup_csv_addition_failed"))
self.csv_file.close()
#
@@ -502,10 +529,6 @@ class BackupManager():
files to backup
hooks/ -- restore scripts associated to system backup scripts are
copied here
-
- Exceptions:
- "backup_nothings_done" -- (YunohostError) This exception is raised if
- nothing has been listed.
"""
self._collect_system_files()
@@ -517,17 +540,17 @@ class BackupManager():
if not successfull_apps and not successfull_system:
filesystem.rm(self.work_dir, True, True)
- raise YunohostError('backup_nothings_done')
+ raise YunohostError("backup_nothings_done")
# Add unlisted files from backup tmp dir
- self._add_to_list_to_backup('backup.csv')
- self._add_to_list_to_backup('info.json')
- if len(self.apps_return) > 0:
- self._add_to_list_to_backup('apps')
- if os.path.isdir(os.path.join(self.work_dir, 'conf')):
- self._add_to_list_to_backup('conf')
- if os.path.isdir(os.path.join(self.work_dir, 'data')):
- self._add_to_list_to_backup('data')
+ self._add_to_list_to_backup("backup.csv")
+ self._add_to_list_to_backup("info.json")
+ for app in self.apps_return.keys():
+ self._add_to_list_to_backup(f"apps/{app}")
+ if os.path.isdir(os.path.join(self.work_dir, "conf")):
+ self._add_to_list_to_backup("conf")
+ if os.path.isdir(os.path.join(self.work_dir, "data")):
+ self._add_to_list_to_backup("data")
# Write CSV file
self._write_csv()
@@ -536,7 +559,7 @@ class BackupManager():
self._compute_backup_size()
# Create backup info file
- with open("%s/info.json" % self.work_dir, 'w') as f:
+ with open("%s/info.json" % self.work_dir, "w") as f:
f.write(json.dumps(self.info))
def _get_env_var(self, app=None):
@@ -553,18 +576,15 @@ class BackupManager():
"""
env_var = {}
- _, tmp_csv = tempfile.mkstemp(prefix='backupcsv_')
- env_var['YNH_BACKUP_DIR'] = self.work_dir
- env_var['YNH_BACKUP_CSV'] = tmp_csv
+ _, tmp_csv = tempfile.mkstemp(prefix="backupcsv_")
+ env_var["YNH_BACKUP_DIR"] = self.work_dir
+ env_var["YNH_BACKUP_CSV"] = tmp_csv
if app is not None:
- app_id, app_instance_nb = _parse_app_instance_name(app)
- env_var["YNH_APP_ID"] = app_id
- env_var["YNH_APP_INSTANCE_NAME"] = app
- env_var["YNH_APP_INSTANCE_NUMBER"] = str(app_instance_nb)
- tmp_app_dir = os.path.join('apps/', app)
- tmp_app_bkp_dir = os.path.join(self.work_dir, tmp_app_dir, 'backup')
- env_var["YNH_APP_BACKUP_DIR"] = tmp_app_bkp_dir
+ env_var.update(_make_environment_for_app_script(app))
+ env_var["YNH_APP_BACKUP_DIR"] = os.path.join(
+ self.work_dir, "apps", app, "backup"
+ )
return env_var
@@ -589,27 +609,37 @@ class BackupManager():
if system_targets == []:
return
- logger.debug(m18n.n('backup_running_hooks'))
+ logger.debug(m18n.n("backup_running_hooks"))
# Prepare environnement
env_dict = self._get_env_var()
# Actual call to backup scripts/hooks
- ret = hook_callback('backup',
- system_targets,
- args=[self.work_dir],
- env=env_dict,
- chdir=self.work_dir)
+ ret = hook_callback(
+ "backup",
+ system_targets,
+ args=[self.work_dir],
+ env=env_dict,
+ chdir=self.work_dir,
+ )
- ret_succeed = {hook: {path:result["state"] for path, result in infos.items()}
- for hook, infos in ret.items()
- if any(result["state"] == "succeed" for result in infos.values())}
- ret_failed = {hook: {path:result["state"] for path, result in infos.items()}
- for hook, infos in ret.items()
- if any(result["state"] == "failed" for result in infos.values())}
+ ret_succeed = {
+ hook: [
+ path for path, result in infos.items() if result["state"] == "succeed"
+ ]
+ for hook, infos in ret.items()
+ if any(result["state"] == "succeed" for result in infos.values())
+ }
+ ret_failed = {
+ hook: [
+ path for path, result in infos.items() if result["state"] == "failed"
+ ]
+ for hook, infos in ret.items()
+ if any(result["state"] == "failed" for result in infos.values())
+ }
- if ret_succeed.keys() != []:
+ if list(ret_succeed.keys()) != []:
self.system_return = ret_succeed
# Add files from targets (which they put in the CSV) to the list of
@@ -621,8 +651,7 @@ class BackupManager():
restore_hooks_dir = os.path.join(self.work_dir, "hooks", "restore")
if not os.path.exists(restore_hooks_dir):
- filesystem.mkdir(restore_hooks_dir, mode=0o750,
- parents=True, uid='admin')
+ filesystem.mkdir(restore_hooks_dir, mode=0o700, parents=True, uid="root")
restore_hooks = hook_list("restore")["hooks"]
@@ -633,15 +662,15 @@ class BackupManager():
self._add_to_list_to_backup(hook["path"], "hooks/restore/")
self.targets.set_result("system", part, "Success")
else:
- logger.warning(m18n.n('restore_hook_unavailable', hook=part))
+ logger.warning(m18n.n("restore_hook_unavailable", hook=part))
self.targets.set_result("system", part, "Warning")
for part in ret_failed.keys():
- logger.error(m18n.n('backup_system_part_failed', part=part))
+ logger.error(m18n.n("backup_system_part_failed", part=part))
self.targets.set_result("system", part, "Error")
def _collect_apps_files(self):
- """ Prepare backup for each selected apps """
+ """Prepare backup for each selected apps"""
apps_targets = self.targets.list("apps", exclude=["Skipped"])
@@ -672,84 +701,74 @@ class BackupManager():
Args:
app -- (string) an app instance name (already installed) to backup
-
- Exceptions:
- backup_app_failed -- Raised at the end if the app backup script
- execution failed
"""
- app_setting_path = os.path.join('/etc/yunohost/apps/', app)
+ from yunohost.permission import user_permission_list
+
+ app_setting_path = os.path.join("/etc/yunohost/apps/", app)
# Prepare environment
env_dict = self._get_env_var(app)
+ env_dict["YNH_APP_BASEDIR"] = os.path.join(
+ self.work_dir, "apps", app, "settings"
+ )
tmp_app_bkp_dir = env_dict["YNH_APP_BACKUP_DIR"]
- settings_dir = os.path.join(self.work_dir, 'apps', app, 'settings')
+ settings_dir = os.path.join(self.work_dir, "apps", app, "settings")
logger.info(m18n.n("app_start_backup", app=app))
+ tmp_workdir_for_app = _make_tmp_workdir_for_app(app=app)
try:
# Prepare backup directory for the app
- filesystem.mkdir(tmp_app_bkp_dir, 0o750, True, uid='admin')
+ filesystem.mkdir(tmp_app_bkp_dir, 0o700, True, uid="root")
# Copy the app settings to be able to call _common.sh
shutil.copytree(app_setting_path, settings_dir)
- # Copy app backup script in a temporary folder and execute it
- _, tmp_script = tempfile.mkstemp(prefix='backup_')
- app_script = os.path.join(app_setting_path, 'scripts/backup')
- subprocess.call(['install', '-Dm555', app_script, tmp_script])
-
- hook_exec(tmp_script, args=[tmp_app_bkp_dir, app],
- raise_on_error=True, chdir=tmp_app_bkp_dir, env=env_dict)[0]
+ hook_exec(
+ f"{tmp_workdir_for_app}/scripts/backup",
+ raise_on_error=True,
+ chdir=tmp_app_bkp_dir,
+ env=env_dict,
+ )[0]
self._import_to_list_to_backup(env_dict["YNH_BACKUP_CSV"])
# backup permissions
- logger.debug(m18n.n('backup_permission', app=app))
- ldap_url = "ldap:///dc=yunohost,dc=org???(&(objectClass=permissionYnh)(cn=*.%s))" % app
- os.system("slapcat -b dc=yunohost,dc=org -H '%s' -l '%s/permission.ldif'" % (ldap_url, settings_dir))
+ logger.debug(m18n.n("backup_permission", app=app))
+ permissions = user_permission_list(full=True, apps=[app])["permissions"]
+ this_app_permissions = {name: infos for name, infos in permissions.items()}
+ write_to_yaml("%s/permissions.yml" % settings_dir, this_app_permissions)
- except:
- abs_tmp_app_dir = os.path.join(self.work_dir, 'apps/', app)
+ except Exception:
+ abs_tmp_app_dir = os.path.join(self.work_dir, "apps/", app)
shutil.rmtree(abs_tmp_app_dir, ignore_errors=True)
- logger.exception(m18n.n('backup_app_failed', app=app))
+ logger.error(m18n.n("backup_app_failed", app=app))
self.targets.set_result("apps", app, "Error")
else:
# Add app info
i = app_info(app)
self.apps_return[app] = {
- 'version': i['version'],
- 'name': i['name'],
- 'description': i['description'],
+ "version": i["version"],
+ "name": i["name"],
+ "description": i["description"],
}
self.targets.set_result("apps", app, "Success")
# Remove tmp files in all situations
finally:
- filesystem.rm(tmp_script, force=True)
+ shutil.rmtree(tmp_workdir_for_app)
filesystem.rm(env_dict["YNH_BACKUP_CSV"], force=True)
#
# Actual backup archive creation / method management #
#
- def add(self, method):
- """
- Add a backup method that will be applied after the files collection step
-
- Args:
- method -- (BackupMethod) A backup method. Currently, you can use those:
- TarBackupMethod
- CopyBackupMethod
- CustomBackupMethod
- """
- self.methods.append(method)
-
def backup(self):
"""Apply backup methods"""
for method in self.methods:
- logger.debug(m18n.n('backup_applying_method_' + method.method_name))
- method.mount_and_backup(self)
- logger.debug(m18n.n('backup_method_' + method.method_name + '_finished'))
+ logger.debug(m18n.n("backup_applying_method_" + method.method_name))
+ method.mount_and_backup()
+ logger.debug(m18n.n("backup_method_" + method.method_name + "_finished"))
def _compute_backup_size(self):
"""
@@ -772,40 +791,43 @@ class BackupManager():
# size info
self.size = 0
for system_key in self.system_return:
- self.size_details['system'][system_key] = 0
+ self.size_details["system"][system_key] = 0
for app_key in self.apps_return:
- self.size_details['apps'][app_key] = 0
+ self.size_details["apps"][app_key] = 0
for row in self.paths_to_backup:
- if row['dest'] != "info.json":
- size = disk_usage(row['source'])
+ if row["dest"] == "info.json":
+ continue
- # Add size to apps details
- splitted_dest = row['dest'].split('/')
- category = splitted_dest[0]
- if category == 'apps':
- for app_key in self.apps_return:
- if row['dest'].startswith('apps/' + app_key):
- self.size_details['apps'][app_key] += size
- break
- # OR Add size to the correct system element
- elif category == 'data' or category == 'conf':
- for system_key in self.system_return:
- if row['dest'].startswith(system_key.replace('_', '/')):
- self.size_details['system'][system_key] += size
- break
+ size = disk_usage(row["source"])
- self.size += size
+ # Add size to apps details
+ splitted_dest = row["dest"].split("/")
+ category = splitted_dest[0]
+ if category == "apps":
+ for app_key in self.apps_return:
+ if row["dest"].startswith("apps/" + app_key):
+ self.size_details["apps"][app_key] += size
+ break
+
+ # OR Add size to the correct system element
+ elif category == "data" or category == "conf":
+ for system_key in self.system_return:
+ if row["dest"].startswith(system_key.replace("_", "/")):
+ self.size_details["system"][system_key] += size
+ break
+
+ self.size += size
return self.size
-class RestoreManager():
+class RestoreManager:
"""
RestoreManager allow to restore a past backup archive
- Currently it's a tar.gz file, but it could be another kind of archive
+ Currently it's a tar file, but it could be another kind of archive
Public properties:
info (getter)i # FIXME
@@ -831,23 +853,26 @@ class RestoreManager():
return restore_manager.result
"""
- def __init__(self, name, repo=None, method='tar'):
+ def __init__(self, name, method="tar"):
"""
RestoreManager constructor
Args:
name -- (string) Archive name
- repo -- (string|None) Repository where is this archive, it could be a
- path (default: /home/yunohost.backup/archives)
method -- (string) Method name to use to mount the archive
"""
# Retrieve and open the archive
# FIXME this way to get the info is not compatible with copy or custom
# backup methods
self.info = backup_info(name, with_details=True)
- self.archive_path = self.info['path']
+ if not self.info["from_yunohost_version"] or version.parse(
+ self.info["from_yunohost_version"]
+ ) < version.parse("3.8.0"):
+ raise YunohostValidationError("restore_backup_too_old")
+
+ self.archive_path = self.info["path"]
self.name = name
- self.method = BackupMethod.create(method)
+ self.method = BackupMethod.create(method, self)
self.targets = BackupRestoreTargetsManager()
#
@@ -860,20 +885,16 @@ class RestoreManager():
successful_apps = self.targets.list("apps", include=["Success", "Warning"])
successful_system = self.targets.list("system", include=["Success", "Warning"])
- return len(successful_apps) != 0 \
- or len(successful_system) != 0
+ return len(successful_apps) != 0 or len(successful_system) != 0
def _read_info_files(self):
"""
Read the info file from inside an archive
-
- Exceptions:
- backup_invalid_archive -- Raised if we can't read the info
"""
# Retrieve backup info
info_file = os.path.join(self.work_dir, "info.json")
try:
- with open(info_file, 'r') as f:
+ with open(info_file, "r") as f:
self.info = json.load(f)
# Historically, "system" was "hooks"
@@ -881,50 +902,52 @@ class RestoreManager():
self.info["system"] = self.info["hooks"]
except IOError:
logger.debug("unable to load '%s'", info_file, exc_info=1)
- raise YunohostError('backup_invalid_archive')
+ raise YunohostError(
+ "backup_archive_cant_retrieve_info_json", archive=self.archive_path
+ )
else:
- logger.debug("restoring from backup '%s' created on %s", self.name,
- datetime.utcfromtimestamp(self.info['created_at']))
+ logger.debug(
+ "restoring from backup '%s' created on %s",
+ self.name,
+ datetime.utcfromtimestamp(self.info["created_at"]),
+ )
def _postinstall_if_needed(self):
"""
Post install yunohost if needed
-
- Exceptions:
- backup_invalid_archive -- Raised if the current_host isn't in the
- archive
"""
# Check if YunoHost is installed
- if not os.path.isfile('/etc/yunohost/installed'):
+ if not os.path.isfile("/etc/yunohost/installed"):
# Retrieve the domain from the backup
try:
- with open("%s/conf/ynh/current_host" % self.work_dir, 'r') as f:
+ with open("%s/conf/ynh/current_host" % self.work_dir, "r") as f:
domain = f.readline().rstrip()
except IOError:
- logger.debug("unable to retrieve current_host from the backup",
- exc_info=1)
+ logger.debug(
+ "unable to retrieve current_host from the backup", exc_info=1
+ )
# FIXME include the current_host by default ?
- raise YunohostError('backup_invalid_archive')
+ raise YunohostError(
+ "The main domain name cannot be retrieved from inside the archive, and is needed to perform the postinstall",
+ raw_msg=True,
+ )
logger.debug("executing the post-install...")
- tools_postinstall(domain, 'Yunohost', True)
-
+ tools_postinstall(domain, "Yunohost", True)
def clean(self):
"""
End a restore operations by cleaning the working directory and
regenerate ssowat conf (if some apps were restored)
"""
- from permission import permission_sync_to_user
+ from .permission import permission_sync_to_user
- successfull_apps = self.targets.list("apps", include=["Success", "Warning"])
-
- permission_sync_to_user(force=False)
+ permission_sync_to_user()
if os.path.ismount(self.work_dir):
ret = subprocess.call(["umount", self.work_dir])
if ret != 0:
- logger.warning(m18n.n('restore_cleaning_failed'))
+ logger.warning(m18n.n("restore_cleaning_failed"))
filesystem.rm(self.work_dir, recursive=True, force=True)
#
@@ -942,13 +965,11 @@ class RestoreManager():
"""
def unknown_error(part):
- logger.error(m18n.n("backup_archive_system_part_not_available",
- part=part))
+ logger.error(m18n.n("backup_archive_system_part_not_available", part=part))
- target_list = self.targets.set_wanted("system",
- system_parts,
- self.info['system'].keys(),
- unknown_error)
+ target_list = self.targets.set_wanted(
+ "system", system_parts, self.info["system"].keys(), unknown_error
+ )
# Now we need to check that the restore hook is actually available for
# all targets we want to restore
@@ -956,6 +977,9 @@ class RestoreManager():
# These are the hooks on the current installation
available_restore_system_hooks = hook_list("restore")["hooks"]
+ custom_restore_hook_folder = os.path.join(CUSTOM_HOOK_FOLDER, "restore")
+ filesystem.mkdir(custom_restore_hook_folder, 755, parents=True, force=True)
+
for system_part in target_list:
# By default, we'll use the restore hooks on the current install
# if available
@@ -967,24 +991,30 @@ class RestoreManager():
continue
# Otherwise, attempt to find it (or them?) in the archive
- hook_paths = '{:s}/hooks/restore/*-{:s}'.format(self.work_dir, system_part)
- hook_paths = glob(hook_paths)
# If we didn't find it, we ain't gonna be able to restore it
- if len(hook_paths) == 0:
- logger.exception(m18n.n('restore_hook_unavailable', part=system_part))
+ if (
+ system_part not in self.info["system"]
+ or "paths" not in self.info["system"][system_part]
+ or len(self.info["system"][system_part]["paths"]) == 0
+ ):
+ logger.error(m18n.n("restore_hook_unavailable", part=system_part))
self.targets.set_result("system", system_part, "Skipped")
continue
+ hook_paths = self.info["system"][system_part]["paths"]
+ hook_paths = ["hooks/restore/%s" % os.path.basename(p) for p in hook_paths]
+
# Otherwise, add it from the archive to the system
# FIXME: Refactor hook_add and use it instead
- custom_restore_hook_folder = os.path.join(CUSTOM_HOOK_FOLDER, 'restore')
- filesystem.mkdir(custom_restore_hook_folder, 755, True)
for hook_path in hook_paths:
- logger.debug("Adding restoration script '%s' to the system "
- "from the backup archive '%s'", hook_path,
- self.archive_path)
- shutil.copy(hook_path, custom_restore_hook_folder)
+ logger.debug(
+ "Adding restoration script '%s' to the system "
+ "from the backup archive '%s'",
+ hook_path,
+ self.archive_path,
+ )
+ self.method.copy(hook_path, custom_restore_hook_folder)
def set_apps_targets(self, apps=[]):
"""
@@ -997,13 +1027,28 @@ class RestoreManager():
"""
def unknown_error(app):
- logger.error(m18n.n('backup_archive_app_not_found',
- app=app))
+ logger.error(m18n.n("backup_archive_app_not_found", app=app))
- self.targets.set_wanted("apps",
- apps,
- self.info['apps'].keys(),
- unknown_error)
+ to_be_restored = self.targets.set_wanted(
+ "apps", apps, self.info["apps"].keys(), unknown_error
+ )
+
+ # If all apps to restore are already installed, stop right here.
+ # Otherwise, if at least one app can be restored, we keep going on
+ # because those which can be restored will indeed be restored
+ already_installed = [app for app in to_be_restored if _is_installed(app)]
+ if already_installed != []:
+ if already_installed == to_be_restored:
+ raise YunohostValidationError(
+ "restore_already_installed_apps", apps=", ".join(already_installed)
+ )
+ else:
+ logger.warning(
+ m18n.n(
+ "restore_already_installed_apps",
+ apps=", ".join(already_installed),
+ )
+ )
#
# Archive mounting #
@@ -1016,35 +1061,31 @@ class RestoreManager():
Use the mount method from the BackupMethod instance and read info about
this archive
-
- Exceptions:
- restore_removing_tmp_dir_failed -- Raised if it's not possible to remove
- the working directory
"""
self.work_dir = os.path.join(BACKUP_PATH, "tmp", self.name)
if os.path.ismount(self.work_dir):
- logger.debug("An already mounting point '%s' already exists",
- self.work_dir)
- ret = subprocess.call(['umount', self.work_dir])
+ logger.debug("An already mounting point '%s' already exists", self.work_dir)
+ ret = subprocess.call(["umount", self.work_dir])
if ret == 0:
- subprocess.call(['rmdir', self.work_dir])
+ subprocess.call(["rmdir", self.work_dir])
logger.debug("Unmount dir: {}".format(self.work_dir))
else:
- raise YunohostError('restore_removing_tmp_dir_failed')
+ raise YunohostError("restore_removing_tmp_dir_failed")
elif os.path.isdir(self.work_dir):
- logger.debug("temporary restore directory '%s' already exists",
- self.work_dir)
- ret = subprocess.call(['rm', '-Rf', self.work_dir])
+ logger.debug(
+ "temporary restore directory '%s' already exists", self.work_dir
+ )
+ ret = subprocess.call(["rm", "-Rf", self.work_dir])
if ret == 0:
logger.debug("Delete dir: {}".format(self.work_dir))
else:
- raise YunohostError('restore_removing_tmp_dir_failed')
+ raise YunohostError("restore_removing_tmp_dir_failed")
filesystem.mkdir(self.work_dir, parents=True)
- self.method.mount(self)
+ self.method.mount()
self._read_info_files()
@@ -1063,41 +1104,38 @@ class RestoreManager():
"""
system = self.targets.list("system", exclude=["Skipped"])
apps = self.targets.list("apps", exclude=["Skipped"])
- restore_all_system = (system == self.info['system'].keys())
- restore_all_apps = (apps == self.info['apps'].keys())
+ restore_all_system = system == self.info["system"].keys()
+ restore_all_apps = apps == self.info["apps"].keys()
# If complete restore operations (or legacy archive)
margin = CONF_MARGIN_SPACE_SIZE * 1024 * 1024
- if (restore_all_system and restore_all_apps) or 'size_details' not in self.info:
- size = self.info['size']
- if 'size_details' not in self.info or \
- self.info['size_details']['apps'] != {}:
+ if (restore_all_system and restore_all_apps) or "size_details" not in self.info:
+ size = self.info["size"]
+ if (
+ "size_details" not in self.info
+ or self.info["size_details"]["apps"] != {}
+ ):
margin = APP_MARGIN_SPACE_SIZE * 1024 * 1024
# Partial restore don't need all backup size
else:
size = 0
if system is not None:
for system_element in system:
- size += self.info['size_details']['system'][system_element]
+ size += self.info["size_details"]["system"][system_element]
# TODO how to know the dependencies size ?
if apps is not None:
for app in apps:
- size += self.info['size_details']['apps'][app]
+ size += self.info["size_details"]["apps"][app]
margin = APP_MARGIN_SPACE_SIZE * 1024 * 1024
- if not os.path.isfile('/etc/yunohost/installed'):
+ if not os.path.isfile("/etc/yunohost/installed"):
size += POSTINSTALL_ESTIMATE_SPACE_SIZE * 1024 * 1024
return (size, margin)
def assert_enough_free_space(self):
"""
Check available disk space
-
- Exceptions:
- restore_may_be_not_enough_disk_space -- Raised if there isn't enough
- space to cover the security margin space
- restore_not_enough_disk_space -- Raised if there isn't enough space
"""
free_space = free_space_in_directory(BACKUP_PATH)
@@ -1107,9 +1145,19 @@ class RestoreManager():
return True
elif free_space > needed_space:
# TODO Add --force options to avoid the error raising
- raise YunohostError('restore_may_be_not_enough_disk_space', free_space=free_space, needed_space=needed_space, margin=margin)
+ raise YunohostValidationError(
+ "restore_may_be_not_enough_disk_space",
+ free_space=free_space,
+ needed_space=needed_space,
+ margin=margin,
+ )
else:
- raise YunohostError('restore_not_enough_disk_space', free_space=free_space, needed_space=needed_space, margin=margin)
+ raise YunohostValidationError(
+ "restore_not_enough_disk_space",
+ free_space=free_space,
+ needed_space=needed_space,
+ margin=margin,
+ )
#
# "Actual restore" (reverse step of the backup collect part) #
@@ -1127,55 +1175,51 @@ class RestoreManager():
self._postinstall_if_needed()
# Apply dirty patch to redirect php5 file on php7
- self._patch_backup_csv_file()
+ self._patch_legacy_php_versions_in_csv_file()
self._restore_system()
self._restore_apps()
+ except Exception as e:
+ raise YunohostError(
+ "The following critical error happened during restoration: %s" % e
+ )
finally:
self.clean()
- def _patch_backup_csv_file(self):
+ def _patch_legacy_php_versions_in_csv_file(self):
"""
- Apply dirty patch to redirect php5 file on php7
+ Apply dirty patch to redirect php5 and php7.0 files to php7.3
"""
- backup_csv = os.path.join(self.work_dir, 'backup.csv')
+ backup_csv = os.path.join(self.work_dir, "backup.csv")
if not os.path.isfile(backup_csv):
return
- try:
- contains_php5 = False
- with open(backup_csv) as csvfile:
- reader = csv.DictReader(csvfile, fieldnames=['source', 'dest'])
- newlines = []
- for row in reader:
- if 'php5' in row['source']:
- contains_php5 = True
- row['source'] = row['source'].replace('/etc/php5', '/etc/php/7.0') \
- .replace('/var/run/php5-fpm', '/var/run/php/php7.0-fpm') \
- .replace('php5', 'php7')
+ replaced_something = False
+ with open(backup_csv) as csvfile:
+ reader = csv.DictReader(csvfile, fieldnames=["source", "dest"])
+ newlines = []
+ for row in reader:
+ for pattern, replace in LEGACY_PHP_VERSION_REPLACEMENTS:
+ if pattern in row["source"]:
+ replaced_something = True
+ row["source"] = row["source"].replace(pattern, replace)
- newlines.append(row)
- except (IOError, OSError, csv.Error) as e:
- raise YunohostError('error_reading_file', file=backup_csv, error=str(e))
+ newlines.append(row)
- if not contains_php5:
+ if not replaced_something:
return
- try:
- with open(backup_csv, 'w') as csvfile:
- writer = csv.DictWriter(csvfile,
- fieldnames=['source', 'dest'],
- quoting=csv.QUOTE_ALL)
- for row in newlines:
- writer.writerow(row)
- except (IOError, OSError, csv.Error) as e:
- logger.warning(m18n.n('backup_php5_to_php7_migration_may_fail',
- error=str(e)))
+ with open(backup_csv, "w") as csvfile:
+ writer = csv.DictWriter(
+ csvfile, fieldnames=["source", "dest"], quoting=csv.QUOTE_ALL
+ )
+ for row in newlines:
+ writer.writerow(row)
def _restore_system(self):
- """ Restore user and system parts """
+ """Restore user and system parts"""
system_targets = self.targets.list("system", exclude=["Skipped"])
@@ -1183,87 +1227,99 @@ class RestoreManager():
if system_targets == []:
return
- from yunohost.utils.ldap import _get_ldap_interface
- ldap = _get_ldap_interface()
+ from yunohost.permission import (
+ permission_create,
+ permission_delete,
+ user_permission_list,
+ permission_sync_to_user,
+ )
# Backup old permission for apps
# We need to do that because in case of an app is installed we can't remove the permission for this app
- old_apps_permission = []
- try:
- old_apps_permission = ldap.search('ou=permission,dc=yunohost,dc=org',
- '(&(objectClass=permissionYnh)(!(cn=main.mail))(!(cn=main.metronome))(!(cn=main.sftp)))',
- ['cn', 'objectClass', 'groupPermission', 'URL', 'gidNumber'])
- except:
- logger.info(m18n.n('apps_permission_not_found'))
+ old_apps_permission = user_permission_list(ignore_system_perms=True, full=True)[
+ "permissions"
+ ]
# Start register change on system
- operation_logger = OperationLogger('backup_restore_system')
+ operation_logger = OperationLogger("backup_restore_system")
operation_logger.start()
- logger.debug(m18n.n('restore_running_hooks'))
+ logger.debug(m18n.n("restore_running_hooks"))
- env_dict = self._get_env_var()
- operation_logger.extra['env'] = env_dict
+ env_dict = {
+ "YNH_BACKUP_DIR": self.work_dir,
+ "YNH_BACKUP_CSV": os.path.join(self.work_dir, "backup.csv"),
+ }
+ operation_logger.extra["env"] = env_dict
operation_logger.flush()
- ret = hook_callback('restore',
- system_targets,
- args=[self.work_dir],
- env=env_dict,
- chdir=self.work_dir)
+ ret = hook_callback(
+ "restore",
+ system_targets,
+ args=[self.work_dir],
+ env=env_dict,
+ chdir=self.work_dir,
+ )
- ret_succeed = [hook for hook, infos in ret.items()
- if any(result["state"] == "succeed" for result in infos.values())]
- ret_failed = [hook for hook, infos in ret.items()
- if any(result["state"] == "failed" for result in infos.values())]
+ ret_succeed = [
+ hook
+ for hook, infos in ret.items()
+ if any(result["state"] == "succeed" for result in infos.values())
+ ]
+ ret_failed = [
+ hook
+ for hook, infos in ret.items()
+ if any(result["state"] == "failed" for result in infos.values())
+ ]
for part in ret_succeed:
self.targets.set_result("system", part, "Success")
error_part = []
for part in ret_failed:
- logger.error(m18n.n('restore_system_part_failed', part=part))
+ logger.error(m18n.n("restore_system_part_failed", part=part))
self.targets.set_result("system", part, "Error")
error_part.append(part)
if ret_failed:
- operation_logger.error(m18n.n('restore_system_part_failed', part=', '.join(error_part)))
+ operation_logger.error(
+ m18n.n("restore_system_part_failed", part=", ".join(error_part))
+ )
else:
operation_logger.success()
+ yunohost.domain.domain_list_cache = {}
+
regen_conf()
- # Check if we need to do the migration 0009 : setup group and permission
- # Legacy code
- result = ldap.search('ou=groups,dc=yunohost,dc=org',
- '(&(objectclass=groupOfNamesYnh)(cn=all_users))',
- ['cn'])
- if not result:
- from yunohost.tools import _get_migration_by_name
- setup_group_permission = _get_migration_by_name("setup_group_permission")
- # Update LDAP schema restart slapd
- logger.info(m18n.n("migration_0011_update_LDAP_schema"))
- regen_conf(names=['slapd'], force=True)
- setup_group_permission.migrate_LDAP_db()
+ _tools_migrations_run_after_system_restore(
+ backup_version=self.info["from_yunohost_version"]
+ )
- # Remove all permission for all app which sill in the LDAP
- for per in ldap.search('ou=permission,dc=yunohost,dc=org',
- '(&(objectClass=permissionYnh)(!(cn=main.mail))(!(cn=main.metronome))(!(cn=main.sftp)))',
- ['cn']):
- if not ldap.remove('cn=%s,ou=permission' % per['cn'][0]):
- raise YunohostError('permission_deletion_failed',
- permission=per['cn'][0].split('.')[0],
- app=per['cn'][0].split('.')[1])
+ # Remove all permission for all app still in the LDAP
+ for permission_name in user_permission_list(ignore_system_perms=True)[
+ "permissions"
+ ].keys():
+ permission_delete(permission_name, force=True, sync_perm=False)
- # Restore permission for the app which is installed
- for per in old_apps_permission:
- try:
- permission_name, app_name = per['cn'][0].split('.')
- except:
- logger.warning(m18n.n('permission_name_not_valid', permission=per['cn'][0]))
+ # Restore permission for apps installed
+ for permission_name, permission_infos in old_apps_permission.items():
+ app_name, perm_name = permission_name.split(".")
if _is_installed(app_name):
- if not ldap.add('cn=%s,ou=permission' % per['cn'][0], per):
- raise YunohostError('apps_permission_restoration_failed', permission=permission_name, app=app_name)
+ permission_create(
+ permission_name,
+ allowed=permission_infos["allowed"],
+ url=permission_infos["url"],
+ additional_urls=permission_infos["additional_urls"],
+ auth_header=permission_infos["auth_header"],
+ label=permission_infos["label"]
+ if perm_name == "main"
+ else permission_infos["sublabel"],
+ show_tile=permission_infos["show_tile"],
+ protected=permission_infos["protected"],
+ sync_perm=False,
+ )
+ permission_sync_to_user()
def _restore_apps(self):
"""Restore all apps targeted"""
@@ -1271,7 +1327,6 @@ class RestoreManager():
apps_targets = self.targets.list("apps", exclude=["Skipped"])
for app in apps_targets:
- print(app)
self._restore_app(app)
def _restore_app(self, app_instance_name):
@@ -1295,17 +1350,14 @@ class RestoreManager():
Args:
app_instance_name -- (string) The app name to restore (no app with this
name should be already install)
-
- Exceptions:
- restore_already_installed_app -- Raised if an app with this app instance
- name already exists
- restore_app_failed -- Raised if the restore bash script failed
"""
- from moulinette.utils.filesystem import read_ldif
from yunohost.user import user_group_list
- from yunohost.permission import permission_remove
- from yunohost.utils.ldap import _get_ldap_interface
- ldap = _get_ldap_interface()
+ from yunohost.permission import (
+ permission_create,
+ permission_delete,
+ user_permission_list,
+ permission_sync_to_user,
+ )
def copytree(src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
@@ -1316,166 +1368,206 @@ class RestoreManager():
else:
shutil.copy2(s, d)
+ # Check if the app is not already installed
+ if _is_installed(app_instance_name):
+ logger.error(m18n.n("restore_already_installed_app", app=app_instance_name))
+ self.targets.set_result("apps", app_instance_name, "Error")
+ return
+
# Start register change on system
- related_to = [('app', app_instance_name)]
- operation_logger = OperationLogger('backup_restore_app', related_to)
+ related_to = [("app", app_instance_name)]
+ operation_logger = OperationLogger("backup_restore_app", related_to)
operation_logger.start()
logger.info(m18n.n("app_start_restore", app=app_instance_name))
- # Check if the app is not already installed
- if _is_installed(app_instance_name):
- logger.error(m18n.n('restore_already_installed_app',
- app=app_instance_name))
- self.targets.set_result("apps", app_instance_name, "Error")
- return
+ app_dir_in_archive = os.path.join(self.work_dir, "apps", app_instance_name)
+ app_backup_in_archive = os.path.join(app_dir_in_archive, "backup")
+ app_settings_in_archive = os.path.join(app_dir_in_archive, "settings")
+ app_scripts_in_archive = os.path.join(app_settings_in_archive, "scripts")
- app_dir_in_archive = os.path.join(self.work_dir, 'apps', app_instance_name)
- app_backup_in_archive = os.path.join(app_dir_in_archive, 'backup')
- app_settings_in_archive = os.path.join(app_dir_in_archive, 'settings')
- app_scripts_in_archive = os.path.join(app_settings_in_archive, 'scripts')
+ # Attempt to patch legacy helpers...
+ _patch_legacy_helpers(app_settings_in_archive)
# Apply dirty patch to make php5 apps compatible with php7
- _patch_php5(app_settings_in_archive)
+ _patch_legacy_php_versions(app_settings_in_archive)
+ _patch_legacy_php_versions_in_settings(app_settings_in_archive)
# Delete _common.sh file in backup
- common_file = os.path.join(app_backup_in_archive, '_common.sh')
+ common_file = os.path.join(app_backup_in_archive, "_common.sh")
filesystem.rm(common_file, force=True)
# Check if the app has a restore script
- app_restore_script_in_archive = os.path.join(app_scripts_in_archive,
- 'restore')
+ app_restore_script_in_archive = os.path.join(app_scripts_in_archive, "restore")
if not os.path.isfile(app_restore_script_in_archive):
- logger.warning(m18n.n('unrestore_app', app=app_instance_name))
+ logger.warning(m18n.n("unrestore_app", app=app_instance_name))
self.targets.set_result("apps", app_instance_name, "Warning")
return
- logger.debug(m18n.n('restore_running_app_script', app=app_instance_name))
try:
# Restore app settings
- app_settings_new_path = os.path.join('/etc/yunohost/apps/',
- app_instance_name)
- app_scripts_new_path = os.path.join(app_settings_new_path, 'scripts')
+ app_settings_new_path = os.path.join(
+ "/etc/yunohost/apps/", app_instance_name
+ )
+ app_scripts_new_path = os.path.join(app_settings_new_path, "scripts")
shutil.copytree(app_settings_in_archive, app_settings_new_path)
filesystem.chmod(app_settings_new_path, 0o400, 0o400, True)
- filesystem.chown(app_scripts_new_path, 'admin', None, True)
+ filesystem.chown(app_scripts_new_path, "root", None, True)
# Copy the app scripts to a writable temporary folder
- # FIXME : use 'install -Dm555' or something similar to what's done
- # in the backup method ?
- tmp_folder_for_app_restore = tempfile.mkdtemp(prefix='restore')
- copytree(app_scripts_in_archive, tmp_folder_for_app_restore)
- filesystem.chmod(tmp_folder_for_app_restore, 0o550, 0o550, True)
- filesystem.chown(tmp_folder_for_app_restore, 'admin', None, True)
- restore_script = os.path.join(tmp_folder_for_app_restore, 'restore')
+ tmp_workdir_for_app = _make_tmp_workdir_for_app()
+ copytree(app_scripts_in_archive, tmp_workdir_for_app)
+ filesystem.chmod(tmp_workdir_for_app, 0o700, 0o700, True)
+ filesystem.chown(tmp_workdir_for_app, "root", None, True)
+ restore_script = os.path.join(tmp_workdir_for_app, "restore")
# Restore permissions
- if os.path.isfile(app_settings_in_archive + '/permission.ldif'):
- filtred_entries = ['entryUUID', 'creatorsName', 'createTimestamp', 'entryCSN', 'structuralObjectClass',
- 'modifiersName', 'modifyTimestamp', 'inheritPermission', 'memberUid']
- entries = read_ldif('%s/permission.ldif' % app_settings_in_archive, filtred_entries)
- group_list = user_group_list(['cn'])['groups']
- for dn, entry in entries:
- # Remove the group which has been removed
- for group in entry['groupPermission']:
- group_name = group.split(',')[0].split('=')[1]
- if group_name not in group_list:
- entry['groupPermission'].remove(group)
- if not ldap.add('cn=%s,ou=permission' % entry['cn'][0], entry):
- raise YunohostError('apps_permission_restoration_failed',
- permission=entry['cn'][0].split('.')[0],
- app=entry['cn'][0].split('.')[1])
- else:
- from yunohost.tools import _get_migration_by_name
- setup_group_permission = _get_migration_by_name("setup_group_permission")
- setup_group_permission.migrate_app_permission(app=app_instance_name)
+ if not os.path.isfile("%s/permissions.yml" % app_settings_new_path):
+ raise YunohostError(
+ "Didnt find a permssions.yml for the app !?", raw_msg=True
+ )
- # Prepare env. var. to pass to script
- env_dict = self._get_env_var(app_instance_name)
+ permissions = read_yaml("%s/permissions.yml" % app_settings_new_path)
+ existing_groups = user_group_list()["groups"]
- operation_logger.extra['env'] = env_dict
- operation_logger.flush()
+ for permission_name, permission_infos in permissions.items():
- # Execute app restore script
- hook_exec(restore_script,
- args=[app_backup_in_archive, app_instance_name],
- chdir=app_backup_in_archive,
- raise_on_error=True,
- env=env_dict)[0]
- except:
- msg = m18n.n('restore_app_failed', app=app_instance_name)
- logger.exception(msg)
+ if "allowed" not in permission_infos:
+ logger.warning(
+ "'allowed' key corresponding to allowed groups for permission %s not found when restoring app %s … You might have to reconfigure permissions yourself."
+ % (permission_name, app_instance_name)
+ )
+ should_be_allowed = ["all_users"]
+ else:
+ should_be_allowed = [
+ g for g in permission_infos["allowed"] if g in existing_groups
+ ]
+
+ perm_name = permission_name.split(".")[1]
+ permission_create(
+ permission_name,
+ allowed=should_be_allowed,
+ url=permission_infos.get("url"),
+ additional_urls=permission_infos.get("additional_urls"),
+ auth_header=permission_infos.get("auth_header"),
+ label=permission_infos.get("label")
+ if perm_name == "main"
+ else permission_infos.get("sublabel"),
+ show_tile=permission_infos.get("show_tile", True),
+ protected=permission_infos.get("protected", False),
+ sync_perm=False,
+ )
+
+ permission_sync_to_user()
+
+ os.remove("%s/permissions.yml" % app_settings_new_path)
+
+ _tools_migrations_run_before_app_restore(
+ backup_version=self.info["from_yunohost_version"],
+ app_id=app_instance_name,
+ )
+ except Exception:
+ import traceback
+
+ error = m18n.n("unexpected_error", error="\n" + traceback.format_exc())
+ msg = m18n.n("app_restore_failed", app=app_instance_name, error=error)
+ logger.error(msg)
operation_logger.error(msg)
self.targets.set_result("apps", app_instance_name, "Error")
- remove_script = os.path.join(app_scripts_in_archive, 'remove')
-
- # Setup environment for remove script
- app_id, app_instance_nb = _parse_app_instance_name(app_instance_name)
- env_dict_remove = {}
- env_dict_remove["YNH_APP_ID"] = app_id
- env_dict_remove["YNH_APP_INSTANCE_NAME"] = app_instance_name
- env_dict_remove["YNH_APP_INSTANCE_NUMBER"] = str(app_instance_nb)
-
- operation_logger = OperationLogger('remove_on_failed_restore',
- [('app', app_instance_name)],
- env=env_dict_remove)
- operation_logger.start()
-
- # Execute remove script
- # TODO: call app_remove instead
- if hook_exec(remove_script, args=[app_instance_name],
- env=env_dict_remove)[0] != 0:
- msg = m18n.n('app_not_properly_removed', app=app_instance_name)
- logger.warning(msg)
- operation_logger.error(msg)
- else:
- operation_logger.success()
-
- # Cleaning app directory
+ # Cleanup
shutil.rmtree(app_settings_new_path, ignore_errors=True)
+ shutil.rmtree(tmp_workdir_for_app, ignore_errors=True)
- # Remove all permission in LDAP
- result = ldap.search(base='ou=permission,dc=yunohost,dc=org',
- filter='(&(objectclass=permissionYnh)(cn=*.%s))' % app_instance_name, attrs=['cn'])
- permission_list = [p['cn'][0] for p in result]
- for l in permission_list:
- permission_remove(app_instance_name, l.split('.')[0], force=True)
+ return
- # TODO Cleaning app hooks
- else:
- self.targets.set_result("apps", app_instance_name, "Success")
- operation_logger.success()
+ logger.debug(m18n.n("restore_running_app_script", app=app_instance_name))
+
+ # Prepare env. var. to pass to script
+ env_dict = _make_environment_for_app_script(app_instance_name)
+ env_dict.update(
+ {
+ "YNH_BACKUP_DIR": self.work_dir,
+ "YNH_BACKUP_CSV": os.path.join(self.work_dir, "backup.csv"),
+ "YNH_APP_BACKUP_DIR": os.path.join(
+ self.work_dir, "apps", app_instance_name, "backup"
+ ),
+ "YNH_APP_BASEDIR": os.path.join(
+ self.work_dir, "apps", app_instance_name, "settings"
+ ),
+ }
+ )
+
+ operation_logger.extra["env"] = env_dict
+ operation_logger.flush()
+
+ # Execute the app restore script
+ restore_failed = True
+ try:
+ (
+ restore_failed,
+ failure_message_with_debug_instructions,
+ ) = hook_exec_with_script_debug_if_failure(
+ restore_script,
+ chdir=app_backup_in_archive,
+ env=env_dict,
+ operation_logger=operation_logger,
+ error_message_if_script_failed=m18n.n("app_restore_script_failed"),
+ error_message_if_failed=lambda e: m18n.n(
+ "app_restore_failed", app=app_instance_name, error=e
+ ),
+ )
finally:
# Cleaning temporary scripts directory
- shutil.rmtree(tmp_folder_for_app_restore, ignore_errors=True)
+ shutil.rmtree(tmp_workdir_for_app, ignore_errors=True)
- def _get_env_var(self, app=None):
- """ Define environment variable for hooks call """
- env_var = {}
- env_var['YNH_BACKUP_DIR'] = self.work_dir
- env_var['YNH_BACKUP_CSV'] = os.path.join(self.work_dir, "backup.csv")
+ if not restore_failed:
+ self.targets.set_result("apps", app_instance_name, "Success")
+ operation_logger.success()
+ else:
- if app is not None:
- app_dir_in_archive = os.path.join(self.work_dir, 'apps', app)
- app_backup_in_archive = os.path.join(app_dir_in_archive, 'backup')
+ self.targets.set_result("apps", app_instance_name, "Error")
- # Parse app instance name and id
- app_id, app_instance_nb = _parse_app_instance_name(app)
+ remove_script = os.path.join(app_scripts_in_archive, "remove")
- env_var["YNH_APP_ID"] = app_id
- env_var["YNH_APP_INSTANCE_NAME"] = app
- env_var["YNH_APP_INSTANCE_NUMBER"] = str(app_instance_nb)
- env_var["YNH_APP_BACKUP_DIR"] = app_backup_in_archive
+ # Setup environment for remove script
+ env_dict_remove = _make_environment_for_app_script(app_instance_name)
+ env_dict_remove["YNH_APP_BASEDIR"] = os.path.join(
+ self.work_dir, "apps", app_instance_name, "settings"
+ )
+
+ remove_operation_logger = OperationLogger(
+ "remove_on_failed_restore",
+ [("app", app_instance_name)],
+ env=env_dict_remove,
+ )
+ remove_operation_logger.start()
+
+ # Execute remove script
+ if hook_exec(remove_script, env=env_dict_remove)[0] != 0:
+ msg = m18n.n("app_not_properly_removed", app=app_instance_name)
+ logger.warning(msg)
+ remove_operation_logger.error(msg)
+ else:
+ remove_operation_logger.success()
+
+ # Cleaning app directory
+ shutil.rmtree(app_settings_new_path, ignore_errors=True)
+
+ # Remove all permission in LDAP for this app
+ for permission_name in user_permission_list()["permissions"].keys():
+ if permission_name.startswith(app_instance_name + "."):
+ permission_delete(permission_name, force=True)
+
+ # TODO Cleaning app hooks
+
+ logger.error(failure_message_with_debug_instructions)
- return env_var
#
# Backup methods #
#
-
-
class BackupMethod(object):
"""
@@ -1497,7 +1589,7 @@ class BackupMethod(object):
TarBackupMethod
---------------
- This method compresses all files to backup in a .tar.gz archive. When
+ This method compresses all files to backup in a .tar archive. When
restoring, it untars the required parts.
CustomBackupMethod
@@ -1510,20 +1602,37 @@ class BackupMethod(object):
method_name
Public methods:
- mount_and_backup(self, backup_manager)
- mount(self, restore_manager)
+ mount_and_backup(self)
+ mount(self)
create(cls, method, **kwargs)
info(archive_name)
Usage:
- method = BackupMethod.create("tar")
- method.mount_and_backup(backup_manager)
+ method = BackupMethod.create("tar", backup_manager)
+ method.mount_and_backup()
#or
- method = BackupMethod.create("copy")
- method.mount(restore_manager)
+ method = BackupMethod.create("copy", restore_manager)
+ method.mount()
"""
- def __init__(self, repo=None):
+ @classmethod
+ def create(cls, method, manager, **kwargs):
+ """
+ Factory method to create instance of BackupMethod
+
+ Args:
+ method -- (string) The method name of an existing BackupMethod. If the
+ name is unknown the CustomBackupMethod will be tried
+ *args -- Specific args for the method, could be the repo target by the
+ method
+
+ Return a BackupMethod instance
+ """
+ known_methods = {c.method_name: c for c in BackupMethod.__subclasses__()}
+ backup_method = known_methods.get(method, CustomBackupMethod)
+ return backup_method(manager, method=method, **kwargs)
+
+ def __init__(self, manager, repo=None, **kwargs):
"""
BackupMethod constructors
@@ -1536,6 +1645,7 @@ class BackupMethod(object):
BackupRepository object. If None, the default repo is used :
/home/yunohost.backup/archives/
"""
+ self.manager = manager
if not repo or isinstance(repo, basestring):
repo = BackupRepository.get_or_create(ARCHIVES_PATH)
self.repo = repo
@@ -1543,13 +1653,13 @@ class BackupMethod(object):
@property
def method_name(self):
"""Return the string name of a BackupMethod (eg "tar" or "copy")"""
- raise YunohostError('backup_abstract_method')
+ raise YunohostError("backup_abstract_method")
@property
def archive_path(self):
"""Return the archive path"""
return self.repo.location + '::' + self.name
-
+
@property
def name(self):
"""Return the backup name"""
@@ -1588,18 +1698,13 @@ class BackupMethod(object):
"""
return False
- def mount_and_backup(self, backup_manager):
+ def mount_and_backup(self):
"""
Run the backup on files listed by the BackupManager instance
This method shouldn't be overrided, prefer overriding self.backup() and
self.clean()
-
- Args:
- backup_manager -- (BackupManager) A backup manager instance that has
- already done the files collection step.
"""
- self.manager = backup_manager
if self.need_mount():
self._organize_files()
@@ -1608,21 +1713,17 @@ class BackupMethod(object):
finally:
self.clean()
- def mount(self, restore_manager):
+ def mount(self):
"""
Mount the archive from RestoreManager instance in the working directory
This method should be extended.
-
- Args:
- restore_manager -- (RestoreManager) A restore manager instance
- contains an archive to restore.
"""
- self.manager = restore_manager
+ pass
def info(self, name):
self._assert_archive_exists()
-
+
info_json = self._get_info_string()
if not self._info_json:
raise YunohostError('backup_info_json_not_implemented')
@@ -1631,20 +1732,16 @@ class BackupMethod(object):
except:
logger.debug("unable to load info json", exc_info=1)
raise YunohostError('backup_invalid_archive')
-
+
return info
-
+
def clean(self):
"""
Umount sub directories of working dirextories and delete it if temporary
-
- Exceptions:
- backup_cleaning_failed -- Raise if we were not able to unmount sub
- directories of the working directories
"""
if self.need_mount():
if not _recursive_umount(self.work_dir):
- raise YunohostError('backup_cleaning_failed')
+ raise YunohostError("backup_cleaning_failed")
if self.manager.is_tmp_work_dir:
filesystem.rm(self.work_dir, True, True)
@@ -1652,9 +1749,6 @@ class BackupMethod(object):
def _check_is_enough_free_space(self):
"""
Check free space in repository or output directory before to backup
-
- Exceptions:
- not_enough_disk_space -- Raise if there isn't enough space.
"""
# TODO How to do with distant repo or with deduplicated backup ?
backup_size = self.manager.size
@@ -1662,9 +1756,13 @@ class BackupMethod(object):
free_space = free_space_in_directory(self.repo)
if free_space < backup_size:
- logger.debug('Not enough space at %s (free: %s / needed: %d)',
- self.repo, free_space, backup_size)
- raise YunohostError('not_enough_disk_space', path=self.repo)
+ logger.debug(
+ "Not enough space at %s (free: %s / needed: %d)",
+ self.repo,
+ free_space,
+ backup_size,
+ )
+ raise YunohostValidationError("not_enough_disk_space", path=self.repo)
def _organize_files(self):
"""
@@ -1676,13 +1774,10 @@ class BackupMethod(object):
The usage of binding could be strange for a user because the du -sb
command will return that the working directory is big.
-
- Exceptions:
- backup_unable_to_organize_files
"""
paths_needed_to_be_copied = []
for path in self.manager.paths_to_backup:
- src = path['source']
+ src = path["source"]
if self.manager is RestoreManager:
# TODO Support to run this before a restore (and not only before
@@ -1690,7 +1785,7 @@ class BackupMethod(object):
# be implemented
src = os.path.join(self.unorganized_work_dir, src)
- dest = os.path.join(self.work_dir, path['dest'])
+ dest = os.path.join(self.work_dir, path["dest"])
if dest == src:
continue
dest_dir = os.path.dirname(dest)
@@ -1710,7 +1805,7 @@ class BackupMethod(object):
logger.warning(m18n.n("backup_couldnt_bind", src=src, dest=dest))
# To check if dest is mounted, use /proc/mounts that
# escape spaces as \040
- raw_mounts = read_file("/proc/mounts").strip().split('\n')
+ raw_mounts = read_file("/proc/mounts").strip().split("\n")
mounts = [m.split()[1] for m in raw_mounts]
mounts = [m.replace("\\040", " ") for m in mounts]
if dest in mounts:
@@ -1726,7 +1821,7 @@ class BackupMethod(object):
if os.stat(src).st_dev == os.stat(dest_dir).st_dev:
# Don't hardlink /etc/cron.d files to avoid cron bug
# 'NUMBER OF HARD LINKS > 1' see #1043
- cron_path = os.path.abspath('/etc/cron') + '.'
+ cron_path = os.path.abspath("/etc/cron") + "."
if not os.path.abspath(src).startswith(cron_path):
try:
os.link(src, dest)
@@ -1736,7 +1831,10 @@ class BackupMethod(object):
# E.g. this happens when running an encrypted hard drive
# where everything is mapped to /dev/mapper/some-stuff
# yet there are different devices behind it or idk ...
- logger.warning("Could not link %s to %s (%s) ... falling back to regular copy." % (src, dest, str(e)))
+ logger.warning(
+ "Could not link %s to %s (%s) ... falling back to regular copy."
+ % (src, dest, str(e))
+ )
else:
# Success, go to next file to organize
continue
@@ -1752,59 +1850,33 @@ class BackupMethod(object):
# to mounting error
# Compute size to copy
- size = sum(disk_usage(path['source']) for path in paths_needed_to_be_copied)
- size /= (1024 * 1024) # Convert bytes to megabytes
+ size = sum(disk_usage(path["source"]) for path in paths_needed_to_be_copied)
+ size /= 1024 * 1024 # Convert bytes to megabytes
# Ask confirmation for copying
if size > MB_ALLOWED_TO_ORGANIZE:
try:
- i = msignals.prompt(m18n.n('backup_ask_for_copying_if_needed',
- answers='y/N', size=str(size)))
+ i = Moulinette.prompt(
+ m18n.n(
+ "backup_ask_for_copying_if_needed",
+ answers="y/N",
+ size=str(size),
+ )
+ )
except NotImplemented:
- raise YunohostError('backup_unable_to_organize_files')
+ raise YunohostError("backup_unable_to_organize_files")
else:
- if i != 'y' and i != 'Y':
- raise YunohostError('backup_unable_to_organize_files')
+ if i != "y" and i != "Y":
+ raise YunohostError("backup_unable_to_organize_files")
# Copy unbinded path
- logger.debug(m18n.n('backup_copying_to_organize_the_archive',
- size=str(size)))
+ logger.debug(m18n.n("backup_copying_to_organize_the_archive", size=str(size)))
for path in paths_needed_to_be_copied:
- dest = os.path.join(self.work_dir, path['dest'])
- if os.path.isdir(path['source']):
- shutil.copytree(path['source'], dest, symlinks=True)
+ dest = os.path.join(self.work_dir, path["dest"])
+ if os.path.isdir(path["source"]):
+ shutil.copytree(path["source"], dest, symlinks=True)
else:
- shutil.copy(path['source'], dest)
-
- @classmethod
- def create(cls, method, *args):
- """
- Factory method to create instance of BackupMethod
-
- Args:
- method -- (string) The method name of an existing BackupMethod. If the
- name is unknown the CustomBackupMethod will be tried
-
- ... -- Specific args for the method, could be the repo target by the
- method
-
- Return a BackupMethod instance
- """
- if not isinstance(method, basestring):
- methods = []
- for m in method:
- methods.append(BackupMethod.create(m, *args))
- return methods
-
- bm_class = {
- 'copy': CopyBackupMethod,
- 'tar': TarBackupMethod,
- 'borg': BorgBackupMethod
- }
- if method in ["copy", "tar", "borg"]:
- return bm_class[method](*args)
- else:
- return CustomBackupMethod(method=method, *args)
+ shutil.copy(path["source"], dest)
class CopyBackupMethod(BackupMethod):
@@ -1814,29 +1886,25 @@ class CopyBackupMethod(BackupMethod):
could be the inverse for restoring
"""
- def __init__(self, repo=None):
- super(CopyBackupMethod, self).__init__(repo)
- filesystem.mkdir(self.repo.path, parent=True)
+ # FIXME: filesystem.mkdir(self.repo.path, parent=True)
- @property
- def method_name(self):
- return 'copy'
+ method_name = "copy"
def backup(self):
- """ Copy prepared files into a the repo """
+ """Copy prepared files into a the repo"""
# Check free space in output
self._check_is_enough_free_space()
for path in self.manager.paths_to_backup:
- source = path['source']
- dest = os.path.join(self.repo.path, path['dest'])
+ source = path["source"]
+ dest = os.path.join(self.repo.path, path["dest"])
if source == dest:
logger.debug("Files already copyed")
return
dest_parent = os.path.dirname(dest)
if not os.path.exists(dest_parent):
- filesystem.mkdir(dest_parent, 0o750, True, uid='admin')
+ filesystem.mkdir(dest_parent, 0o700, True, uid="admin")
if os.path.isdir(source):
shutil.copytree(source, dest)
@@ -1846,17 +1914,13 @@ class CopyBackupMethod(BackupMethod):
def mount(self):
"""
Mount the uncompress backup in readonly mode to the working directory
-
- Exceptions:
- backup_no_uncompress_archive_dir -- Raised if the repo doesn't exists
- backup_cant_mount_uncompress_archive -- Raised if the binding failed
"""
# FIXME: This code is untested because there is no way to run it from
# the ynh cli
super(CopyBackupMethod, self).mount()
if not os.path.isdir(self.repo.path):
- raise YunohostError('backup_no_uncompress_archive_dir')
+ raise YunohostError("backup_no_uncompress_archive_dir")
filesystem.mkdir(self.work_dir, parent=True)
ret = subprocess.call(["mount", "-r", "--rbind", self.repo.path,
@@ -1867,27 +1931,38 @@ class CopyBackupMethod(BackupMethod):
logger.warning(m18n.n("bind_mouting_disable"))
subprocess.call(["mountpoint", "-q", self.repo.path,
"&&", "umount", "-R", self.repo.path])
- raise YunohostError('backup_cant_mount_uncompress_archive')
+ raise YunohostError("backup_cant_mount_uncompress_archive")
+
+ logger.warning(
+ "Could not mount the backup in readonly mode with --rbind ... Unmounting"
+ )
+ # FIXME : Does this stuff really work? '&&' is going to be interpreted as an argument for mountpoint here ... Not as a classical '&&' ...
+ subprocess.call(
+ ["mountpoint", "-q", self.work_dir, "&&", "umount", "-R", self.work_dir]
+ )
+ raise YunohostError("backup_cant_mount_uncompress_archive")
+
+ def copy(self, file, target):
+ shutil.copy(file, target)
class TarBackupMethod(BackupMethod):
- """
- This class compress all files to backup in archive.
- """
-
- def __init__(self, repo=None):
- super(TarBackupMethod, self).__init__(repo)
- filesystem.mkdir(self.repo.path, parent=True)
-
- @property
- def method_name(self):
- return 'tar'
+ # FIXME: filesystem.mkdir(self.repo.path, parent=True)
+ method_name = "tar"
@property
def archive_path(self):
- """Return the compress archive path"""
- return os.path.join(self.repo.path, self.name + '.tar.gz')
+
+ if isinstance(self.manager, BackupManager) and settings_get(
+ "backup.compress_tar_archives"
+ ):
+ return os.path.join(self.repo.path, self.name + ".tar.gz")
+
+ f = os.path.join(self.repo.path, self.name + ".tar")
+ if os.path.exists(f + ".gz"):
+ f += ".gz"
+ return f
def backup(self):
"""
@@ -1895,94 +1970,103 @@ class TarBackupMethod(BackupMethod):
It adds the info.json in /home/yunohost.backup/archives and if the
compress archive isn't located here, add a symlink to the archive to.
-
- Exceptions:
- backup_archive_open_failed -- Raised if we can't open the archive
- backup_creation_failed -- Raised if we can't write in the
- compress archive
"""
if not os.path.exists(self.repo.path):
- filesystem.mkdir(self.repo.path, 0o750, parents=True, uid='admin')
+ filesystem.mkdir(self.repo.path, 0o750, parents=True, uid="admin")
# Check free space in output
self._check_is_enough_free_space()
# Open archive file for writing
try:
- tar = tarfile.open(self.archive_path, "w:gz")
- except:
- logger.debug("unable to open '%s' for writing",
- self.archive_path, exc_info=1)
- raise YunohostError('backup_archive_open_failed')
+ tar = tarfile.open(
+ self.archive_path,
+ "w:gz" if self.archive_path.endswith(".gz") else "w",
+ )
+ except Exception:
+ logger.debug(
+ "unable to open '%s' for writing", self.archive_path, exc_info=1
+ )
+ raise YunohostError("backup_archive_open_failed")
# Add files to the archive
try:
for path in self.manager.paths_to_backup:
# Add the "source" into the archive and transform the path into
# "dest"
- tar.add(path['source'], arcname=path['dest'])
+ tar.add(path["source"], arcname=path["dest"])
except IOError:
- logger.error(m18n.n('backup_archive_writing_error', source=path['source'], archive=self._archive_file, dest=path['dest']), exc_info=1)
- raise YunohostError('backup_creation_failed')
+ logger.error(
+ m18n.n(
+ "backup_archive_writing_error",
+ source=path["source"],
+ archive=self._archive_file,
+ dest=path["dest"],
+ ),
+ exc_info=1,
+ )
+ raise YunohostError("backup_creation_failed")
finally:
tar.close()
# Move info file
- shutil.copy(os.path.join(self.work_dir, 'info.json'),
- os.path.join(self.repo.path, self.name + '.info.json'))
+ shutil.copy(
+ os.path.join(self.work_dir, "info.json"),
+ os.path.join(ARCHIVES_PATH, self.name + ".info.json"),
+ )
# If backuped to a non-default location, keep a symlink of the archive
# to that location
- link = os.path.join(self.repo.path, self.name + '.tar.gz')
+ link = os.path.join(self.repo.path, self.name + ".tar")
if not os.path.isfile(link):
os.symlink(self.archive_path, link)
- def mount(self, restore_manager):
+ def mount(self):
"""
- Mount the archive. We avoid copy to be able to restore on system without
- too many space.
-
- Exceptions:
- backup_archive_open_failed -- Raised if the archive can't be open
+ Mount the archive. We avoid intermediate copies to be able to restore on systems with low free space.
"""
- super(TarBackupMethod, self).mount(restore_manager)
-
- # Check file exist and it's not a broken link
- self._assert_archive_exists()
-
- # Check the archive can be open
- try:
- tar = tarfile.open(self.archive_path, "r:gz")
- except:
- logger.debug("cannot open backup archive '%s'",
- self.archive_path, exc_info=1)
- raise YunohostError('backup_archive_open_failed')
-
- # FIXME : Is this really useful to close the archive just to
- # reopen it right after this with the same options ...?
- tar.close()
+ super(TarBackupMethod, self).mount()
# Mount the tarball
logger.debug(m18n.n("restore_extracting"))
- tar = tarfile.open(self._archive_file, "r:gz")
+ try:
+ tar = tarfile.open(
+ self.archive_path,
+ "r:gz" if self.archive_path.endswith(".gz") else "r",
+ )
+ except Exception:
+ logger.debug(
+ "cannot open backup archive '%s'", self.archive_path, exc_info=1
+ )
+ raise YunohostError("backup_archive_open_failed")
+
+ try:
+ files_in_archive = tar.getnames()
+ except (IOError, EOFError, tarfile.ReadError) as e:
+ raise YunohostError(
+ "backup_archive_corrupted", archive=self.archive_path, error=str(e)
+ )
if "info.json" in tar.getnames():
leading_dot = ""
- tar.extract('info.json', path=self.work_dir)
- elif "./info.json" in tar.getnames():
+ tar.extract("info.json", path=self.work_dir)
+ elif "./info.json" in files_in_archive:
leading_dot = "./"
- tar.extract('./info.json', path=self.work_dir)
+ tar.extract("./info.json", path=self.work_dir)
else:
- logger.debug("unable to retrieve 'info.json' inside the archive",
- exc_info=1)
+ logger.debug(
+ "unable to retrieve 'info.json' inside the archive", exc_info=1
+ )
tar.close()
- raise YunohostError('backup_invalid_archive')
+ raise YunohostError(
+ "backup_archive_cant_retrieve_info_json", archive=self.archive_path
+ )
- if "backup.csv" in tar.getnames():
- tar.extract('backup.csv', path=self.work_dir)
- elif "./backup.csv" in tar.getnames():
- tar.extract('./backup.csv', path=self.work_dir)
+ if "backup.csv" in files_in_archive:
+ tar.extract("backup.csv", path=self.work_dir)
+ elif "./backup.csv" in files_in_archive:
+ tar.extract("./backup.csv", path=self.work_dir)
else:
# Old backup archive have no backup.csv file
pass
@@ -2004,47 +2088,58 @@ class TarBackupMethod(BackupMethod):
else:
system_part = system_part.replace("_", "/") + "/"
subdir_and_files = [
- tarinfo for tarinfo in tar.getmembers()
- if tarinfo.name.startswith(leading_dot+system_part)
+ tarinfo
+ for tarinfo in tar.getmembers()
+ if tarinfo.name.startswith(leading_dot + system_part)
]
tar.extractall(members=subdir_and_files, path=self.work_dir)
subdir_and_files = [
- tarinfo for tarinfo in tar.getmembers()
- if tarinfo.name.startswith(leading_dot+"hooks/restore/")
+ tarinfo
+ for tarinfo in tar.getmembers()
+ if tarinfo.name.startswith(leading_dot + "hooks/restore/")
]
tar.extractall(members=subdir_and_files, path=self.work_dir)
# Extract apps backup
for app in apps_targets:
subdir_and_files = [
- tarinfo for tarinfo in tar.getmembers()
- if tarinfo.name.startswith(leading_dot+"apps/" + app)
+ tarinfo
+ for tarinfo in tar.getmembers()
+ if tarinfo.name.startswith(leading_dot + "apps/" + app)
]
tar.extractall(members=subdir_and_files, path=self.work_dir)
+ tar.close()
+
+ def copy(self, file, target):
+ tar = tarfile.open(
+ self._archive_file, "r:gz" if self._archive_file.endswith(".gz") else "r"
+ )
+ file_to_extract = tar.getmember(file)
+ # Remove the path
+ file_to_extract.name = os.path.basename(file_to_extract.name)
+ tar.extract(file_to_extract, path=target)
+ tar.close()
+
def list(self):
- result = []
+ # Get local archives sorted according to last modification time
+ # (we do a realpath() to resolve symlinks)
+ archives = glob("%s/*.tar.gz" % self.repo.path) + glob("%s/*.tar" % self.repo.path)
+ archives = set([os.path.realpath(archive) for archive in archives])
+ archives = sorted(archives, key=lambda x: os.path.getctime(x))
+ # Extract only filename without the extension
- try:
- # Retrieve local archives
- archives = os.listdir(self.repo.path)
- except OSError:
- logger.debug("unable to iterate over local archives", exc_info=1)
- else:
- # Iterate over local archives
- for f in archives:
- try:
- name = f[:f.rindex('.tar.gz')]
- except ValueError:
- continue
- result.append(name)
- result.sort(key=lambda x: os.path.getctime(self.archive_path))
+ def remove_extension(f):
+ if f.endswith(".tar.gz"):
+ return os.path.basename(f)[: -len(".tar.gz")]
+ else:
+ return os.path.basename(f)[: -len(".tar")]
- return result
+ return [remove_extension(f) for f in archives]
def _archive_exists(self):
return os.path.lexists(self.archive_path)
-
+
def _assert_archive_exists(self):
if not self._archive_exists():
raise YunohostError('backup_archive_name_unknown', name=self.name)
@@ -2091,7 +2186,7 @@ class BorgBackupMethod(BackupMethod):
if not self.repo.domain:
filesystem.mkdir(self.repo.path, parent=True)
-
+
cmd = ['borg', 'init', self.repo.location]
if self.repo.quota:
@@ -2101,8 +2196,8 @@ class BorgBackupMethod(BackupMethod):
@property
def method_name(self):
return 'borg'
-
-
+
+
def need_mount(self):
return True
@@ -2115,7 +2210,7 @@ class BorgBackupMethod(BackupMethod):
def mount(self, restore_manager):
""" Extract and mount needed files with borg """
super(BorgBackupMethod, self).mount(restore_manager)
-
+
# Export as tar needed files through a pipe
cmd = ['borg', 'export-tar', self.archive_path, '-']
borg = self._run_borg_command(cmd, stdout=subprocess.PIPE)
@@ -2138,7 +2233,7 @@ class BorgBackupMethod(BackupMethod):
out = self._call('list', cmd)
result = out.strip().splitlines()
return result
-
+
def _assert_archive_exists(self):
""" Trigger an error if archive is missing
@@ -2165,26 +2260,26 @@ class BorgBackupMethod(BackupMethod):
if self.repo.domain:
# TODO Use the best/good key
private_key = "/root/.ssh/ssh_host_ed25519_key"
-
+
# Don't check ssh fingerprint strictly the first time
# TODO improve this by publishing and checking this with DNS
strict = 'yes' if self.repo.domain in open('/root/.ssh/known_hosts').read() else 'no'
env['BORG_RSH'] = "ssh -i %s -oStrictHostKeyChecking=%s"
env['BORG_RSH'] = env['BORG_RSH'] % (private_key, strict)
-
+
# In case, borg need a passphrase to get access to the repo
if self.repo.passphrase:
cmd += ['-e', 'repokey']
env['BORG_PASSPHRASE'] = self.repo.passphrase
return subprocess.Popen(cmd, env=env, stdout=stdout)
-
+
def _call(self, action, cmd):
borg = self._run_borg_command(cmd)
return_code = borg.wait()
if return_code:
raise YunohostError('backup_borg_' + action + '_error')
-
+
out, _ = borg.communicate()
return out
@@ -2199,57 +2294,38 @@ class CustomBackupMethod(BackupMethod):
/etc/yunohost/hooks.d/backup_method/
"""
- def __init__(self, repo=None, method=None, **kwargs):
- super(CustomBackupMethod, self).__init__(repo)
+ method_name = "custom"
+
+ def __init__(self, manager, repo=None, method=None, **kwargs):
+ super(CustomBackupMethod, self).__init__(manager, repo)
self.args = kwargs
self.method = method
self._need_mount = None
- @property
- def method_name(self):
- return 'custom'
-
def need_mount(self):
- """Call the backup_method hook to know if we need to organize files
-
- Exceptions:
- backup_custom_need_mount_error -- Raised if the hook failed
- """
+ """Call the backup_method hook to know if we need to organize files"""
if self._need_mount is not None:
return self._need_mount
- ret = hook_callback('backup_method', [self.method],
- args=['need_mount'])
-
- ret_succeed = [hook for hook, infos in ret.items()
- if any(result["state"] == "succeed" for result in infos.values())]
- self._need_mount = True if ret_succeed else False
- return self._need_mount
+ try:
+ self._call('need_mount')
+ except YunohostError:
+ return False
+ return True
def backup(self):
"""
Launch a custom script to backup
-
- Exceptions:
- backup_custom_backup_error -- Raised if the custom script failed
"""
self._call('backup', self.work_dir, self.name, self.repo.location, self.manager.size,
self.manager.description)
- ret_failed = [hook for hook, infos in ret.items()
- if any(result["state"] == "failed" for result in infos.values())]
- if ret_failed:
- raise YunohostError('backup_custom_backup_error')
-
- def mount(self, restore_manager):
+ def mount(self):
"""
Launch a custom script to mount the custom archive
-
- Exceptions:
- backup_custom_mount_error -- Raised if the custom script failed
"""
- super(CustomBackupMethod, self).mount(restore_manager)
+ super().mount()
self._call('mount', self.work_dir, self.name, self.repo.location, self.manager.size,
self.manager.description)
@@ -2262,7 +2338,7 @@ class CustomBackupMethod(BackupMethod):
out = self._call('list', self.repo.location)
result = out.strip().splitlines()
return result
-
+
def _assert_archive_exists(self):
""" Trigger an error if archive is missing
@@ -2278,30 +2354,52 @@ class CustomBackupMethod(BackupMethod):
backup_custom_info_error -- Raised if the custom script failed
"""
return self._call('info', self.name, self.repo.location)
-
+
def _call(self, *args):
""" Call a submethod of backup method hook
Exceptions:
backup_custom_ACTION_error -- Raised if the custom script failed
"""
- ret = hook_callback('backup_method', [self.method],
+ ret = hook_callback("backup_method", [self.method],
args=args)
- ret_failed = [hook for hook, infos in ret.items()
- if any(result["state"] == "failed" for result in infos.values())]
- if ret['failed']:
- raise YunohostError('backup_custom_' + args[0] + '_error')
+ ret_failed = [
+ hook
+ for hook, infos in ret.items()
+ if any(result["state"] == "failed" for result in infos.values())
+ ]
+ if ret_failed:
+ raise YunohostError("backup_custom_" + args[0] + "_error")
- return ret['succeed'][self.method]['stdreturn']
+ return ret["succeed"][self.method]["stdreturn"]
+
+ def _get_args(self, action):
+ """Return the arguments to give to the custom script"""
+ return [
+ action,
+ self.work_dir,
+ self.name,
+ self.repo,
+ self.manager.size,
+ self.manager.description,
+ ]
#
# "Front-end" #
#
-def backup_create(name=None, description=None, repos=[],
- system=[], apps=[]):
+@is_unit_operation()
+def backup_create(
+ operation_logger,
+ name=None,
+ description=None,
+ repos=[],
+ system=[],
+ apps=[],
+ dry_run=False,
+):
"""
Create a backup local archive
@@ -2310,7 +2408,6 @@ def backup_create(name=None, description=None, repos=[],
description -- Short description of the backup
method -- Method of backup to use
output_directory -- Output directory for the backup
- no_compress -- Do not create an archive file
system -- List of system elements to backup
apps -- List of application names to backup
"""
@@ -2322,8 +2419,12 @@ def backup_create(name=None, description=None, repos=[],
#
# Validate there is no archive with the same name
- if name and name in backup_list()['archives']:
- raise YunohostError('backup_archive_name_exists')
+ if name and name in backup_list()["archives"]:
+ raise YunohostValidationError("backup_archive_name_exists")
+
+ # By default we backup using the tar method
+ if not methods:
+ methods = ["tar"]
# If no --system or --apps given, backup everything
if system is None and apps is None:
@@ -2334,6 +2435,8 @@ def backup_create(name=None, description=None, repos=[],
# Intialize #
#
+ operation_logger.start()
+
# Create yunohost archives directory if it does not exists
_create_archive_dir()
@@ -2342,16 +2445,21 @@ def backup_create(name=None, description=None, repos=[],
# Add backup methods
if repos == []:
- repos = ['/home/yunohost.backup/archives']
+ repos = ["/home/yunohost.backup/archives"]
for repo in repos:
repo = BackupRepository.get(repo)
backup_manager.add(repo.method)
# Add backup targets (system and apps)
+
backup_manager.set_system_targets(system)
backup_manager.set_apps_targets(apps)
+ for app in backup_manager.targets.list("apps", exclude=["Skipped"]):
+ operation_logger.related_to.append(("app", app))
+ operation_logger.flush()
+
#
# Collect files and put them in the archive #
#
@@ -2359,16 +2467,29 @@ def backup_create(name=None, description=None, repos=[],
# Collect files to be backup (by calling app backup script / system hooks)
backup_manager.collect_files()
+ if dry_run:
+ return {
+ "size": backup_manager.size,
+ "size_details": backup_manager.size_details,
+ }
+
# Apply backup methods on prepared files
logger.info(m18n.n("backup_actually_backuping"))
+ logger.info(
+ m18n.n(
+ "backup_create_size_estimation",
+ size=binary_to_human(backup_manager.size) + "B",
+ )
+ )
backup_manager.backup()
- logger.success(m18n.n('backup_created'))
+ logger.success(m18n.n("backup_created"))
+ operation_logger.success()
return {
- 'name': backup_manager.name,
- 'size': backup_manager.size,
- 'results': backup_manager.targets.results
+ "name": backup_manager.name,
+ "size": backup_manager.size,
+ "results": backup_manager.targets.results,
}
@@ -2392,32 +2513,15 @@ def backup_restore(name, system=[], apps=[], force=False):
system = []
apps = []
- # TODO don't ask this question when restoring apps only and certain system
- # parts
-
- # Check if YunoHost is installed
- if system is not None and os.path.isfile('/etc/yunohost/installed'):
- logger.warning(m18n.n('yunohost_already_installed'))
- if not force:
- try:
- # Ask confirmation for restoring
- i = msignals.prompt(m18n.n('restore_confirm_yunohost_installed',
- answers='y/N'))
- except NotImplemented:
- pass
- else:
- if i == 'y' or i == 'Y':
- force = True
- if not force:
- raise YunohostError('restore_failed')
-
- # TODO Partial app restore could not work if ldap is not restored before
- # TODO repair mysql if broken and it's a complete restore
-
#
# Initialize #
#
+ if name.endswith(".tar.gz"):
+ name = name[: -len(".tar.gz")]
+ elif name.endswith(".tar"):
+ name = name[: -len(".tar")]
+
restore_manager = RestoreManager(name)
restore_manager.set_system_targets(system)
@@ -2425,6 +2529,28 @@ def backup_restore(name, system=[], apps=[], force=False):
restore_manager.assert_enough_free_space()
+ #
+ # Add validation if restoring system parts on an already-installed system
+ #
+
+ if restore_manager.targets.targets["system"] != [] and os.path.isfile(
+ "/etc/yunohost/installed"
+ ):
+ logger.warning(m18n.n("yunohost_already_installed"))
+ if not force:
+ try:
+ # Ask confirmation for restoring
+ i = Moulinette.prompt(
+ m18n.n("restore_confirm_yunohost_installed", answers="y/N")
+ )
+ except NotImplementedError:
+ pass
+ else:
+ if i == "y" or i == "Y":
+ force = True
+ if not force:
+ raise YunohostError("restore_failed")
+
#
# Mount the archive then call the restore for each system part / app #
#
@@ -2435,9 +2561,9 @@ def backup_restore(name, system=[], apps=[], force=False):
# Check if something has been restored
if restore_manager.success:
- logger.success(m18n.n('restore_complete'))
+ logger.success(m18n.n("restore_complete"))
else:
- raise YunohostError('restore_nothings_done')
+ raise YunohostError("restore_nothings_done")
return restore_manager.targets.results
@@ -2453,7 +2579,7 @@ def backup_list(repos=[], with_info=False, human_readable=False):
"""
result = OrderedDict()
-
+
if repos == []:
repos = BackupRepository.all()
else:
@@ -2463,7 +2589,7 @@ def backup_list(repos=[], with_info=False, human_readable=False):
for repo in repos:
result[repo.name] = repo.list(with_info)
-
+
# Add details
if result[repo.name] and with_info:
d = OrderedDict()
@@ -2471,12 +2597,52 @@ def backup_list(repos=[], with_info=False, human_readable=False):
try:
d[a] = backup_info(a, repo=repo.location, human_readable=human_readable)
except YunohostError as e:
- logger.warning('%s: %s' % (a, e.strerror))
+ logger.warning(str(e))
+ except Exception:
+ import traceback
+
+ logger.warning(
+ "Could not check infos for archive %s: %s"
+ % (a, "\n" + traceback.format_exc())
+ )
result[repo.name] = d
-
+
return result
+def backup_download(name):
+ # TODO Integrate in backup methods
+ if Moulinette.interface.type != "api":
+ logger.error(
+ "This option is only meant for the API/webadmin and doesn't make sense for the command line."
+ )
+ return
+
+ archive_file = "%s/%s.tar" % (ARCHIVES_PATH, name)
+
+ # Check file exist (even if it's a broken symlink)
+ if not os.path.lexists(archive_file):
+ archive_file += ".gz"
+ if not os.path.lexists(archive_file):
+ raise YunohostValidationError("backup_archive_name_unknown", name=name)
+
+ # If symlink, retrieve the real path
+ if os.path.islink(archive_file):
+ archive_file = os.path.realpath(archive_file)
+
+ # Raise exception if link is broken (e.g. on unmounted external storage)
+ if not os.path.exists(archive_file):
+ raise YunohostValidationError(
+ "backup_archive_broken_link", path=archive_file
+ )
+
+ # We return a raw bottle HTTPresponse (instead of serializable data like
+ # list/dict, ...), which is gonna be picked and used directly by moulinette
+ from bottle import static_file
+
+ archive_folder, archive_file_name = archive_file.rsplit("/", 1)
+ return static_file(archive_file_name, archive_folder, download=archive_file_name)
+
def backup_info(name, repo=None, with_details=False, human_readable=False):
"""
@@ -2494,15 +2660,15 @@ def backup_info(name, repo=None, with_details=False, human_readable=False):
repo = BackupRepository.get(repo)
info = repo.method.info(name)
-
+
# Historically backup size was not here, in that case we know it's a tar archive
size = info.get('size', 0)
if not size:
- tar = tarfile.open(repo.archive_path, "r:gz")
+ tar = tarfile.open(repo.archive_path, "r:gz" if repo.archive_path.endswith(".gz") else "r")
size = reduce(lambda x, y: getattr(x, 'size', x) + getattr(y, 'size', y),
tar.getmembers())
tar.close()
-
+
result = {
'path': repo.archive_path,
'created_at': datetime.utcfromtimestamp(info['created_at']),
@@ -2521,10 +2687,19 @@ def backup_info(name, repo=None, with_details=False, human_readable=False):
if "size_details" in info.keys():
for category in ["apps", "system"]:
for name, key_info in info[category].items():
+
+ if category == "system":
+ # Stupid legacy fix for weird format between 3.5 and 3.6
+ if isinstance(key_info, dict):
+ key_info = key_info.keys()
+ info[category][name] = key_info = {"paths": key_info}
+ else:
+ info[category][name] = key_info
+
if name in info["size_details"][category].keys():
key_info["size"] = info["size_details"][category][name]
if human_readable:
- key_info["size"] = binary_to_human(key_info["size"]) + 'B'
+ key_info["size"] = binary_to_human(key_info["size"]) + "B"
else:
key_info["size"] = -1
if human_readable:
@@ -2532,6 +2707,7 @@ def backup_info(name, repo=None, with_details=False, human_readable=False):
result["apps"] = info["apps"]
result["system"] = info[system_key]
+ result["from_yunohost_version"] = info.get("from_yunohost_version")
return result
@@ -2545,12 +2721,13 @@ def backup_delete(name):
"""
if name not in backup_list()["archives"]:
- raise YunohostError('backup_archive_name_unknown',
- name=name)
+ raise YunohostValidationError("backup_archive_name_unknown", name=name)
- hook_callback('pre_backup_delete', args=[name])
+ hook_callback("pre_backup_delete", args=[name])
- archive_file = '%s/%s.tar.gz' % (ARCHIVES_PATH, name)
+ archive_file = "%s/%s.tar" % (ARCHIVES_PATH, name)
+ if os.path.exists(archive_file + ".gz"):
+ archive_file += ".gz"
info_file = "%s/%s.info.json" % (ARCHIVES_PATH, name)
files_to_delete = [archive_file, info_file]
@@ -2561,15 +2738,18 @@ def backup_delete(name):
files_to_delete.append(actual_archive)
for backup_file in files_to_delete:
+ if not os.path.exists(backup_file):
+ continue
try:
os.remove(backup_file)
- except:
+ except Exception:
logger.debug("unable to delete '%s'", backup_file, exc_info=1)
- logger.warning(m18n.n('backup_delete_error', path=backup_file))
+ logger.warning(m18n.n("backup_delete_error", path=backup_file))
- hook_callback('post_backup_delete', args=[name])
+ hook_callback("post_backup_delete", args=[name])
+
+ logger.success(m18n.n("backup_deleted"))
- logger.success(m18n.n('backup_deleted'))
#
@@ -2610,10 +2790,10 @@ def backup_repository_remove(name, purge):
def _create_archive_dir():
- """ Create the YunoHost archives directory if doesn't exist """
+ """Create the YunoHost archives directory if doesn't exist"""
if not os.path.isdir(ARCHIVES_PATH):
if os.path.lexists(ARCHIVES_PATH):
- raise YunohostError('backup_output_symlink_dir_broken', path=ARCHIVES_PATH)
+ raise YunohostError("backup_output_symlink_dir_broken", path=ARCHIVES_PATH)
# Create the archive folder, with 'admin' as owner, such that
# people can scp archives out of the server
@@ -2621,13 +2801,13 @@ def _create_archive_dir():
def _call_for_each_path(self, callback, csv_path=None):
- """ Call a callback for each path in csv """
+ """Call a callback for each path in csv"""
if csv_path is None:
csv_path = self.csv_path
with open(csv_path, "r") as backup_file:
- backup_csv = csv.DictReader(backup_file, fieldnames=['source', 'dest'])
+ backup_csv = csv.DictReader(backup_file, fieldnames=["source", "dest"])
for row in backup_csv:
- callback(self, row['source'], row['dest'])
+ callback(self, row["source"], row["dest"])
def _recursive_umount(directory):
@@ -2637,31 +2817,48 @@ def _recursive_umount(directory):
Args:
directory -- a directory path
"""
- mount_lines = subprocess.check_output("mount").split("\n")
+ mount_lines = check_output("mount").split("\n")
- points_to_umount = [line.split(" ")[2]
- for line in mount_lines
- if len(line) >= 3 and line.split(" ")[2].startswith(directory)]
+ points_to_umount = [
+ line.split(" ")[2]
+ for line in mount_lines
+ if len(line) >= 3 and line.split(" ")[2].startswith(os.path.realpath(directory))
+ ]
everything_went_fine = True
for point in reversed(points_to_umount):
ret = subprocess.call(["umount", point])
if ret != 0:
everything_went_fine = False
- logger.warning(m18n.n('backup_cleaning_failed', point))
+ logger.warning(m18n.n("backup_cleaning_failed", point))
continue
return everything_went_fine
-def free_space_in_directory(dirpath):
- stat = os.statvfs(dirpath)
- return stat.f_frsize * stat.f_bavail
-
-
def disk_usage(path):
# We don't do this in python with os.stat because we don't want
# to follow symlinks
- du_output = subprocess.check_output(['du', '-sb', path])
- return int(du_output.split()[0].decode('utf-8'))
+ du_output = check_output(["du", "-sb", path], shell=False)
+ return int(du_output.split()[0])
+
+
+def binary_to_human(n, customary=False):
+ """
+ Convert bytes or bits into human readable format with binary prefix
+ Keyword argument:
+ n -- Number to convert
+ customary -- Use customary symbol instead of IEC standard
+ """
+ symbols = ("Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi")
+ if customary:
+ symbols = ("K", "M", "G", "T", "P", "E", "Z", "Y")
+ prefix = {}
+ for i, s in enumerate(symbols):
+ prefix[s] = 1 << (i + 1) * 10
+ for s in reversed(symbols):
+ if n >= prefix[s]:
+ value = float(n) / prefix[s]
+ return "%.1f%s" % (value, s)
+ return "%s" % n
diff --git a/src/yunohost/certificate.py b/src/yunohost/certificate.py
index d141ac8e5..817f9d57a 100644
--- a/src/yunohost/certificate.py
+++ b/src/yunohost/certificate.py
@@ -27,27 +27,25 @@ import sys
import shutil
import pwd
import grp
-import smtplib
import subprocess
-import dns.resolver
import glob
from datetime import datetime
-from yunohost.vendor.acme_tiny.acme_tiny import get_crt as sign_certificate
-
-from yunohost.utils.error import YunohostError
+from moulinette import m18n
from moulinette.utils.log import getActionLogger
+from moulinette.utils.filesystem import read_file
+from yunohost.vendor.acme_tiny.acme_tiny import get_crt as sign_certificate
+from yunohost.utils.error import YunohostError, YunohostValidationError
from yunohost.utils.network import get_public_ip
-from moulinette import m18n
-from yunohost.app import app_ssowatconf
+from yunohost.diagnosis import Diagnoser
from yunohost.service import _run_service_command
from yunohost.regenconf import regen_conf
from yunohost.log import OperationLogger
-logger = getActionLogger('yunohost.certmanager')
+logger = getActionLogger("yunohost.certmanager")
CERT_FOLDER = "/etc/yunohost/certs/"
TMP_FOLDER = "/tmp/acme-challenge-private/"
@@ -56,31 +54,17 @@ WEBROOT_FOLDER = "/tmp/acme-challenge-public/"
SELF_CA_FILE = "/etc/ssl/certs/ca-yunohost_crt.pem"
ACCOUNT_KEY_FILE = "/etc/yunohost/letsencrypt_account.pem"
-SSL_DIR = '/usr/share/yunohost/yunohost-config/ssl/yunoCA'
+SSL_DIR = "/usr/share/yunohost/yunohost-config/ssl/yunoCA"
KEY_SIZE = 3072
VALIDITY_LIMIT = 15 # days
# For tests
-STAGING_CERTIFICATION_AUTHORITY = "https://acme-staging.api.letsencrypt.org"
+STAGING_CERTIFICATION_AUTHORITY = "https://acme-staging-v02.api.letsencrypt.org"
# For prod
PRODUCTION_CERTIFICATION_AUTHORITY = "https://acme-v02.api.letsencrypt.org"
-INTERMEDIATE_CERTIFICATE_URL = "https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem"
-
-DNS_RESOLVERS = [
- # FFDN DNS resolvers
- # See https://www.ffdn.org/wiki/doku.php?id=formations:dns
- "80.67.169.12", # FDN
- "80.67.169.40", #
- "89.234.141.66", # ARN
- "141.255.128.100", # Aquilenet
- "141.255.128.101",
- "89.234.186.18", # Grifon
- "80.67.188.188" # LDN
-]
-
#
# Front-end stuff #
#
@@ -99,14 +83,11 @@ def certificate_status(domain_list, full=False):
# If no domains given, consider all yunohost domains
if domain_list == []:
- domain_list = yunohost.domain.domain_list()['domains']
+ domain_list = yunohost.domain.domain_list()["domains"]
# Else, validate that yunohost knows the domains given
else:
- yunohost_domains_list = yunohost.domain.domain_list()['domains']
for domain in domain_list:
- # Is it in Yunohost domain list?
- if domain not in yunohost_domains_list:
- raise YunohostError('certmanager_domain_unknown', domain=domain)
+ yunohost.domain._assert_domain_exists(domain)
certificates = {}
@@ -116,17 +97,25 @@ def certificate_status(domain_list, full=False):
if not full:
del status["subject"]
del status["CA_name"]
- del status["ACME_eligible"]
status["CA_type"] = status["CA_type"]["verbose"]
status["summary"] = status["summary"]["verbose"]
+ if full:
+ try:
+ _check_domain_is_ready_for_ACME(domain)
+ status["ACME_eligible"] = True
+ except Exception:
+ status["ACME_eligible"] = False
+
del status["domain"]
certificates[domain] = status
return {"certificates": certificates}
-def certificate_install(domain_list, force=False, no_checks=False, self_signed=False, staging=False):
+def certificate_install(
+ domain_list, force=False, no_checks=False, self_signed=False, staging=False
+):
"""
Install a Let's Encrypt certificate for given domains (all by default)
@@ -141,21 +130,24 @@ def certificate_install(domain_list, force=False, no_checks=False, self_signed=F
if self_signed:
_certificate_install_selfsigned(domain_list, force)
else:
- _certificate_install_letsencrypt(
- domain_list, force, no_checks, staging)
+ _certificate_install_letsencrypt(domain_list, force, no_checks, staging)
def _certificate_install_selfsigned(domain_list, force=False):
for domain in domain_list:
- operation_logger = OperationLogger('selfsigned_cert_install', [('domain', domain)],
- args={'force': force})
+ operation_logger = OperationLogger(
+ "selfsigned_cert_install", [("domain", domain)], args={"force": force}
+ )
# Paths of files and folder we'll need
date_tag = datetime.utcnow().strftime("%Y%m%d.%H%M%S")
new_cert_folder = "%s/%s-history/%s-selfsigned" % (
- CERT_FOLDER, domain, date_tag)
+ CERT_FOLDER,
+ domain,
+ date_tag,
+ )
conf_template = os.path.join(SSL_DIR, "openssl.cnf")
@@ -170,8 +162,10 @@ def _certificate_install_selfsigned(domain_list, force=False):
if not force and os.path.isfile(current_cert_file):
status = _get_status(domain)
- if status["summary"]["code"] in ('good', 'great'):
- raise YunohostError('certmanager_attempt_to_replace_valid_cert', domain=domain)
+ if status["summary"]["code"] in ("good", "great"):
+ raise YunohostValidationError(
+ "certmanager_attempt_to_replace_valid_cert", domain=domain
+ )
operation_logger.start()
@@ -195,13 +189,16 @@ def _certificate_install_selfsigned(domain_list, force=False):
for command in commands:
p = subprocess.Popen(
- command.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ command.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT
+ )
out, _ = p.communicate()
+ out = out.decode("utf-8")
+
if p.returncode != 0:
logger.warning(out)
- raise YunohostError('domain_cert_gen_failed')
+ raise YunohostError("domain_cert_gen_failed")
else:
logger.debug(out)
@@ -227,17 +224,27 @@ def _certificate_install_selfsigned(domain_list, force=False):
# Check new status indicate a recently created self-signed certificate
status = _get_status(domain)
- if status and status["CA_type"]["code"] == "self-signed" and status["validity"] > 3648:
+ if (
+ status
+ and status["CA_type"]["code"] == "self-signed"
+ and status["validity"] > 3648
+ ):
logger.success(
- m18n.n("certmanager_cert_install_success_selfsigned", domain=domain))
+ m18n.n("certmanager_cert_install_success_selfsigned", domain=domain)
+ )
operation_logger.success()
else:
- msg = "Installation of self-signed certificate installation for %s failed !" % (domain)
+ msg = (
+ "Installation of self-signed certificate for %s failed !"
+ % (domain)
+ )
logger.error(msg)
operation_logger.error(msg)
-def _certificate_install_letsencrypt(domain_list, force=False, no_checks=False, staging=False):
+def _certificate_install_letsencrypt(
+ domain_list, force=False, no_checks=False, staging=False
+):
import yunohost.domain
if not os.path.exists(ACCOUNT_KEY_FILE):
@@ -246,7 +253,7 @@ def _certificate_install_letsencrypt(domain_list, force=False, no_checks=False,
# If no domains given, consider all yunohost domains with self-signed
# certificates
if domain_list == []:
- for domain in yunohost.domain.domain_list()['domains']:
+ for domain in yunohost.domain.domain_list()["domains"]:
status = _get_status(domain)
if status["CA_type"]["code"] != "self-signed":
@@ -257,50 +264,62 @@ def _certificate_install_letsencrypt(domain_list, force=False, no_checks=False,
# Else, validate that yunohost knows the domains given
else:
for domain in domain_list:
- yunohost_domains_list = yunohost.domain.domain_list()['domains']
- if domain not in yunohost_domains_list:
- raise YunohostError('certmanager_domain_unknown', domain=domain)
+ yunohost.domain._assert_domain_exists(domain)
# Is it self-signed?
status = _get_status(domain)
if not force and status["CA_type"]["code"] != "self-signed":
- raise YunohostError('certmanager_domain_cert_not_selfsigned', domain=domain)
+ raise YunohostValidationError(
+ "certmanager_domain_cert_not_selfsigned", domain=domain
+ )
if staging:
logger.warning(
- "Please note that you used the --staging option, and that no new certificate will actually be enabled !")
+ "Please note that you used the --staging option, and that no new certificate will actually be enabled !"
+ )
# Actual install steps
for domain in domain_list:
- operation_logger = OperationLogger('letsencrypt_cert_install', [('domain', domain)],
- args={'force': force, 'no_checks': no_checks,
- 'staging': staging})
- logger.info(
- "Now attempting install of certificate for domain %s!", domain)
+ if not no_checks:
+ try:
+ _check_domain_is_ready_for_ACME(domain)
+ except Exception as e:
+ logger.error(e)
+ continue
+
+ logger.info("Now attempting install of certificate for domain %s!", domain)
+
+ operation_logger = OperationLogger(
+ "letsencrypt_cert_install",
+ [("domain", domain)],
+ args={"force": force, "no_checks": no_checks, "staging": staging},
+ )
+ operation_logger.start()
try:
- if not no_checks:
- _check_domain_is_ready_for_ACME(domain)
-
- operation_logger.start()
-
- _configure_for_acme_challenge(domain)
_fetch_and_enable_new_certificate(domain, staging, no_checks=no_checks)
- _install_cron(no_checks=no_checks)
-
- logger.success(
- m18n.n("certmanager_cert_install_success", domain=domain))
-
- operation_logger.success()
except Exception as e:
- _display_debug_information(domain)
- msg = "Certificate installation for %s failed !\nException: %s" % (domain, e)
+ msg = "Certificate installation for %s failed !\nException: %s" % (
+ domain,
+ e,
+ )
logger.error(msg)
operation_logger.error(msg)
+ if no_checks:
+ logger.error(
+ "Please consider checking the 'DNS records' (basic) and 'Web' categories of the diagnosis to check for possible issues that may prevent installing a Let's Encrypt certificate on domain %s."
+ % domain
+ )
+ else:
+ logger.success(m18n.n("certmanager_cert_install_success", domain=domain))
+
+ operation_logger.success()
-def certificate_renew(domain_list, force=False, no_checks=False, email=False, staging=False):
+def certificate_renew(
+ domain_list, force=False, no_checks=False, email=False, staging=False
+):
"""
Renew Let's Encrypt certificate for given domains (all by default)
@@ -317,7 +336,7 @@ def certificate_renew(domain_list, force=False, no_checks=False, email=False, st
# If no domains given, consider all yunohost domains with Let's Encrypt
# certificates
if domain_list == []:
- for domain in yunohost.domain.domain_list()['domains']:
+ for domain in yunohost.domain.domain_list()["domains"]:
# Does it have a Let's Encrypt cert?
status = _get_status(domain)
@@ -330,8 +349,9 @@ def certificate_renew(domain_list, force=False, no_checks=False, email=False, st
# Check ACME challenge configured for given domain
if not _check_acme_challenge_configuration(domain):
- logger.warning(m18n.n(
- 'certmanager_acme_not_configured_for_domain', domain=domain))
+ logger.warning(
+ m18n.n("certmanager_acme_not_configured_for_domain", domain=domain)
+ )
continue
domain_list.append(domain)
@@ -343,57 +363,75 @@ def certificate_renew(domain_list, force=False, no_checks=False, email=False, st
else:
for domain in domain_list:
- # Is it in Yunohost dmomain list?
- if domain not in yunohost.domain.domain_list()['domains']:
- raise YunohostError('certmanager_domain_unknown', domain=domain)
+ # Is it in Yunohost domain list?
+ yunohost.domain._assert_domain_exists(domain)
status = _get_status(domain)
# Does it expire soon?
if status["validity"] > VALIDITY_LIMIT and not force:
- raise YunohostError('certmanager_attempt_to_renew_valid_cert', domain=domain)
+ raise YunohostValidationError(
+ "certmanager_attempt_to_renew_valid_cert", domain=domain
+ )
# Does it have a Let's Encrypt cert?
if status["CA_type"]["code"] != "lets-encrypt":
- raise YunohostError('certmanager_attempt_to_renew_nonLE_cert', domain=domain)
+ raise YunohostValidationError(
+ "certmanager_attempt_to_renew_nonLE_cert", domain=domain
+ )
# Check ACME challenge configured for given domain
if not _check_acme_challenge_configuration(domain):
- raise YunohostError('certmanager_acme_not_configured_for_domain', domain=domain)
+ raise YunohostValidationError(
+ "certmanager_acme_not_configured_for_domain", domain=domain
+ )
if staging:
logger.warning(
- "Please note that you used the --staging option, and that no new certificate will actually be enabled !")
+ "Please note that you used the --staging option, and that no new certificate will actually be enabled !"
+ )
# Actual renew steps
for domain in domain_list:
- operation_logger = OperationLogger('letsencrypt_cert_renew', [('domain', domain)],
- args={'force': force, 'no_checks': no_checks,
- 'staging': staging, 'email': email})
+ if not no_checks:
+ try:
+ _check_domain_is_ready_for_ACME(domain)
+ except Exception as e:
+ logger.error(e)
+ if email:
+ logger.error("Sending email with details to root ...")
+ _email_renewing_failed(domain, e)
+ continue
- logger.info(
- "Now attempting renewing of certificate for domain %s !", domain)
+ logger.info("Now attempting renewing of certificate for domain %s !", domain)
+
+ operation_logger = OperationLogger(
+ "letsencrypt_cert_renew",
+ [("domain", domain)],
+ args={
+ "force": force,
+ "no_checks": no_checks,
+ "staging": staging,
+ "email": email,
+ },
+ )
+ operation_logger.start()
try:
- if not no_checks:
- _check_domain_is_ready_for_ACME(domain)
-
- operation_logger.start()
-
_fetch_and_enable_new_certificate(domain, staging, no_checks=no_checks)
-
- logger.success(
- m18n.n("certmanager_cert_renew_success", domain=domain))
-
- operation_logger.success()
-
except Exception as e:
import traceback
- from StringIO import StringIO
+ from io import StringIO
+
stack = StringIO()
traceback.print_exc(file=stack)
- msg = "Certificate renewing for %s failed !" % (domain)
+ msg = "Certificate renewing for %s failed!" % (domain)
+ if no_checks:
+ msg += (
+ "\nPlease consider checking the 'DNS records' (basic) and 'Web' categories of the diagnosis to check for possible issues that may prevent installing a Let's Encrypt certificate on domain %s."
+ % domain
+ )
logger.error(msg)
operation_logger.error(msg)
logger.error(stack.getvalue())
@@ -401,39 +439,18 @@ def certificate_renew(domain_list, force=False, no_checks=False, email=False, st
if email:
logger.error("Sending email with details to root ...")
- _email_renewing_failed(domain, e, stack.getvalue())
+ _email_renewing_failed(domain, msg + "\n" + str(e), stack.getvalue())
+ else:
+ logger.success(m18n.n("certmanager_cert_renew_success", domain=domain))
+ operation_logger.success()
+
#
# Back-end stuff #
#
-def _install_cron(no_checks=False):
- cron_job_file = "/etc/cron.daily/yunohost-certificate-renew"
-
- # we need to check if "--no-checks" isn't already put inside the existing
- # crontab, if it's the case it's probably because another domain needed it
- # at some point so we keep it
- if not no_checks and os.path.exists(cron_job_file):
- with open(cron_job_file, "r") as f:
- # no the best test in the world but except if we uses a shell
- # script parser I'm not expected a much more better way to do that
- no_checks = "--no-checks" in f.read()
-
- command = "yunohost domain cert-renew --email\n"
-
- if no_checks:
- # handle trailing "\n with ":-1"
- command = command[:-1] + " --no-checks\n"
-
- with open(cron_job_file, "w") as f:
- f.write("#!/bin/bash\n")
- f.write(command)
-
- _set_permissions(cron_job_file, "root", "root", 0o755)
-
-
-def _email_renewing_failed(domain, exception_message, stack):
+def _email_renewing_failed(domain, exception_message, stack=""):
from_ = "certmanager@%s (Certificate Manager)" % domain
to_ = "root"
subject_ = "Certificate renewing attempt for %s failed!" % domain
@@ -453,7 +470,12 @@ investigate :
-- Certificate Manager
-""" % (domain, exception_message, stack, logs)
+""" % (
+ domain,
+ exception_message,
+ stack,
+ logs,
+ )
message = """\
From: %s
@@ -461,71 +483,37 @@ To: %s
Subject: %s
%s
-""" % (from_, to_, subject_, text)
+""" % (
+ from_,
+ to_,
+ subject_,
+ text,
+ )
+
+ import smtplib
smtp = smtplib.SMTP("localhost")
- smtp.sendmail(from_, [to_], message)
+ smtp.sendmail(from_, [to_], message.encode("utf-8"))
smtp.quit()
-def _configure_for_acme_challenge(domain):
-
- nginx_conf_folder = "/etc/nginx/conf.d/%s.d" % domain
- nginx_conf_file = "%s/000-acmechallenge.conf" % nginx_conf_folder
-
- nginx_configuration = '''
-location ^~ '/.well-known/acme-challenge/'
-{
- default_type "text/plain";
- alias %s;
-}
- ''' % WEBROOT_FOLDER
-
- # Check there isn't a conflicting file for the acme-challenge well-known
- # uri
- for path in glob.glob('%s/*.conf' % nginx_conf_folder):
-
- if path == nginx_conf_file:
- continue
-
- with open(path) as f:
- contents = f.read()
-
- if '/.well-known/acme-challenge' in contents:
- raise YunohostError('certmanager_conflicting_nginx_file', filepath=path)
-
- # Write the conf
- if os.path.exists(nginx_conf_file):
- logger.debug(
- "Nginx configuration file for ACME challenge already exists for domain, skipping.")
- return
-
- logger.debug(
- "Adding Nginx configuration file for Acme challenge for domain %s.", domain)
-
- with open(nginx_conf_file, "w") as f:
- f.write(nginx_configuration)
-
- # Assume nginx conf is okay, and reload it
- # (FIXME : maybe add a check that it is, using nginx -t, haven't found
- # any clean function already implemented in yunohost to do this though)
- _run_service_command("reload", "nginx")
-
- app_ssowatconf()
-
-
def _check_acme_challenge_configuration(domain):
- # Check nginx conf file exists
- nginx_conf_folder = "/etc/nginx/conf.d/%s.d" % domain
- nginx_conf_file = "%s/000-acmechallenge.conf" % nginx_conf_folder
- if not os.path.exists(nginx_conf_file):
- return False
- else:
+ domain_conf = "/etc/nginx/conf.d/%s.conf" % domain
+ if "include /etc/nginx/conf.d/acme-challenge.conf.inc" in read_file(domain_conf):
return True
+ else:
+ # This is for legacy setups which haven't updated their domain conf to
+ # the new conf that include the acme snippet...
+ legacy_acme_conf = "/etc/nginx/conf.d/%s.d/000-acmechallenge.conf" % domain
+ return os.path.exists(legacy_acme_conf)
def _fetch_and_enable_new_certificate(domain, staging=False, no_checks=False):
+
+ if not os.path.exists(ACCOUNT_KEY_FILE):
+ _generate_account_key()
+
# Make sure tmp folder exists
logger.debug("Making sure tmp folders exists...")
@@ -542,8 +530,7 @@ def _fetch_and_enable_new_certificate(domain, staging=False, no_checks=False):
_regen_dnsmasq_if_needed()
# Prepare certificate signing request
- logger.debug(
- "Prepare key and certificate signing request (CSR) for %s...", domain)
+ logger.debug("Prepare key and certificate signing request (CSR) for %s...", domain)
domain_key_file = "%s/%s.pem" % (TMP_FOLDER, domain)
_generate_key(domain_key_file)
@@ -562,30 +549,25 @@ def _fetch_and_enable_new_certificate(domain, staging=False, no_checks=False):
certification_authority = PRODUCTION_CERTIFICATION_AUTHORITY
try:
- signed_certificate = sign_certificate(ACCOUNT_KEY_FILE,
- domain_csr_file,
- WEBROOT_FOLDER,
- log=logger,
- disable_check=no_checks,
- CA=certification_authority)
+ signed_certificate = sign_certificate(
+ ACCOUNT_KEY_FILE,
+ domain_csr_file,
+ WEBROOT_FOLDER,
+ log=logger,
+ disable_check=no_checks,
+ CA=certification_authority,
+ )
except ValueError as e:
if "urn:acme:error:rateLimited" in str(e):
- raise YunohostError('certmanager_hit_rate_limit', domain=domain)
+ raise YunohostError("certmanager_hit_rate_limit", domain=domain)
else:
logger.error(str(e))
- _display_debug_information(domain)
- raise YunohostError('certmanager_cert_signing_failed')
+ raise YunohostError("certmanager_cert_signing_failed")
except Exception as e:
logger.error(str(e))
- raise YunohostError('certmanager_cert_signing_failed')
-
- import requests # lazy loading this module for performance reasons
- try:
- intermediate_certificate = requests.get(INTERMEDIATE_CERTIFICATE_URL, timeout=30).text
- except requests.exceptions.Timeout as e:
- raise YunohostError('certmanager_couldnt_fetch_intermediate_cert')
+ raise YunohostError("certmanager_cert_signing_failed")
# Now save the key and signed certificate
logger.debug("Saving the key and signed certificate...")
@@ -599,7 +581,11 @@ def _fetch_and_enable_new_certificate(domain, staging=False, no_checks=False):
folder_flag = "letsencrypt"
new_cert_folder = "%s/%s-history/%s-%s" % (
- CERT_FOLDER, domain, date_tag, folder_flag)
+ CERT_FOLDER,
+ domain,
+ date_tag,
+ folder_flag,
+ )
os.makedirs(new_cert_folder)
@@ -615,7 +601,6 @@ def _fetch_and_enable_new_certificate(domain, staging=False, no_checks=False):
with open(domain_cert_file, "w") as f:
f.write(signed_certificate)
- f.write(intermediate_certificate)
_set_permissions(domain_cert_file, "root", "ssl-cert", 0o640)
@@ -628,19 +613,52 @@ def _fetch_and_enable_new_certificate(domain, staging=False, no_checks=False):
status_summary = _get_status(domain)["summary"]
if status_summary["code"] != "great":
- raise YunohostError('certmanager_certificate_fetching_or_enabling_failed', domain=domain)
+ raise YunohostError(
+ "certmanager_certificate_fetching_or_enabling_failed", domain=domain
+ )
def _prepare_certificate_signing_request(domain, key_file, output_folder):
from OpenSSL import crypto # lazy loading this module for performance reasons
+
# Init a request
csr = crypto.X509Req()
# Set the domain
csr.get_subject().CN = domain
+ from yunohost.domain import domain_list
+
+ # For "parent" domains, include xmpp-upload subdomain in subject alternate names
+ if domain in domain_list(exclude_subdomains=True)["domains"]:
+ subdomain = "xmpp-upload." + domain
+ xmpp_records = (
+ Diagnoser.get_cached_report(
+ "dnsrecords", item={"domain": domain, "category": "xmpp"}
+ ).get("data")
+ or {}
+ )
+ if xmpp_records.get("CNAME:xmpp-upload") == "OK":
+ csr.add_extensions(
+ [
+ crypto.X509Extension(
+ "subjectAltName".encode("utf8"),
+ False,
+ ("DNS:" + subdomain).encode("utf8"),
+ )
+ ]
+ )
+ else:
+ logger.warning(
+ m18n.n(
+ "certmanager_warning_subdomain_dns_record",
+ subdomain=subdomain,
+ domain=domain,
+ )
+ )
+
# Set the key
- with open(key_file, 'rt') as f:
+ with open(key_file, "rt") as f:
key = crypto.load_privatekey(crypto.FILETYPE_PEM, f.read())
csr.set_pubkey(key)
@@ -652,7 +670,7 @@ def _prepare_certificate_signing_request(domain, key_file, output_folder):
csr_file = output_folder + domain + ".csr"
logger.debug("Saving to %s.", csr_file)
- with open(csr_file, "w") as f:
+ with open(csr_file, "wb") as f:
f.write(crypto.dump_certificate_request(crypto.FILETYPE_PEM, csr))
@@ -661,29 +679,38 @@ def _get_status(domain):
cert_file = os.path.join(CERT_FOLDER, domain, "crt.pem")
if not os.path.isfile(cert_file):
- raise YunohostError('certmanager_no_cert_file', domain=domain, file=cert_file)
+ raise YunohostError("certmanager_no_cert_file", domain=domain, file=cert_file)
from OpenSSL import crypto # lazy loading this module for performance reasons
+
try:
- cert = crypto.load_certificate(
- crypto.FILETYPE_PEM, open(cert_file).read())
+ cert = crypto.load_certificate(crypto.FILETYPE_PEM, open(cert_file).read())
except Exception as exception:
import traceback
+
traceback.print_exc(file=sys.stdout)
- raise YunohostError('certmanager_cannot_read_cert', domain=domain, file=cert_file, reason=exception)
+ raise YunohostError(
+ "certmanager_cannot_read_cert",
+ domain=domain,
+ file=cert_file,
+ reason=exception,
+ )
cert_subject = cert.get_subject().CN
cert_issuer = cert.get_issuer().CN
- valid_up_to = datetime.strptime(cert.get_notAfter(), "%Y%m%d%H%M%SZ")
+ organization_name = cert.get_issuer().O
+ valid_up_to = datetime.strptime(
+ cert.get_notAfter().decode("utf-8"), "%Y%m%d%H%M%SZ"
+ )
days_remaining = (valid_up_to - datetime.utcnow()).days
- if cert_issuer == _name_self_CA():
+ if cert_issuer == "yunohost.org" or cert_issuer == _name_self_CA():
CA_type = {
"code": "self-signed",
"verbose": "Self-signed",
}
- elif cert_issuer.startswith("Let's Encrypt"):
+ elif organization_name == "Let's Encrypt":
CA_type = {
"code": "lets-encrypt",
"verbose": "Let's Encrypt",
@@ -737,12 +764,6 @@ def _get_status(domain):
"verbose": "Unknown?",
}
- try:
- _check_domain_is_ready_for_ACME(domain)
- ACME_eligible = True
- except:
- ACME_eligible = False
-
return {
"domain": domain,
"subject": cert_subject,
@@ -750,9 +771,9 @@ def _get_status(domain):
"CA_type": CA_type,
"validity": days_remaining,
"summary": status_summary,
- "ACME_eligible": ACME_eligible
}
+
#
# Misc small stuff ... #
#
@@ -766,10 +787,11 @@ def _generate_account_key():
def _generate_key(destination_path):
from OpenSSL import crypto # lazy loading this module for performance reasons
+
k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, KEY_SIZE)
- with open(destination_path, "w") as f:
+ with open(destination_path, "wb") as f:
f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k))
@@ -804,15 +826,16 @@ def _enable_certificate(domain, new_cert_folder):
for service in ("postfix", "dovecot", "metronome"):
_run_service_command("restart", service)
- if os.path.isfile('/etc/yunohost/installed'):
+ if os.path.isfile("/etc/yunohost/installed"):
# regen nginx conf to be sure it integrates OCSP Stapling
# (We don't do this yet if postinstall is not finished yet)
- regen_conf(names=['nginx'])
+ regen_conf(names=["nginx"])
_run_service_command("reload", "nginx")
from yunohost.hook import hook_callback
- hook_callback('post_cert_update', args=[domain])
+
+ hook_callback("post_cert_update", args=[domain])
def _backup_current_cert(domain):
@@ -827,68 +850,41 @@ def _backup_current_cert(domain):
def _check_domain_is_ready_for_ACME(domain):
- public_ip = get_public_ip()
+
+ dnsrecords = (
+ Diagnoser.get_cached_report(
+ "dnsrecords",
+ item={"domain": domain, "category": "basic"},
+ warn_if_no_cache=False,
+ )
+ or {}
+ )
+ httpreachable = (
+ Diagnoser.get_cached_report(
+ "web", item={"domain": domain}, warn_if_no_cache=False
+ )
+ or {}
+ )
+
+ if not dnsrecords or not httpreachable:
+ raise YunohostValidationError(
+ "certmanager_domain_not_diagnosed_yet", domain=domain
+ )
# Check if IP from DNS matches public IP
- if not _dns_ip_match_public_ip(public_ip, domain):
- raise YunohostError('certmanager_domain_dns_ip_differs_from_public_ip', domain=domain)
+ if not dnsrecords.get("status") in [
+ "SUCCESS",
+ "WARNING",
+ ]: # Warning is for missing IPv6 record which ain't critical for ACME
+ raise YunohostValidationError(
+ "certmanager_domain_dns_ip_differs_from_public_ip", domain=domain
+ )
# Check if domain seems to be accessible through HTTP?
- if not _domain_is_accessible_through_HTTP(public_ip, domain):
- raise YunohostError('certmanager_domain_http_not_working', domain=domain)
-
-
-def _get_dns_ip(domain):
- try:
- resolver = dns.resolver.Resolver()
- resolver.nameservers = DNS_RESOLVERS
- answers = resolver.query(domain, "A")
- except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN):
- raise YunohostError('certmanager_error_no_A_record', domain=domain)
-
- return str(answers[0])
-
-
-def _dns_ip_match_public_ip(public_ip, domain):
- return _get_dns_ip(domain) == public_ip
-
-
-def _domain_is_accessible_through_HTTP(ip, domain):
- import requests # lazy loading this module for performance reasons
- try:
- requests.head("http://" + ip, headers={"Host": domain}, timeout=10)
- except requests.exceptions.Timeout as e:
- logger.warning(m18n.n('certmanager_http_check_timeout', domain=domain, ip=ip))
- return False
- except Exception as e:
- logger.debug("Couldn't reach domain '%s' by requesting this ip '%s' because: %s" % (domain, ip, e))
- return False
-
- return True
-
-
-def _get_local_dns_ip(domain):
- try:
- resolver = dns.resolver.Resolver()
- answers = resolver.query(domain, "A")
- except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN):
- logger.warning("Failed to resolved domain '%s' locally", domain)
- return None
-
- return str(answers[0])
-
-
-def _display_debug_information(domain):
- dns_ip = _get_dns_ip(domain)
- public_ip = get_public_ip()
- local_dns_ip = _get_local_dns_ip(domain)
-
- logger.warning("""\
-Debug information:
- - domain ip from DNS %s
- - domain ip from local DNS %s
- - public ip of the server %s
-""", dns_ip, local_dns_ip, public_ip)
+ if not httpreachable.get("status") == "SUCCESS":
+ raise YunohostValidationError(
+ "certmanager_domain_http_not_working", domain=domain
+ )
# FIXME / TODO : ideally this should not be needed. There should be a proper
@@ -909,11 +905,11 @@ def _regen_dnsmasq_if_needed():
for domainconf in domainsconf:
# Look for the IP, it's in the lines with this format :
- # address=/the.domain.tld/11.22.33.44
+ # host-record=the.domain.tld,11.22.33.44
for line in open(domainconf).readlines():
- if not line.startswith("address"):
+ if not line.startswith("host-record"):
continue
- ip = line.strip().split("/")[2]
+ ip = line.strip().split(",")[-1]
# Compared found IP to current IPv4 / IPv6
# IPv6 IPv4
@@ -932,7 +928,7 @@ def _name_self_CA():
ca_conf = os.path.join(SSL_DIR, "openssl.ca.cnf")
if not os.path.exists(ca_conf):
- logger.warning(m18n.n('certmanager_self_ca_conf_file_not_found', file=ca_conf))
+ logger.warning(m18n.n("certmanager_self_ca_conf_file_not_found", file=ca_conf))
return ""
with open(ca_conf) as f:
@@ -942,16 +938,11 @@ def _name_self_CA():
if line.startswith("commonName_default"):
return line.split()[2]
- logger.warning(m18n.n('certmanager_unable_to_parse_self_CA_name', file=ca_conf))
+ logger.warning(m18n.n("certmanager_unable_to_parse_self_CA_name", file=ca_conf))
return ""
def _tail(n, file_path):
- stdin, stdout = os.popen2("tail -n %s '%s'" % (n, file_path))
+ from moulinette.utils.process import check_output
- stdin.close()
-
- lines = stdout.readlines()
- stdout.close()
-
- return "".join(lines)
+ return check_output(f"tail -n {n} '{file_path}'")
diff --git a/src/yunohost/data_migrations/0001_change_cert_group_to_sslcert.py b/src/yunohost/data_migrations/0001_change_cert_group_to_sslcert.py
deleted file mode 100644
index 6485861b7..000000000
--- a/src/yunohost/data_migrations/0001_change_cert_group_to_sslcert.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import subprocess
-import glob
-from yunohost.tools import Migration
-from moulinette.utils.filesystem import chown
-
-
-class MyMigration(Migration):
-
- "Change certificates group permissions from 'metronome' to 'ssl-cert'"
-
- all_certificate_files = glob.glob("/etc/yunohost/certs/*/*.pem")
-
- def forward(self):
- for filename in self.all_certificate_files:
- chown(filename, uid="root", gid="ssl-cert")
-
- def backward(self):
- for filename in self.all_certificate_files:
- chown(filename, uid="root", gid="metronome")
diff --git a/src/yunohost/data_migrations/0002_migrate_to_tsig_sha256.py b/src/yunohost/data_migrations/0002_migrate_to_tsig_sha256.py
deleted file mode 100644
index 824245c82..000000000
--- a/src/yunohost/data_migrations/0002_migrate_to_tsig_sha256.py
+++ /dev/null
@@ -1,90 +0,0 @@
-import glob
-import os
-import requests
-import base64
-import time
-import json
-
-from moulinette import m18n
-from yunohost.utils.error import YunohostError
-from moulinette.utils.log import getActionLogger
-
-from yunohost.tools import Migration
-from yunohost.dyndns import _guess_current_dyndns_domain
-
-logger = getActionLogger('yunohost.migration')
-
-
-class MyMigration(Migration):
-
- "Migrate Dyndns stuff from MD5 TSIG to SHA512 TSIG"
-
- def backward(self):
- # Not possible because that's a non-reversible operation ?
- pass
-
- def migrate(self, dyn_host="dyndns.yunohost.org", domain=None, private_key_path=None):
-
- if domain is None or private_key_path is None:
- try:
- (domain, private_key_path) = _guess_current_dyndns_domain(dyn_host)
- assert "+157" in private_key_path
- except (YunohostError, AssertionError):
- logger.info(m18n.n("migrate_tsig_not_needed"))
- return
-
- logger.info(m18n.n('migrate_tsig_start', domain=domain))
- public_key_path = private_key_path.rsplit(".private", 1)[0] + ".key"
- public_key_md5 = open(public_key_path).read().strip().split(' ')[-1]
-
- os.system('cd /etc/yunohost/dyndns && '
- 'dnssec-keygen -a hmac-sha512 -b 512 -r /dev/urandom -n USER %s' % domain)
- os.system('chmod 600 /etc/yunohost/dyndns/*.key /etc/yunohost/dyndns/*.private')
-
- # +165 means that this file store a hmac-sha512 key
- new_key_path = glob.glob('/etc/yunohost/dyndns/*+165*.key')[0]
- public_key_sha512 = open(new_key_path).read().strip().split(' ', 6)[-1]
-
- try:
- r = requests.put('https://%s/migrate_key_to_sha512/' % (dyn_host),
- data={
- 'public_key_md5': base64.b64encode(public_key_md5),
- 'public_key_sha512': base64.b64encode(public_key_sha512),
- }, timeout=30)
- except requests.ConnectionError:
- raise YunohostError('no_internet_connection')
-
- if r.status_code != 201:
- try:
- error = json.loads(r.text)['error']
- except Exception:
- # failed to decode json
- error = r.text
-
- import traceback
- from StringIO import StringIO
- stack = StringIO()
- traceback.print_stack(file=stack)
- logger.error(stack.getvalue())
-
- # Migration didn't succeed, so we rollback and raise an exception
- os.system("mv /etc/yunohost/dyndns/*+165* /tmp")
-
- raise YunohostError('migrate_tsig_failed', domain=domain,
- error_code=str(r.status_code), error=error)
-
- # remove old certificates
- os.system("mv /etc/yunohost/dyndns/*+157* /tmp")
-
- # sleep to wait for dyndns cache invalidation
- logger.info(m18n.n('migrate_tsig_wait'))
- time.sleep(60)
- logger.info(m18n.n('migrate_tsig_wait_2'))
- time.sleep(60)
- logger.info(m18n.n('migrate_tsig_wait_3'))
- time.sleep(30)
- logger.info(m18n.n('migrate_tsig_wait_4'))
- time.sleep(30)
-
- logger.info(m18n.n('migrate_tsig_end'))
- return
diff --git a/src/yunohost/data_migrations/0003_migrate_to_stretch.py b/src/yunohost/data_migrations/0003_migrate_to_stretch.py
deleted file mode 100644
index 0db719e15..000000000
--- a/src/yunohost/data_migrations/0003_migrate_to_stretch.py
+++ /dev/null
@@ -1,383 +0,0 @@
-import glob
-import os
-from shutil import copy2
-
-from moulinette import m18n, msettings
-from yunohost.utils.error import YunohostError
-from moulinette.utils.log import getActionLogger
-from moulinette.utils.process import check_output, call_async_output
-from moulinette.utils.filesystem import read_file
-
-from yunohost.tools import Migration
-from yunohost.app import unstable_apps
-from yunohost.service import _run_service_command
-from yunohost.regenconf import (manually_modified_files,
- manually_modified_files_compared_to_debian_default)
-from yunohost.utils.filesystem import free_space_in_directory
-from yunohost.utils.packages import get_installed_version
-from yunohost.utils.network import get_network_interfaces
-from yunohost.firewall import firewall_allow, firewall_disallow
-
-logger = getActionLogger('yunohost.migration')
-
-YUNOHOST_PACKAGES = ["yunohost", "yunohost-admin", "moulinette", "ssowat"]
-
-
-class MyMigration(Migration):
-
- "Upgrade the system to Debian Stretch and Yunohost 3.0"
-
- mode = "manual"
-
- def backward(self):
-
- raise YunohostError("migration_0003_backward_impossible")
-
- def migrate(self):
-
- self.logfile = "/var/log/yunohost/{}.log".format(self.name)
-
- self.check_assertions()
-
- logger.info(m18n.n("migration_0003_start", logfile=self.logfile))
-
- # Preparing the upgrade
- self.restore_original_nginx_conf_if_needed()
-
- logger.info(m18n.n("migration_0003_patching_sources_list"))
- self.patch_apt_sources_list()
- self.backup_files_to_keep()
- self.apt_update()
- apps_packages = self.get_apps_equivs_packages()
- self.unhold(["metronome"])
- self.hold(YUNOHOST_PACKAGES + apps_packages + ["fail2ban"])
-
- # Main dist-upgrade
- logger.info(m18n.n("migration_0003_main_upgrade"))
- _run_service_command("stop", "mysql")
- self.apt_dist_upgrade(conf_flags=["old", "miss", "def"])
- _run_service_command("start", "mysql")
- if self.debian_major_version() == 8:
- raise YunohostError("migration_0003_still_on_jessie_after_main_upgrade", log=self.logfile)
-
- # Specific upgrade for fail2ban...
- logger.info(m18n.n("migration_0003_fail2ban_upgrade"))
- self.unhold(["fail2ban"])
- # Don't move this if folder already exists. If it does, we probably are
- # running this script a 2nd, 3rd, ... time but /etc/fail2ban will
- # be re-created only for the first dist-upgrade of fail2ban
- if not os.path.exists("/etc/fail2ban.old"):
- os.system("mv /etc/fail2ban /etc/fail2ban.old")
- self.apt_dist_upgrade(conf_flags=["new", "miss", "def"])
- _run_service_command("restart", "fail2ban")
-
- self.disable_predicable_interface_names()
-
- # Clean the mess
- os.system("apt autoremove --assume-yes")
- os.system("apt clean --assume-yes")
-
- # We moved to port 587 for SMTP
- # https://busylog.net/smtp-tls-ssl-25-465-587/
- firewall_allow("Both", 587)
- firewall_disallow("Both", 465)
-
- # Upgrade yunohost packages
- logger.info(m18n.n("migration_0003_yunohost_upgrade"))
- self.restore_files_to_keep()
- self.unhold(YUNOHOST_PACKAGES + apps_packages)
- self.upgrade_yunohost_packages()
-
- def debian_major_version(self):
- # The python module "platform" and lsb_release are not reliable because
- # on some setup, they still return Release=8 even after upgrading to
- # stretch ... (Apparently this is related to OVH overriding some stuff
- # with /etc/lsb-release for instance -_-)
- # Instead, we rely on /etc/os-release which should be the raw info from
- # the distribution...
- return int(check_output("grep VERSION_ID /etc/os-release | head -n 1 | tr '\"' ' ' | cut -d ' ' -f2"))
-
- def yunohost_major_version(self):
- return int(get_installed_version("yunohost").split('.')[0])
-
- def check_assertions(self):
-
- # Be on jessie (8.x) and yunohost 2.x
- # NB : we do both check to cover situations where the upgrade crashed
- # in the middle and debian version could be >= 9.x but yunohost package
- # would still be in 2.x...
- if not self.debian_major_version() == 8 \
- and not self.yunohost_major_version() == 2:
- raise YunohostError("migration_0003_not_jessie")
-
- # Have > 1 Go free space on /var/ ?
- if free_space_in_directory("/var/") / (1024**3) < 1.0:
- raise YunohostError("migration_0003_not_enough_free_space")
-
- # Check system is up to date
- # (but we don't if 'stretch' is already in the sources.list ...
- # which means maybe a previous upgrade crashed and we're re-running it)
- if " stretch " not in read_file("/etc/apt/sources.list"):
- self.apt_update()
- apt_list_upgradable = check_output("apt list --upgradable -a")
- if "upgradable" in apt_list_upgradable:
- raise YunohostError("migration_0003_system_not_fully_up_to_date")
-
- @property
- def disclaimer(self):
-
- # Avoid having a super long disclaimer + uncessary check if we ain't
- # on jessie / yunohost 2.x anymore
- # NB : we do both check to cover situations where the upgrade crashed
- # in the middle and debian version could be >= 9.x but yunohost package
- # would still be in 2.x...
- if not self.debian_major_version() == 8 \
- and not self.yunohost_major_version() == 2:
- return None
-
- # Get list of problematic apps ? I.e. not official or community+working
- problematic_apps = unstable_apps()
- problematic_apps = "".join(["\n - " + app for app in problematic_apps])
-
- # Manually modified files ? (c.f. yunohost service regen-conf)
- modified_files = manually_modified_files()
- # We also have a specific check for nginx.conf which some people
- # modified and needs to be upgraded...
- if "/etc/nginx/nginx.conf" in manually_modified_files_compared_to_debian_default():
- modified_files.append("/etc/nginx/nginx.conf")
- modified_files = "".join(["\n - " + f for f in modified_files])
-
- message = m18n.n("migration_0003_general_warning")
-
- if problematic_apps:
- message += "\n\n" + m18n.n("migration_0003_problematic_apps_warning", problematic_apps=problematic_apps)
-
- if modified_files:
- message += "\n\n" + m18n.n("migration_0003_modified_files", manually_modified_files=modified_files)
-
- return message
-
- def patch_apt_sources_list(self):
-
- sources_list = glob.glob("/etc/apt/sources.list.d/*.list")
- sources_list.append("/etc/apt/sources.list")
-
- # This :
- # - replace single 'jessie' occurence by 'stretch'
- # - comments lines containing "backports"
- # - replace 'jessie/updates' by 'strech/updates' (or same with a -)
- # - switch yunohost's repo to forge
- for f in sources_list:
- command = "sed -i -e 's@ jessie @ stretch @g' " \
- "-e '/backports/ s@^#*@#@' " \
- "-e 's@ jessie/updates @ stretch/updates @g' " \
- "-e 's@ jessie-updates @ stretch-updates @g' " \
- "-e 's@repo.yunohost@forge.yunohost@g' " \
- "{}".format(f)
- os.system(command)
-
- def get_apps_equivs_packages(self):
-
- command = "dpkg --get-selections" \
- " | grep -v deinstall" \
- " | awk '{print $1}'" \
- " | { grep 'ynh-deps$' || true; }"
-
- output = check_output(command).strip()
-
- return output.split('\n') if output else []
-
- def hold(self, packages):
- for package in packages:
- os.system("apt-mark hold {}".format(package))
-
- def unhold(self, packages):
- for package in packages:
- os.system("apt-mark unhold {}".format(package))
-
- def apt_update(self):
-
- command = "apt-get update"
- logger.debug("Running apt command :\n{}".format(command))
- command += " 2>&1 | tee -a {}".format(self.logfile)
-
- os.system(command)
-
- def upgrade_yunohost_packages(self):
-
- #
- # Here we use a dirty hack to run a command after the current
- # "yunohost tools migrations migrate", because the upgrade of
- # yunohost will also trigger another "yunohost tools migrations migrate"
- # (also the upgrade of the package, if executed from the webadmin, is
- # likely to kill/restart the api which is in turn likely to kill this
- # command before it ends...)
- #
-
- MOULINETTE_LOCK = "/var/run/moulinette_yunohost.lock"
-
- upgrade_command = ""
- upgrade_command += " DEBIAN_FRONTEND=noninteractive"
- upgrade_command += " APT_LISTCHANGES_FRONTEND=none"
- upgrade_command += " apt-get install"
- upgrade_command += " --assume-yes "
- upgrade_command += " ".join(YUNOHOST_PACKAGES)
- # We also install php-zip and php7.0-acpu to fix an issue with
- # nextcloud and kanboard that need it when on stretch.
- upgrade_command += " php-zip php7.0-apcu"
- upgrade_command += " 2>&1 | tee -a {}".format(self.logfile)
-
- wait_until_end_of_yunohost_command = "(while [ -f {} ]; do sleep 2; done)".format(MOULINETTE_LOCK)
-
- command = "({} && {}; echo 'Migration complete!') &".format(wait_until_end_of_yunohost_command,
- upgrade_command)
-
- logger.debug("Running command :\n{}".format(command))
-
- os.system(command)
-
- def apt_dist_upgrade(self, conf_flags):
-
- # Make apt-get happy
- os.system("echo 'libc6 libraries/restart-without-asking boolean true' | debconf-set-selections")
- # Don't send an email to root about the postgresql migration. It should be handled automatically after.
- os.system("echo 'postgresql-common postgresql-common/obsolete-major seen true' | debconf-set-selections")
-
- command = ""
- command += " DEBIAN_FRONTEND=noninteractive"
- command += " APT_LISTCHANGES_FRONTEND=none"
- command += " apt-get"
- command += " --fix-broken --show-upgraded --assume-yes"
- for conf_flag in conf_flags:
- command += ' -o Dpkg::Options::="--force-conf{}"'.format(conf_flag)
- command += " dist-upgrade"
-
- logger.debug("Running apt command :\n{}".format(command))
-
- command += " 2>&1 | tee -a {}".format(self.logfile)
-
- is_api = msettings.get('interface') == 'api'
- if is_api:
- callbacks = (
- lambda l: logger.info(l.rstrip()),
- lambda l: logger.warning(l.rstrip()),
- )
- call_async_output(command, callbacks, shell=True)
- else:
- # We do this when running from the cli to have the output of the
- # command showing in the terminal, since 'info' channel is only
- # enabled if the user explicitly add --verbose ...
- os.system(command)
-
- # Those are files that should be kept and restored before the final switch
- # to yunohost 3.x... They end up being modified by the various dist-upgrades
- # (or need to be taken out momentarily), which then blocks the regen-conf
- # as they are flagged as "manually modified"...
- files_to_keep = [
- "/etc/mysql/my.cnf",
- "/etc/nslcd.conf",
- "/etc/postfix/master.cf",
- "/etc/fail2ban/filter.d/yunohost.conf"
- ]
-
- def backup_files_to_keep(self):
-
- logger.debug("Backuping specific files to keep ...")
-
- # Create tmp directory if it does not exists
- tmp_dir = os.path.join("/tmp/", self.name)
- if not os.path.exists(tmp_dir):
- os.mkdir(tmp_dir, 0o700)
-
- for f in self.files_to_keep:
- dest_file = f.strip('/').replace("/", "_")
-
- # If the file is already there, we might be re-running the migration
- # because it previously crashed. Hence we keep the existing file.
- if os.path.exists(os.path.join(tmp_dir, dest_file)):
- continue
-
- copy2(f, os.path.join(tmp_dir, dest_file))
-
- def restore_files_to_keep(self):
-
- logger.debug("Restoring specific files to keep ...")
-
- tmp_dir = os.path.join("/tmp/", self.name)
-
- for f in self.files_to_keep:
- dest_file = f.strip('/').replace("/", "_")
- copy2(os.path.join(tmp_dir, dest_file), f)
-
- # On some setups, /etc/nginx/nginx.conf got edited. But this file needs
- # to be upgraded because of the way the new module system works for nginx.
- # (in particular, having the line that include the modules at the top)
- #
- # So here, if it got edited, we force the restore of the original conf
- # *before* starting the actual upgrade...
- #
- # An alternative strategy that was attempted was to hold the nginx-common
- # package and have a specific upgrade for it like for fail2ban, but that
- # leads to apt complaining about not being able to upgrade for shitty
- # reasons >.>
- def restore_original_nginx_conf_if_needed(self):
- if "/etc/nginx/nginx.conf" not in manually_modified_files_compared_to_debian_default():
- return
-
- if not os.path.exists("/etc/nginx/nginx.conf"):
- return
-
- # If stretch is in the sources.list, we already started migrating on
- # stretch so we don't re-do this
- if " stretch " in read_file("/etc/apt/sources.list"):
- return
-
- backup_dest = "/home/yunohost.conf/backup/nginx.conf.bkp_before_stretch"
-
- logger.warning(m18n.n("migration_0003_restoring_origin_nginx_conf",
- backup_dest=backup_dest))
-
- os.system("mv /etc/nginx/nginx.conf %s" % backup_dest)
-
- command = ""
- command += " DEBIAN_FRONTEND=noninteractive"
- command += " APT_LISTCHANGES_FRONTEND=none"
- command += " apt-get"
- command += " --fix-broken --show-upgraded --assume-yes"
- command += ' -o Dpkg::Options::="--force-confmiss"'
- command += " install --reinstall"
- command += " nginx-common"
-
- logger.debug("Running apt command :\n{}".format(command))
-
- command += " 2>&1 | tee -a {}".format(self.logfile)
-
- is_api = msettings.get('interface') == 'api'
- if is_api:
- callbacks = (
- lambda l: logger.info(l.rstrip()),
- lambda l: logger.warning(l.rstrip()),
- )
- call_async_output(command, callbacks, shell=True)
- else:
- # We do this when running from the cli to have the output of the
- # command showing in the terminal, since 'info' channel is only
- # enabled if the user explicitly add --verbose ...
- os.system(command)
-
- def disable_predicable_interface_names(self):
-
- # Try to see if currently used interface names are predictable ones or not...
- # If we ain't using "eth0" or "wlan0", assume we are using predictable interface
- # names and therefore they shouldnt be disabled
- network_interfaces = get_network_interfaces().keys()
- if "eth0" not in network_interfaces and "wlan0" not in network_interfaces:
- return
-
- interfaces_config = read_file("/etc/network/interfaces")
- if "eth0" not in interfaces_config and "wlan0" not in interfaces_config:
- return
-
- # Disable predictive interface names
- # c.f. https://unix.stackexchange.com/a/338730
- os.system("ln -s /dev/null /etc/systemd/network/99-default.link")
diff --git a/src/yunohost/data_migrations/0004_php5_to_php7_pools.py b/src/yunohost/data_migrations/0004_php5_to_php7_pools.py
deleted file mode 100644
index 46a5eb91d..000000000
--- a/src/yunohost/data_migrations/0004_php5_to_php7_pools.py
+++ /dev/null
@@ -1,98 +0,0 @@
-import os
-import glob
-from shutil import copy2
-
-from moulinette.utils.log import getActionLogger
-
-from yunohost.tools import Migration
-from yunohost.service import _run_service_command
-
-logger = getActionLogger('yunohost.migration')
-
-PHP5_POOLS = "/etc/php5/fpm/pool.d"
-PHP7_POOLS = "/etc/php/7.0/fpm/pool.d"
-
-PHP5_SOCKETS_PREFIX = "/var/run/php5-fpm"
-PHP7_SOCKETS_PREFIX = "/run/php/php7.0-fpm"
-
-MIGRATION_COMMENT = "; YunoHost note : this file was automatically moved from {}".format(PHP5_POOLS)
-
-
-class MyMigration(Migration):
-
- "Migrate php5-fpm 'pool' conf files to php7 stuff"
-
- def migrate(self):
-
- # Get list of php5 pool files
- php5_pool_files = glob.glob("{}/*.conf".format(PHP5_POOLS))
-
- # Keep only basenames
- php5_pool_files = [os.path.basename(f) for f in php5_pool_files]
-
- # Ignore the "www.conf" (default stuff, probably don't want to touch it ?)
- php5_pool_files = [f for f in php5_pool_files if f != "www.conf"]
-
- for f in php5_pool_files:
-
- # Copy the files to the php7 pool
- src = "{}/{}".format(PHP5_POOLS, f)
- dest = "{}/{}".format(PHP7_POOLS, f)
- copy2(src, dest)
-
- # Replace the socket prefix if it's found
- c = "sed -i -e 's@{}@{}@g' {}".format(PHP5_SOCKETS_PREFIX, PHP7_SOCKETS_PREFIX, dest)
- os.system(c)
-
- # Also add a comment that it was automatically moved from php5
- # (for human traceability and backward migration)
- c = "sed -i '1i {}' {}".format(MIGRATION_COMMENT, dest)
- os.system(c)
-
- # Some old comments starting with '#' instead of ';' are not
- # compatible in php7
- c = "sed -i 's/^#/;#/g' {}".format(dest)
- os.system(c)
-
- # Reload/restart the php pools
- _run_service_command("restart", "php7.0-fpm")
- _run_service_command("enable", "php7.0-fpm")
- os.system("systemctl stop php5-fpm")
- os.system("systemctl disable php5-fpm")
- os.system("rm /etc/logrotate.d/php5-fpm") # We remove this otherwise the logrotate cron will be unhappy
-
- # Get list of nginx conf file
- nginx_conf_files = glob.glob("/etc/nginx/conf.d/*.d/*.conf")
- for f in nginx_conf_files:
- # Replace the socket prefix if it's found
- c = "sed -i -e 's@{}@{}@g' {}".format(PHP5_SOCKETS_PREFIX, PHP7_SOCKETS_PREFIX, f)
- os.system(c)
-
- # Reload nginx
- _run_service_command("reload", "nginx")
-
- def backward(self):
-
- # Get list of php7 pool files
- php7_pool_files = glob.glob("{}/*.conf".format(PHP7_POOLS))
-
- # Keep only files which have the migration comment
- php7_pool_files = [f for f in php7_pool_files if open(f).readline().strip() == MIGRATION_COMMENT]
-
- # Delete those files
- for f in php7_pool_files:
- os.remove(f)
-
- # Reload/restart the php pools
- _run_service_command("stop", "php7.0-fpm")
- os.system("systemctl start php5-fpm")
-
- # Get list of nginx conf file
- nginx_conf_files = glob.glob("/etc/nginx/conf.d/*.d/*.conf")
- for f in nginx_conf_files:
- # Replace the socket prefix if it's found
- c = "sed -i -e 's@{}@{}@g' {}".format(PHP7_SOCKETS_PREFIX, PHP5_SOCKETS_PREFIX, f)
- os.system(c)
-
- # Reload nginx
- _run_service_command("reload", "nginx")
diff --git a/src/yunohost/data_migrations/0005_postgresql_9p4_to_9p6.py b/src/yunohost/data_migrations/0005_postgresql_9p4_to_9p6.py
deleted file mode 100644
index 5ae729b60..000000000
--- a/src/yunohost/data_migrations/0005_postgresql_9p4_to_9p6.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import subprocess
-
-from moulinette import m18n
-from yunohost.utils.error import YunohostError
-from moulinette.utils.log import getActionLogger
-
-from yunohost.tools import Migration
-from yunohost.utils.filesystem import free_space_in_directory, space_used_by_directory
-
-logger = getActionLogger('yunohost.migration')
-
-
-class MyMigration(Migration):
-
- "Migrate DBs from Postgresql 9.4 to 9.6 after migrating to Stretch"
-
- def migrate(self):
-
- if not self.package_is_installed("postgresql-9.4"):
- logger.warning(m18n.n("migration_0005_postgresql_94_not_installed"))
- return
-
- if not self.package_is_installed("postgresql-9.6"):
- raise YunohostError("migration_0005_postgresql_96_not_installed")
-
- if not space_used_by_directory("/var/lib/postgresql/9.4") > free_space_in_directory("/var/lib/postgresql"):
- raise YunohostError("migration_0005_not_enough_space", path="/var/lib/postgresql/")
-
- subprocess.check_call("service postgresql stop", shell=True)
- subprocess.check_call("pg_dropcluster --stop 9.6 main", shell=True)
- subprocess.check_call("pg_upgradecluster -m upgrade 9.4 main", shell=True)
- subprocess.check_call("pg_dropcluster --stop 9.4 main", shell=True)
- subprocess.check_call("service postgresql start", shell=True)
-
- def backward(self):
-
- pass
-
- def package_is_installed(self, package_name):
-
- p = subprocess.Popen("dpkg --list | grep '^ii ' | grep -q -w {}".format(package_name), shell=True)
- p.communicate()
- return p.returncode == 0
diff --git a/src/yunohost/data_migrations/0006_sync_admin_and_root_passwords.py b/src/yunohost/data_migrations/0006_sync_admin_and_root_passwords.py
deleted file mode 100644
index cd13d680d..000000000
--- a/src/yunohost/data_migrations/0006_sync_admin_and_root_passwords.py
+++ /dev/null
@@ -1,81 +0,0 @@
-import spwd
-import crypt
-import random
-import string
-import subprocess
-
-from moulinette import m18n
-from yunohost.utils.error import YunohostError
-from moulinette.utils.log import getActionLogger
-from moulinette.utils.process import run_commands, check_output
-from moulinette.utils.filesystem import append_to_file
-from moulinette.authenticators.ldap import Authenticator
-from yunohost.tools import Migration
-
-logger = getActionLogger('yunohost.migration')
-SMALL_PWD_LIST = ["yunohost", "olinuxino", "olinux", "raspberry", "admin", "root", "test", "rpi"]
-
-
-class MyMigration(Migration):
-
- "Synchronize admin and root passwords"
-
- def migrate(self):
-
- new_hash = self._get_admin_hash()
- self._replace_root_hash(new_hash)
-
- logger.info(m18n.n("root_password_replaced_by_admin_password"))
-
- def backward(self):
- pass
-
- @property
- def mode(self):
-
- # If the root password is still a "default" value,
- # then this is an emergency and migration shall
- # be applied automatically
- #
- # Otherwise, as playing with root password is touchy,
- # we set this as a manual migration.
- return "auto" if self._is_root_pwd_listed(SMALL_PWD_LIST) else "manual"
-
- @property
- def disclaimer(self):
- if self._is_root_pwd_listed(SMALL_PWD_LIST):
- return None
-
- return m18n.n("migration_0006_disclaimer")
-
- def _get_admin_hash(self):
- """
- Fetch the admin hash from the LDAP db using slapcat
- """
- admin_hash = check_output("slapcat \
- | grep 'dn: cn=admin,dc=yunohost,dc=org' -A20 \
- | grep userPassword -A2 \
- | tr -d '\n ' \
- | tr ':' ' ' \
- | awk '{print $2}' \
- | base64 -d \
- | sed 's/{CRYPT}//g'")
- return admin_hash
-
- def _replace_root_hash(self, new_hash):
- hash_root = spwd.getspnam("root").sp_pwd
-
- with open('/etc/shadow', 'r') as before_file:
- before = before_file.read()
-
- with open('/etc/shadow', 'w') as after_file:
- after_file.write(before.replace("root:" + hash_root,
- "root:" + new_hash))
-
- def _is_root_pwd_listed(self, pwd_list):
- hash_root = spwd.getspnam("root").sp_pwd
-
- for password in pwd_list:
- if hash_root == crypt.crypt(password, hash_root):
- return True
- return False
diff --git a/src/yunohost/data_migrations/0007_ssh_conf_managed_by_yunohost_step1.py b/src/yunohost/data_migrations/0007_ssh_conf_managed_by_yunohost_step1.py
deleted file mode 100644
index feffdc27c..000000000
--- a/src/yunohost/data_migrations/0007_ssh_conf_managed_by_yunohost_step1.py
+++ /dev/null
@@ -1,73 +0,0 @@
-import os
-import re
-
-from shutil import copyfile
-
-from moulinette.utils.log import getActionLogger
-from moulinette.utils.filesystem import mkdir, rm
-
-from yunohost.tools import Migration
-from yunohost.service import _run_service_command
-from yunohost.regenconf import regen_conf
-from yunohost.settings import settings_set
-from yunohost.utils.error import YunohostError
-
-logger = getActionLogger('yunohost.migration')
-
-SSHD_CONF = '/etc/ssh/sshd_config'
-
-
-class MyMigration(Migration):
-
- """
- This is the first step of a couple of migrations that ensure SSH conf is
- managed by YunoHost (even if the "from_script" flag is present, which was
- previously preventing it from being managed by YunoHost)
-
- The goal of this first (automatic) migration is to make sure that the
- sshd_config is managed by the regen-conf mechanism.
-
- If the from_script flag exists, then we keep the current SSH conf such that it
- will appear as "manually modified" to the regenconf.
-
- In step 2 (manual), the admin will be able to choose wether or not to actually
- use the recommended configuration, with an appropriate disclaimer.
- """
-
- def migrate(self):
-
- # Check if deprecated DSA Host Key is in config
- dsa_rgx = r'^[ \t]*HostKey[ \t]+/etc/ssh/ssh_host_dsa_key[ \t]*(?:#.*)?$'
- dsa = False
- for line in open(SSHD_CONF):
- if re.match(dsa_rgx, line) is not None:
- dsa = True
- break
- if dsa:
- settings_set("service.ssh.allow_deprecated_dsa_hostkey", True)
-
- # Here, we make it so that /etc/ssh/sshd_config is managed
- # by the regen conf (in particular in the case where the
- # from_script flag is present - in which case it was *not*
- # managed by the regenconf)
- # But because we can't be sure the user wants to use the
- # recommended conf, we backup then restore the /etc/ssh/sshd_config
- # right after the regenconf, such that it will appear as
- # "manually modified".
- if os.path.exists('/etc/yunohost/from_script'):
- rm('/etc/yunohost/from_script')
- copyfile(SSHD_CONF, '/etc/ssh/sshd_config.bkp')
- regen_conf(names=['ssh'], force=True)
- copyfile('/etc/ssh/sshd_config.bkp', SSHD_CONF)
-
- # Restart ssh and backward if it fail
- if not _run_service_command('restart', 'ssh'):
- self.backward()
- raise YunohostError("migration_0007_cancel")
-
- def backward(self):
-
- # We don't backward completely but it should be enough
- copyfile('/etc/ssh/sshd_config.bkp', SSHD_CONF)
- if not _run_service_command('restart', 'ssh'):
- raise YunohostError("migration_0007_cannot_restart")
diff --git a/src/yunohost/data_migrations/0008_ssh_conf_managed_by_yunohost_step2.py b/src/yunohost/data_migrations/0008_ssh_conf_managed_by_yunohost_step2.py
deleted file mode 100644
index 8984440bd..000000000
--- a/src/yunohost/data_migrations/0008_ssh_conf_managed_by_yunohost_step2.py
+++ /dev/null
@@ -1,107 +0,0 @@
-import os
-import re
-
-from moulinette import m18n
-from moulinette.utils.log import getActionLogger
-from moulinette.utils.filesystem import chown
-
-from yunohost.tools import Migration
-from yunohost.regenconf import _get_conf_hashes, _calculate_hash
-from yunohost.regenconf import regen_conf
-from yunohost.settings import settings_set, settings_get
-from yunohost.utils.error import YunohostError
-from yunohost.backup import ARCHIVES_PATH
-
-
-logger = getActionLogger('yunohost.migration')
-
-SSHD_CONF = '/etc/ssh/sshd_config'
-
-
-class MyMigration(Migration):
-
- """
- In this second step, the admin is asked if it's okay to use
- the recommended SSH configuration - which also implies
- disabling deprecated DSA key.
-
- This has important implications in the way the user may connect
- to its server (key change, and a spooky warning might be given
- by SSH later)
-
- A disclaimer explaining the various things to be aware of is
- shown - and the user may also choose to skip this migration.
- """
-
- def migrate(self):
- settings_set("service.ssh.allow_deprecated_dsa_hostkey", False)
- regen_conf(names=['ssh'], force=True)
-
- # Update local archives folder permissions, so that
- # admin can scp archives out of the server
- if os.path.isdir(ARCHIVES_PATH):
- chown(ARCHIVES_PATH, uid="admin", gid="root")
-
- def backward(self):
-
- raise YunohostError("migration_0008_backward_impossible")
-
- @property
- def mode(self):
-
- # If the conf is already up to date
- # and no DSA key is used, then we're good to go
- # and the migration can be done automatically
- # (basically nothing shall change)
- ynh_hash = _get_conf_hashes('ssh').get(SSHD_CONF, None)
- current_hash = _calculate_hash(SSHD_CONF)
- dsa = settings_get("service.ssh.allow_deprecated_dsa_hostkey")
- if ynh_hash == current_hash and not dsa:
- return "auto"
-
- return "manual"
-
- @property
- def disclaimer(self):
-
- if self.mode == "auto":
- return None
-
- # Detect key things to be aware of before enabling the
- # recommended configuration
- dsa_key_enabled = False
- ports = []
- root_login = []
- port_rgx = r'^[ \t]*Port[ \t]+(\d+)[ \t]*(?:#.*)?$'
- root_rgx = r'^[ \t]*PermitRootLogin[ \t]([^# \t]*)[ \t]*(?:#.*)?$'
- dsa_rgx = r'^[ \t]*HostKey[ \t]+/etc/ssh/ssh_host_dsa_key[ \t]*(?:#.*)?$'
- for line in open(SSHD_CONF):
-
- ports = ports + re.findall(port_rgx, line)
-
- root_login = root_login + re.findall(root_rgx, line)
-
- if not dsa_key_enabled and re.match(dsa_rgx, line) is not None:
- dsa_key_enabled = True
-
- custom_port = ports != ['22'] and ports != []
- root_login_enabled = root_login and root_login[-1] != 'no'
-
- # Build message
- message = m18n.n("migration_0008_general_disclaimer")
-
- if custom_port:
- message += "\n\n" + m18n.n("migration_0008_port")
-
- if root_login_enabled:
- message += "\n\n" + m18n.n("migration_0008_root")
-
- if dsa_key_enabled:
- message += "\n\n" + m18n.n("migration_0008_dsa")
-
- if custom_port or root_login_enabled or dsa_key_enabled:
- message += "\n\n" + m18n.n("migration_0008_warning")
- else:
- message += "\n\n" + m18n.n("migration_0008_no_warning")
-
- return message
diff --git a/src/yunohost/data_migrations/0009_decouple_regenconf_from_services.py b/src/yunohost/data_migrations/0009_decouple_regenconf_from_services.py
deleted file mode 100644
index d552d7c9c..000000000
--- a/src/yunohost/data_migrations/0009_decouple_regenconf_from_services.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import os
-
-from moulinette import m18n
-from moulinette.utils.log import getActionLogger
-
-from moulinette.utils.filesystem import read_file
-from yunohost.service import _get_services, _save_services
-from yunohost.regenconf import _update_conf_hashes, REGEN_CONF_FILE
-
-from yunohost.tools import Migration
-
-logger = getActionLogger('yunohost.migration')
-
-
-class MyMigration(Migration):
- """
- Decouple the regen conf mechanism from the concept of services
- """
-
- def migrate(self):
-
- if "conffiles" not in read_file("/etc/yunohost/services.yml") \
- or os.path.exists(REGEN_CONF_FILE):
- logger.warning(m18n.n("migration_0009_not_needed"))
- return
-
- # For all services
- services = _get_services()
- for service, infos in services.items():
- # If there are some conffiles (file hashes)
- if "conffiles" in infos.keys():
- # Save them using the new regen conf thingy
- _update_conf_hashes(service, infos["conffiles"])
- # And delete the old conffile key from the service infos
- del services[service]["conffiles"]
-
- # (Actually save the modification of services)
- _save_services(services)
-
- def backward(self):
-
- pass
diff --git a/src/yunohost/data_migrations/0010_migrate_to_apps_json.py b/src/yunohost/data_migrations/0010_migrate_to_apps_json.py
deleted file mode 100644
index 43ae9a86f..000000000
--- a/src/yunohost/data_migrations/0010_migrate_to_apps_json.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import os
-
-from moulinette.utils.log import getActionLogger
-from yunohost.app import app_fetchlist, app_removelist, _read_appslist_list, APPSLISTS_JSON
-from yunohost.tools import Migration
-
-logger = getActionLogger('yunohost.migration')
-
-BASE_CONF_PATH = '/home/yunohost.conf'
-BACKUP_CONF_DIR = os.path.join(BASE_CONF_PATH, 'backup')
-APPSLISTS_BACKUP = os.path.join(BACKUP_CONF_DIR, "appslist_before_migration_to_unified_list.json")
-
-
-class MyMigration(Migration):
-
- "Migrate from official.json to apps.json"
-
- def migrate(self):
-
- # Backup current app list json
- os.system("cp %s %s" % (APPSLISTS_JSON, APPSLISTS_BACKUP))
-
- # Remove all the deprecated lists
- lists_to_remove = [
- "app.yunohost.org/list.json", # Old list on old installs, alias to official.json
- "app.yunohost.org/official.json",
- "app.yunohost.org/community.json",
- "labriqueinter.net/apps/labriqueinternet.json",
- "labriqueinter.net/internetcube.json"
- ]
-
- appslists = _read_appslist_list()
- for appslist, infos in appslists.items():
- if infos["url"].split("//")[-1] in lists_to_remove:
- app_removelist(name=appslist)
-
- # Replace by apps.json list
- app_fetchlist(name="yunohost",
- url="https://app.yunohost.org/apps.json")
-
- def backward(self):
-
- if os.path.exists(APPSLISTS_BACKUP):
- os.system("cp %s %s" % (APPSLISTS_BACKUP, APPSLISTS_JSON))
diff --git a/src/yunohost/data_migrations/0011_setup_group_permission.py b/src/yunohost/data_migrations/0011_setup_group_permission.py
deleted file mode 100644
index 05c426936..000000000
--- a/src/yunohost/data_migrations/0011_setup_group_permission.py
+++ /dev/null
@@ -1,142 +0,0 @@
-import yaml
-import time
-import os
-
-from moulinette import m18n
-from yunohost.utils.error import YunohostError
-from moulinette.utils.log import getActionLogger
-
-from yunohost.tools import Migration
-from yunohost.user import user_group_add, user_group_update
-from yunohost.app import app_setting, app_list
-from yunohost.regenconf import regen_conf
-from yunohost.permission import permission_add, permission_sync_to_user
-from yunohost.user import user_permission_add
-
-logger = getActionLogger('yunohost.migration')
-
-###################################################
-# Tools used also for restoration
-###################################################
-
-class MyMigration(Migration):
- """
- Update the LDAP DB to be able to store the permission
- Create a group for each yunohost user
- Migrate app permission from apps setting to LDAP
- """
-
- required = True
-
- def migrate_LDAP_db(self):
-
- logger.info(m18n.n("migration_0011_update_LDAP_database"))
-
- from yunohost.utils.ldap import _get_ldap_interface
- ldap = _get_ldap_interface()
-
- try:
- ldap.remove('cn=sftpusers,ou=groups')
- except:
- logger.warn(m18n.n("error_when_removing_sftpuser_group"))
-
- with open('/usr/share/yunohost/yunohost-config/moulinette/ldap_scheme.yml') as f:
- ldap_map = yaml.load(f)
-
- try:
- attr_dict = ldap_map['parents']['ou=permission']
- ldap.add('ou=permission', attr_dict)
-
- attr_dict = ldap_map['children']['cn=all_users,ou=groups']
- ldap.add('cn=all_users,ou=groups', attr_dict)
-
- for rdn, attr_dict in ldap_map['depends_children'].items():
- ldap.add(rdn, attr_dict)
- except Exception as e:
- raise YunohostError("migration_0011_LDAP_update_failed", error=e)
-
- logger.info(m18n.n("migration_0011_create_group"))
-
- # Create a group for each yunohost user
- user_list = ldap.search('ou=users,dc=yunohost,dc=org',
- '(&(objectclass=person)(!(uid=root))(!(uid=nobody)))',
- ['uid', 'uidNumber'])
- for user_info in user_list:
- username = user_info['uid'][0]
- ldap.update('uid=%s,ou=users' % username,
- {'objectClass': ['mailAccount', 'inetOrgPerson', 'posixAccount', 'userPermissionYnh']})
- user_group_add(username, gid=user_info['uidNumber'][0], sync_perm=False)
- user_group_update(groupname=username, add_user=username, force=True, sync_perm=False)
- user_group_update(groupname='all_users', add_user=username, force=True, sync_perm=False)
-
-
- def migrate_app_permission(self, app=None):
- logger.info(m18n.n("migration_0011_migrate_permission"))
-
- if app:
- apps = app_list(installed=True, filter=app)['apps']
- else:
- apps = app_list(installed=True)['apps']
-
- for app_info in apps:
- app = app_info['id']
- permission = app_setting(app, 'allowed_users')
- path = app_setting(app, 'path')
- domain = app_setting(app, 'domain')
-
- urls = [domain + path] if domain and path else None
- permission_add(app, permission='main', urls=urls, default_allow=True, sync_perm=False)
- if permission:
- allowed_group = permission.split(',')
- user_permission_add([app], permission='main', group=allowed_group, sync_perm=False)
- app_setting(app, 'allowed_users', delete=True)
-
-
- def migrate(self):
- # Check if the migration can be processed
- ldap_regen_conf_status = regen_conf(names=['slapd'], dry_run=True)
- # By this we check if the have been customized
- if ldap_regen_conf_status and ldap_regen_conf_status['slapd']['pending']:
- raise YunohostError("migration_0011_LDAP_config_dirty")
-
- # Backup LDAP and the apps settings before to do the migration
- logger.info(m18n.n("migration_0011_backup_before_migration"))
- try:
- backup_folder = "/home/yunohost.backup/premigration/" + time.strftime('%Y%m%d-%H%M%S', time.gmtime())
- os.makedirs(backup_folder, 0o750)
- os.system("systemctl stop slapd")
- os.system("cp -r --preserve /etc/ldap %s/ldap_config" % backup_folder)
- os.system("cp -r --preserve /var/lib/ldap %s/ldap_db" % backup_folder)
- os.system("cp -r --preserve /etc/yunohost/apps %s/apps_settings" % backup_folder)
- except Exception as e:
- raise YunohostError("migration_0011_can_not_backup_before_migration", error=e)
- finally:
- os.system("systemctl start slapd")
-
- try:
- # Update LDAP schema restart slapd
- logger.info(m18n.n("migration_0011_update_LDAP_schema"))
- regen_conf(names=['slapd'], force=True)
-
- # Update LDAP database
- self.migrate_LDAP_db()
-
- # Migrate permission
- self.migrate_app_permission()
-
- permission_sync_to_user()
- except Exception as e:
- logger.warn(m18n.n("migration_0011_migration_failed_trying_to_rollback"))
- os.system("systemctl stop slapd")
- os.system("rm -r /etc/ldap/slapd.d") # To be sure that we don't keep some part of the old config
- os.system("cp -r --preserve %s/ldap_config/. /etc/ldap/" % backup_folder)
- os.system("cp -r --preserve %s/ldap_db/. /var/lib/ldap/" % backup_folder)
- os.system("cp -r --preserve %s/apps_settings/. /etc/yunohost/apps/" % backup_folder)
- os.system("systemctl start slapd")
- os.system("rm -r " + backup_folder)
- logger.info(m18n.n("migration_0011_rollback_success"))
- raise
- else:
- os.system("rm -r " + backup_folder)
-
- logger.info(m18n.n("migration_0011_done"))
diff --git a/src/yunohost/data_migrations/0015_migrate_to_buster.py b/src/yunohost/data_migrations/0015_migrate_to_buster.py
new file mode 100644
index 000000000..4f2d4caf8
--- /dev/null
+++ b/src/yunohost/data_migrations/0015_migrate_to_buster.py
@@ -0,0 +1,291 @@
+import glob
+import os
+
+from moulinette import m18n
+from yunohost.utils.error import YunohostError
+from moulinette.utils.log import getActionLogger
+from moulinette.utils.process import check_output, call_async_output
+from moulinette.utils.filesystem import read_file
+
+from yunohost.tools import Migration, tools_update, tools_upgrade
+from yunohost.app import unstable_apps
+from yunohost.regenconf import manually_modified_files
+from yunohost.utils.filesystem import free_space_in_directory
+from yunohost.utils.packages import (
+ get_ynh_package_version,
+ _list_upgradable_apt_packages,
+)
+
+logger = getActionLogger("yunohost.migration")
+
+
+class MyMigration(Migration):
+
+ "Upgrade the system to Debian Buster and Yunohost 4.x"
+
+ mode = "manual"
+
+ def run(self):
+
+ self.check_assertions()
+
+ logger.info(m18n.n("migration_0015_start"))
+
+ #
+ # Make sure certificates do not use weak signature hash algorithms (md5, sha1)
+ # otherwise nginx will later refuse to start which result in
+ # catastrophic situation
+ #
+ self.validate_and_upgrade_cert_if_necessary()
+
+ #
+ # Patch sources.list
+ #
+ logger.info(m18n.n("migration_0015_patching_sources_list"))
+ self.patch_apt_sources_list()
+ tools_update(target="system")
+
+ # Tell libc6 it's okay to restart system stuff during the upgrade
+ os.system(
+ "echo 'libc6 libraries/restart-without-asking boolean true' | debconf-set-selections"
+ )
+
+ # Don't send an email to root about the postgresql migration. It should be handled automatically after.
+ os.system(
+ "echo 'postgresql-common postgresql-common/obsolete-major seen true' | debconf-set-selections"
+ )
+
+ #
+ # Specific packages upgrades
+ #
+ logger.info(m18n.n("migration_0015_specific_upgrade"))
+
+ # Update unscd independently, was 0.53-1+yunohost on stretch (custom build of ours) but now it's 0.53-1+b1 on vanilla buster,
+ # which for apt appears as a lower version (hence the --allow-downgrades and the hardcoded version number)
+ unscd_version = check_output(
+ 'dpkg -s unscd | grep "^Version: " | cut -d " " -f 2'
+ )
+ if "yunohost" in unscd_version:
+ new_version = check_output(
+ "LC_ALL=C apt policy unscd 2>/dev/null | grep -v '\\*\\*\\*' | grep http -B1 | head -n 1 | awk '{print $1}'"
+ ).strip()
+ if new_version:
+ self.apt_install("unscd=%s --allow-downgrades" % new_version)
+ else:
+ logger.warning("Could not identify which version of unscd to install")
+
+ # Upgrade libpam-modules independently, small issue related to willing to overwrite a file previously provided by Yunohost
+ libpammodules_version = check_output(
+ 'dpkg -s libpam-modules | grep "^Version: " | cut -d " " -f 2'
+ )
+ if not libpammodules_version.startswith("1.3"):
+ self.apt_install('libpam-modules -o Dpkg::Options::="--force-overwrite"')
+
+ #
+ # Main upgrade
+ #
+ logger.info(m18n.n("migration_0015_main_upgrade"))
+
+ apps_packages = self.get_apps_equivs_packages()
+ self.hold(apps_packages)
+ tools_upgrade(target="system", allow_yunohost_upgrade=False)
+
+ if self.debian_major_version() == 9:
+ raise YunohostError("migration_0015_still_on_stretch_after_main_upgrade")
+
+ # Clean the mess
+ logger.info(m18n.n("migration_0015_cleaning_up"))
+ os.system("apt autoremove --assume-yes")
+ os.system("apt clean --assume-yes")
+
+ #
+ # Yunohost upgrade
+ #
+ logger.info(m18n.n("migration_0015_yunohost_upgrade"))
+ self.unhold(apps_packages)
+ tools_upgrade(target="system")
+
+ def debian_major_version(self):
+ # The python module "platform" and lsb_release are not reliable because
+ # on some setup, they may still return Release=9 even after upgrading to
+ # buster ... (Apparently this is related to OVH overriding some stuff
+ # with /etc/lsb-release for instance -_-)
+ # Instead, we rely on /etc/os-release which should be the raw info from
+ # the distribution...
+ return int(
+ check_output(
+ "grep VERSION_ID /etc/os-release | head -n 1 | tr '\"' ' ' | cut -d ' ' -f2"
+ )
+ )
+
+ def yunohost_major_version(self):
+ return int(get_ynh_package_version("yunohost")["version"].split(".")[0])
+
+ def check_assertions(self):
+
+ # Be on stretch (9.x) and yunohost 3.x
+ # NB : we do both check to cover situations where the upgrade crashed
+ # in the middle and debian version could be > 9.x but yunohost package
+ # would still be in 3.x...
+ if (
+ not self.debian_major_version() == 9
+ and not self.yunohost_major_version() == 3
+ ):
+ raise YunohostError("migration_0015_not_stretch")
+
+ # Have > 1 Go free space on /var/ ?
+ if free_space_in_directory("/var/") / (1024 ** 3) < 1.0:
+ raise YunohostError("migration_0015_not_enough_free_space")
+
+ # Check system is up to date
+ # (but we don't if 'stretch' is already in the sources.list ...
+        # (but we skip this check if 'buster' is already in the sources.list ...
+ if " buster " not in read_file("/etc/apt/sources.list"):
+ tools_update(target="system")
+ upgradable_system_packages = list(_list_upgradable_apt_packages())
+ if upgradable_system_packages:
+ raise YunohostError("migration_0015_system_not_fully_up_to_date")
+
+ @property
+ def disclaimer(self):
+
+        # Avoid having a super long disclaimer + unnecessary check if we ain't
+ # on stretch / yunohost 3.x anymore
+ # NB : we do both check to cover situations where the upgrade crashed
+ # in the middle and debian version could be >= 10.x but yunohost package
+ # would still be in 3.x...
+ if (
+ not self.debian_major_version() == 9
+ and not self.yunohost_major_version() == 3
+ ):
+ return None
+
+ # Get list of problematic apps ? I.e. not official or community+working
+ problematic_apps = unstable_apps()
+ problematic_apps = "".join(["\n - " + app for app in problematic_apps])
+
+ # Manually modified files ? (c.f. yunohost service regen-conf)
+ modified_files = manually_modified_files()
+ modified_files = "".join(["\n - " + f for f in modified_files])
+
+ message = m18n.n("migration_0015_general_warning")
+
+ message = (
+ "N.B.: This migration has been tested by the community over the last few months but has only been declared stable recently. If your server hosts critical services and if you are not too confident with debugging possible issues, we recommend you to wait a little bit more while we gather more feedback and polish things up. If on the other hand you are relatively confident with debugging small issues that may arise, you are encouraged to run this migration ;)! You can read about remaining known issues and feedback from the community here: https://forum.yunohost.org/t/12195\n\n"
+ + message
+ )
+
+ if problematic_apps:
+ message += "\n\n" + m18n.n(
+ "migration_0015_problematic_apps_warning",
+ problematic_apps=problematic_apps,
+ )
+
+ if modified_files:
+ message += "\n\n" + m18n.n(
+ "migration_0015_modified_files", manually_modified_files=modified_files
+ )
+
+ return message
+
+ def patch_apt_sources_list(self):
+
+ sources_list = glob.glob("/etc/apt/sources.list.d/*.list")
+ sources_list.append("/etc/apt/sources.list")
+
+        # This:
+        # - replaces 'stretch' occurrences by 'buster'
+        # - comments out lines containing "backports"
+        # - replaces 'stretch/updates' by 'buster/updates' (and 'stretch-' by 'buster-')
+ for f in sources_list:
+ command = (
+ "sed -i -e 's@ stretch @ buster @g' "
+ "-e '/backports/ s@^#*@#@' "
+ "-e 's@ stretch/updates @ buster/updates @g' "
+ "-e 's@ stretch-@ buster-@g' "
+ "{}".format(f)
+ )
+ os.system(command)
+
+ def get_apps_equivs_packages(self):
+
+ command = (
+ "dpkg --get-selections"
+ " | grep -v deinstall"
+ " | awk '{print $1}'"
+ " | { grep 'ynh-deps$' || true; }"
+ )
+
+ output = check_output(command)
+
+ return output.split("\n") if output else []
+
+ def hold(self, packages):
+ for package in packages:
+ os.system("apt-mark hold {}".format(package))
+
+ def unhold(self, packages):
+ for package in packages:
+ os.system("apt-mark unhold {}".format(package))
+
+ def apt_install(self, cmd):
+ def is_relevant(line):
+ return "Reading database ..." not in line.rstrip()
+
+ callbacks = (
+ lambda l: logger.info("+ " + l.rstrip() + "\r")
+ if is_relevant(l)
+ else logger.debug(l.rstrip() + "\r"),
+ lambda l: logger.warning(l.rstrip()),
+ )
+
+ cmd = (
+ "LC_ALL=C DEBIAN_FRONTEND=noninteractive APT_LISTCHANGES_FRONTEND=none apt install --quiet -o=Dpkg::Use-Pty=0 --fix-broken --assume-yes "
+ + cmd
+ )
+
+ logger.debug("Running: %s" % cmd)
+
+ call_async_output(cmd, callbacks, shell=True)
+
+ def validate_and_upgrade_cert_if_necessary(self):
+
+ active_certs = set(
+ check_output("grep -roh '/.*crt.pem' /etc/nginx/").split("\n")
+ )
+
+ cmd = "LC_ALL=C openssl x509 -in %s -text -noout | grep -i 'Signature Algorithm:' | awk '{print $3}' | uniq"
+
+ default_crt = "/etc/yunohost/certs/yunohost.org/crt.pem"
+ default_key = "/etc/yunohost/certs/yunohost.org/key.pem"
+ default_signature = (
+ check_output(cmd % default_crt) if default_crt in active_certs else None
+ )
+ if default_signature is not None and (
+ default_signature.startswith("md5") or default_signature.startswith("sha1")
+ ):
+ logger.warning(
+ "%s is using a pretty old certificate incompatible with newer versions of nginx ... attempting to regenerate a fresh one"
+ % default_crt
+ )
+
+ os.system("mv %s %s.old" % (default_crt, default_crt))
+ os.system("mv %s %s.old" % (default_key, default_key))
+ ret = os.system("/usr/share/yunohost/hooks/conf_regen/02-ssl init")
+
+ if ret != 0 or not os.path.exists(default_crt):
+ logger.error("Upgrading the certificate failed ... reverting")
+ os.system("mv %s.old %s" % (default_crt, default_crt))
+ os.system("mv %s.old %s" % (default_key, default_key))
+
+ signatures = {cert: check_output(cmd % cert) for cert in active_certs}
+
+ def cert_is_weak(cert):
+ sig = signatures[cert]
+ return sig.startswith("md5") or sig.startswith("sha1")
+
+ weak_certs = [cert for cert in signatures.keys() if cert_is_weak(cert)]
+ if weak_certs:
+ raise YunohostError(
+ "migration_0015_weak_certs", certs=", ".join(weak_certs)
+ )
diff --git a/src/yunohost/data_migrations/0016_php70_to_php73_pools.py b/src/yunohost/data_migrations/0016_php70_to_php73_pools.py
new file mode 100644
index 000000000..6b424f211
--- /dev/null
+++ b/src/yunohost/data_migrations/0016_php70_to_php73_pools.py
@@ -0,0 +1,83 @@
+import os
+import glob
+from shutil import copy2
+
+from moulinette.utils.log import getActionLogger
+
+from yunohost.app import _is_installed, _patch_legacy_php_versions_in_settings
+from yunohost.tools import Migration
+from yunohost.service import _run_service_command
+
+logger = getActionLogger("yunohost.migration")
+
+PHP70_POOLS = "/etc/php/7.0/fpm/pool.d"
+PHP73_POOLS = "/etc/php/7.3/fpm/pool.d"
+
+PHP70_SOCKETS_PREFIX = "/run/php/php7.0-fpm"
+PHP73_SOCKETS_PREFIX = "/run/php/php7.3-fpm"
+
+MIGRATION_COMMENT = (
+ "; YunoHost note : this file was automatically moved from {}".format(PHP70_POOLS)
+)
+
+
class MyMigration(Migration):

    "Migrate php7.0-fpm 'pool' conf files to php7.3"

    dependencies = ["migrate_to_buster"]

    def run(self):
        """Copy each non-default php7.0 pool conf to php7.3, rewrite socket
        paths in the pool and nginx confs, then swap the fpm services."""

        # Get list of php7.0 pool files, keeping only basenames
        php70_pool_files = [
            os.path.basename(p) for p in glob.glob("{}/*.conf".format(PHP70_POOLS))
        ]

        # Ignore the "www.conf" (default stuff, probably don't want to touch it ?)
        php70_pool_files = [f for f in php70_pool_files if f != "www.conf"]

        for f in php70_pool_files:

            # Copy the files to the php7.3 pool
            src = "{}/{}".format(PHP70_POOLS, f)
            dest = "{}/{}".format(PHP73_POOLS, f)
            copy2(src, dest)

            # Replace the socket prefix if it's found
            c = "sed -i -e 's@{}@{}@g' {}".format(
                PHP70_SOCKETS_PREFIX, PHP73_SOCKETS_PREFIX, dest
            )
            os.system(c)

            # Also add a comment that it was automatically moved from php7.0
            # (for human traceability and backward migration)
            c = "sed -i '1i {}' {}".format(MIGRATION_COMMENT, dest)
            os.system(c)

            # f is already a basename, so just strip the ".conf" extension
            # (the previous os.path.basename() call here was redundant)
            app_id = f[: -len(".conf")]
            if _is_installed(app_id):
                _patch_legacy_php_versions_in_settings(
                    "/etc/yunohost/apps/%s/" % app_id
                )

            # Bug fix: use a distinct loop variable instead of reusing `f`,
            # which previously shadowed the pool-file name being processed
            nginx_conf_files = glob.glob("/etc/nginx/conf.d/*.d/%s.conf" % app_id)
            for nginx_conf in nginx_conf_files:
                # Replace the socket prefix if it's found
                c = "sed -i -e 's@{}@{}@g' {}".format(
                    PHP70_SOCKETS_PREFIX, PHP73_SOCKETS_PREFIX, nginx_conf
                )
                os.system(c)

        os.system(
            "rm /etc/logrotate.d/php7.0-fpm"
        )  # We remove this otherwise the logrotate cron will be unhappy

        # Reload/restart the php pools
        _run_service_command("restart", "php7.3-fpm")
        _run_service_command("enable", "php7.3-fpm")
        os.system("systemctl stop php7.0-fpm")
        os.system("systemctl disable php7.0-fpm")

        # Reload nginx
        _run_service_command("reload", "nginx")
diff --git a/src/yunohost/data_migrations/0017_postgresql_9p6_to_11.py b/src/yunohost/data_migrations/0017_postgresql_9p6_to_11.py
new file mode 100644
index 000000000..1ccf5ccc9
--- /dev/null
+++ b/src/yunohost/data_migrations/0017_postgresql_9p6_to_11.py
@@ -0,0 +1,82 @@
+import subprocess
+
+from moulinette import m18n
+from yunohost.utils.error import YunohostError, YunohostValidationError
+from moulinette.utils.log import getActionLogger
+
+from yunohost.tools import Migration
+from yunohost.utils.filesystem import free_space_in_directory, space_used_by_directory
+
+logger = getActionLogger("yunohost.migration")
+
+
class MyMigration(Migration):

    "Migrate DBs from Postgresql 9.6 to 11 after migrating to Buster"

    dependencies = ["migrate_to_buster"]

    def run(self):
        """Upgrade the 9.6 cluster to 11 via pg_upgradecluster, then drop 9.6."""

        if not self.package_is_installed("postgresql-9.6"):
            logger.warning(m18n.n("migration_0017_postgresql_96_not_installed"))
            return

        if not self.package_is_installed("postgresql-11"):
            raise YunohostValidationError("migration_0017_postgresql_11_not_installed")

        # Make sure there's a 9.6 cluster
        try:
            self.runcmd("pg_lsclusters | grep -q '^9.6 '")
        except Exception:
            logger.warning(
                "It looks like there's not active 9.6 cluster, so probably don't need to run this migration"
            )
            return

        # Bug fix: the condition was inverted ("if not used > free"), which
        # raised "not enough space" precisely when there WAS enough space and
        # let the migration proceed when there wasn't. pg_upgradecluster
        # duplicates the data, so we need at least as much free space as the
        # 9.6 cluster currently uses.
        if space_used_by_directory("/var/lib/postgresql/9.6") > free_space_in_directory(
            "/var/lib/postgresql"
        ):
            raise YunohostValidationError(
                "migration_0017_not_enough_space", path="/var/lib/postgresql/"
            )

        self.runcmd("systemctl stop postgresql")
        # We do not trigger an exception if this fails, because that probably
        # means cluster 11 doesn't exist yet, which is fine since it gets
        # created during pg_upgradecluster
        self.runcmd("LC_ALL=C pg_dropcluster --stop 11 main || true")
        self.runcmd("LC_ALL=C pg_upgradecluster -m upgrade 9.6 main")
        self.runcmd("LC_ALL=C pg_dropcluster --stop 9.6 main")
        self.runcmd("systemctl start postgresql")

    def package_is_installed(self, package_name):
        """Return True if the given debian package is in the 'ii' (installed) state."""

        (returncode, out, err) = self.runcmd(
            "dpkg --list | grep '^ii ' | grep -q -w {}".format(package_name),
            raise_on_errors=False,
        )
        return returncode == 0

    def runcmd(self, cmd, raise_on_errors=True):
        """Run a shell command through bash.

        Returns (returncode, stdout_lines, stderr_bytes); raises YunohostError
        on a nonzero exit code unless raise_on_errors is False.
        """

        logger.debug("Running command: " + cmd)

        p = subprocess.Popen(
            cmd,
            shell=True,
            executable="/bin/bash",
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )

        out, err = p.communicate()
        returncode = p.returncode
        if raise_on_errors and returncode != 0:
            raise YunohostError(
                "Failed to run command '{}'.\nreturncode: {}\nstdout:\n{}\nstderr:\n{}\n".format(
                    cmd, returncode, out, err
                )
            )

        # stdout is kept as a list of bytes lines
        out = out.strip().split(b"\n")
        return (returncode, out, err)
diff --git a/src/yunohost/data_migrations/0018_xtable_to_nftable.py b/src/yunohost/data_migrations/0018_xtable_to_nftable.py
new file mode 100644
index 000000000..94b47d944
--- /dev/null
+++ b/src/yunohost/data_migrations/0018_xtable_to_nftable.py
@@ -0,0 +1,126 @@
+import os
+import subprocess
+
+from moulinette import m18n
+from yunohost.utils.error import YunohostError
+from moulinette.utils.log import getActionLogger
+
+from yunohost.firewall import firewall_reload
+from yunohost.service import service_restart
+from yunohost.tools import Migration
+
+logger = getActionLogger("yunohost.migration")
+
+
class MyMigration(Migration):

    "Migrate legacy iptables rules from stretch that relied on xtable and should now rely on nftable"

    dependencies = ["migrate_to_buster"]

    def run(self):
        """Back up the legacy rules, replay them into nftables-backed iptables,
        then wipe the legacy tables and reload the firewall."""

        # Probe whether each stack is usable at all on this kernel/container
        self.do_ipv4 = os.system("iptables -w -L >/dev/null") == 0
        self.do_ipv6 = os.system("ip6tables -w -L >/dev/null") == 0

        if not self.do_ipv4:
            logger.warning(m18n.n("iptables_unavailable"))
        if not self.do_ipv6:
            logger.warning(m18n.n("ip6tables_unavailable"))

        backup_folder = "/home/yunohost.backup/premigration/xtable_to_nftable/"
        if not os.path.exists(backup_folder):
            os.makedirs(backup_folder, 0o750)
        self.backup_rules_ipv4 = os.path.join(backup_folder, "legacy_rules_ipv4")
        self.backup_rules_ipv6 = os.path.join(backup_folder, "legacy_rules_ipv6")

        # Backup existing legacy rules to be able to rollback
        if self.do_ipv4 and not os.path.exists(self.backup_rules_ipv4):
            self.runcmd(
                "iptables-legacy -L >/dev/null"
            )  # For some reason if we don't do this, iptables-legacy-save is empty ?
            self.runcmd("iptables-legacy-save > %s" % self.backup_rules_ipv4)
            assert (
                open(self.backup_rules_ipv4).read().strip()
            ), "Uhoh backup of legacy ipv4 rules is empty !?"
        if self.do_ipv6 and not os.path.exists(self.backup_rules_ipv6):
            self.runcmd(
                "ip6tables-legacy -L >/dev/null"
            )  # For some reason if we don't do this, iptables-legacy-save is empty ?
            self.runcmd("ip6tables-legacy-save > %s" % self.backup_rules_ipv6)
            assert (
                open(self.backup_rules_ipv6).read().strip()
            ), "Uhoh backup of legacy ipv6 rules is empty !?"

        # We inject the legacy rules (iptables-legacy) into the new iptable (just "iptables")
        try:
            if self.do_ipv4:
                self.runcmd("iptables-legacy-save | iptables-restore")
            if self.do_ipv6:
                self.runcmd("ip6tables-legacy-save | ip6tables-restore")
        except Exception as e:
            self.rollback()
            raise YunohostError(
                "migration_0018_failed_to_migrate_iptables_rules", error=e
            )

        # Reset everything in iptables-legacy
        # Stolen from https://serverfault.com/a/200642
        try:
            if self.do_ipv4:
                self.runcmd(
                    "iptables-legacy-save | awk '/^[*]/ { print $1 }"  # Keep lines like *raw, *filter and *nat
                    ' /^:[A-Z]+ [^-]/ { print $1 " ACCEPT" ; }'  # Turn all policies to accept
                    " /COMMIT/ { print $0; }'"  # Keep the line COMMIT
                    " | iptables-legacy-restore"
                )
            if self.do_ipv6:
                self.runcmd(
                    "ip6tables-legacy-save | awk '/^[*]/ { print $1 }"  # Keep lines like *raw, *filter and *nat
                    ' /^:[A-Z]+ [^-]/ { print $1 " ACCEPT" ; }'  # Turn all policies to accept
                    " /COMMIT/ { print $0; }'"  # Keep the line COMMIT
                    " | ip6tables-legacy-restore"
                )
        except Exception as e:
            self.rollback()
            raise YunohostError("migration_0018_failed_to_reset_legacy_rules", error=e)

        # You might be wondering "uh but is it really useful to
        # iptables-legacy-save | iptables-restore considering firewall_reload()
        # flush/resets everything anyway ?"
        # But the answer is : firewall_reload() only resets the *filter table.
        # On more complex setups (e.g. internet cube or docker) you will also
        # have rules in the *nat (or maybe *raw?) sections of iptables.
        firewall_reload()
        service_restart("fail2ban")

    def rollback(self):
        """Restore the legacy rules saved at the start of run()."""

        if self.do_ipv4:
            self.runcmd("iptables-legacy-restore < %s" % self.backup_rules_ipv4)
        if self.do_ipv6:
            # Bug fix: the ipv6 backup was previously restored with
            # iptables-legacy-restore (the ipv4 tool), clobbering the ipv4
            # tables and never restoring ipv6
            self.runcmd("ip6tables-legacy-restore < %s" % self.backup_rules_ipv6)

    def runcmd(self, cmd, raise_on_errors=True):
        """Run a shell command through bash.

        Returns (returncode, stdout_lines, stderr_bytes); raises YunohostError
        on a nonzero exit code unless raise_on_errors is False.
        """

        logger.debug("Running command: " + cmd)

        p = subprocess.Popen(
            cmd,
            shell=True,
            executable="/bin/bash",
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )

        out, err = p.communicate()
        returncode = p.returncode
        if raise_on_errors and returncode != 0:
            raise YunohostError(
                "Failed to run command '{}'.\nreturncode: {}\nstdout:\n{}\nstderr:\n{}\n".format(
                    cmd, returncode, out, err
                )
            )

        out = out.strip().split(b"\n")
        return (returncode, out, err)
diff --git a/src/yunohost/data_migrations/0019_extend_permissions_features.py b/src/yunohost/data_migrations/0019_extend_permissions_features.py
new file mode 100644
index 000000000..5d4343deb
--- /dev/null
+++ b/src/yunohost/data_migrations/0019_extend_permissions_features.py
@@ -0,0 +1,107 @@
+from moulinette import m18n
+from moulinette.utils.log import getActionLogger
+
+from yunohost.tools import Migration
+from yunohost.permission import user_permission_list
+from yunohost.utils.legacy import migrate_legacy_permission_settings
+
+logger = getActionLogger("yunohost.migration")
+
+
class MyMigration(Migration):
    """
    Add protected attribute in LDAP permission
    """

    introduced_in_version = "4.1"

    @Migration.ldap_migration
    def run(self, backup_folder):

        # Update LDAP database
        self.add_new_ldap_attributes()

        # Migrate old settings
        migrate_legacy_permission_settings()

    def add_new_ldap_attributes(self):
        """Regenerate the slapd schema, then set the new authHeader/label/
        showTile/isProtected attributes on every existing permission."""

        from yunohost.utils.ldap import _get_ldap_interface
        from yunohost.regenconf import regen_conf, BACKUP_CONF_DIR

        # Check if the migration can be processed
        ldap_regen_conf_status = regen_conf(names=["slapd"], dry_run=True)
        # By this we check if the slapd config has been customized
        if ldap_regen_conf_status and ldap_regen_conf_status["slapd"]["pending"]:
            logger.warning(
                m18n.n(
                    "migration_0019_slapd_config_will_be_overwritten",
                    conf_backup_folder=BACKUP_CONF_DIR,
                )
            )

        # Update LDAP schema restart slapd
        logger.info(m18n.n("migration_update_LDAP_schema"))
        regen_conf(names=["slapd"], force=True)

        logger.info(m18n.n("migration_0019_add_new_attributes_in_ldap"))
        ldap = _get_ldap_interface()
        permission_list = user_permission_list(full=True)["permissions"]

        # Labels for "system" (non-app) permissions. Hoisted out of the loop
        # (loop-invariant). Bug fix: the sftp label was misspelled "STFP".
        system_perms = {
            "mail": "E-mail",
            "xmpp": "XMPP",
            "ssh": "SSH",
            "sftp": "SFTP",
        }

        for permission in permission_list:
            if permission.split(".")[0] in system_perms:
                update = {
                    "authHeader": ["FALSE"],
                    "label": [system_perms[permission.split(".")[0]]],
                    "showTile": ["FALSE"],
                    "isProtected": ["TRUE"],
                }
            else:
                app, subperm_name = permission.split(".")
                if permission.endswith(".main"):
                    update = {
                        "authHeader": ["TRUE"],
                        "label": [
                            app
                        ],  # Note that this is later re-changed during the call to migrate_legacy_permission_settings() if a 'label' setting exists
                        "showTile": ["TRUE"],
                        "isProtected": ["FALSE"],
                    }
                else:
                    update = {
                        "authHeader": ["TRUE"],
                        "label": [subperm_name.title()],
                        "showTile": ["FALSE"],
                        "isProtected": ["TRUE"],
                    }

            ldap.update("cn=%s,ou=permission" % permission, update)

    def run_after_system_restore(self):
        # Update LDAP database
        self.add_new_ldap_attributes()

    def run_before_app_restore(self, app_id):
        from yunohost.app import app_setting
        from yunohost.utils.legacy import migrate_legacy_permission_settings

        # Migrate old settings
        legacy_permission_settings = [
            "skipped_uris",
            "unprotected_uris",
            "protected_uris",
            "skipped_regex",
            "unprotected_regex",
            "protected_regex",
        ]
        if any(
            app_setting(app_id, setting) is not None
            for setting in legacy_permission_settings
        ):
            migrate_legacy_permission_settings(app=app_id)
diff --git a/src/yunohost/data_migrations/0020_ssh_sftp_permissions.py b/src/yunohost/data_migrations/0020_ssh_sftp_permissions.py
new file mode 100644
index 000000000..f1dbcd1e7
--- /dev/null
+++ b/src/yunohost/data_migrations/0020_ssh_sftp_permissions.py
@@ -0,0 +1,100 @@
+import subprocess
+import os
+
+from moulinette import m18n
+from moulinette.utils.log import getActionLogger
+
+from yunohost.tools import Migration
+from yunohost.permission import user_permission_update, permission_sync_to_user
+from yunohost.regenconf import manually_modified_files
+
+logger = getActionLogger("yunohost.migration")
+
+###################################################
+# Tools used also for restoration
+###################################################
+
+
class MyMigration(Migration):
    """
    Add new permissions around SSH/SFTP features
    """

    introduced_in_version = "4.2.2"
    dependencies = ["extend_permissions_features"]

    @Migration.ldap_migration
    def run(self, *args):
        """Create the ssh.main / sftp.main LDAP permissions (if missing),
        give existing shell users ssh.main, and warn about insecure sshd confs."""

        from yunohost.utils.ldap import _get_ldap_interface

        ldap = _get_ldap_interface()

        # Names (cn) of permissions already present in LDAP
        existing_perms_raw = ldap.search(
            "ou=permission,dc=yunohost,dc=org", "(objectclass=permissionYnh)", ["cn"]
        )
        existing_perms = [perm["cn"][0] for perm in existing_perms_raw]

        # Add SSH and SFTP permissions
        # NOTE(review): gidNumbers 5003/5004 appear to be hardcoded reserved
        # ids for these system permissions — confirm against the LDAP schema
        if "sftp.main" not in existing_perms:
            ldap.add(
                "cn=sftp.main,ou=permission",
                {
                    "cn": "sftp.main",
                    "gidNumber": "5004",
                    "objectClass": ["posixGroup", "permissionYnh"],
                    "groupPermission": [],
                    "authHeader": "FALSE",
                    "label": "SFTP",
                    "showTile": "FALSE",
                    "isProtected": "TRUE",
                },
            )

        if "ssh.main" not in existing_perms:
            ldap.add(
                "cn=ssh.main,ou=permission",
                {
                    "cn": "ssh.main",
                    "gidNumber": "5003",
                    "objectClass": ["posixGroup", "permissionYnh"],
                    "groupPermission": [],
                    "authHeader": "FALSE",
                    "label": "SSH",
                    "showTile": "FALSE",
                    "isProtected": "TRUE",
                },
            )

        # Add a bash terminal to each users
        users = ldap.search(
            "ou=users,dc=yunohost,dc=org",
            filter="(loginShell=*)",
            attrs=["dn", "uid", "loginShell"],
        )
        for user in users:
            if user["loginShell"][0] == "/bin/false":
                # Shell-less users get /bin/bash ...
                dn = user["dn"][0].replace(",dc=yunohost,dc=org", "")
                ldap.update(dn, {"loginShell": ["/bin/bash"]})
            else:
                # ... users who already had a real shell keep ssh access
                user_permission_update(
                    "ssh.main", add=user["uid"][0], sync_perm=False
                )

        permission_sync_to_user()

        # Somehow this is needed otherwise the PAM thing doesn't forget about the
        # old loginShell value ?
        subprocess.call(["nscd", "-i", "passwd"])

        # Warn if the admin manually edited sshd_config without restricting
        # who may log in (AllowGroups/AllowUsers)
        if (
            "/etc/ssh/sshd_config" in manually_modified_files()
            and os.system(
                "grep -q '^ *AllowGroups\\|^ *AllowUsers' /etc/ssh/sshd_config"
            )
            != 0
        ):
            logger.error(m18n.n("diagnosis_sshd_config_insecure"))

    def run_after_system_restore(self):
        # Re-apply the same LDAP changes after restoring a system backup
        self.run()
diff --git a/src/yunohost/diagnosis.py b/src/yunohost/diagnosis.py
new file mode 100644
index 000000000..4ac5e2731
--- /dev/null
+++ b/src/yunohost/diagnosis.py
@@ -0,0 +1,716 @@
+# -*- coding: utf-8 -*-
+
+""" License
+
+ Copyright (C) 2018 YunoHost
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published
+ by the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program; if not, see http://www.gnu.org/licenses
+
+"""
+
+""" diagnosis.py
+
+ Look for possible issues on the server
+"""
+
+import re
+import os
+import time
+
+from moulinette import m18n, Moulinette
+from moulinette.utils import log
+from moulinette.utils.filesystem import (
+ read_json,
+ write_to_json,
+ read_yaml,
+ write_to_yaml,
+)
+
+from yunohost.utils.error import YunohostError, YunohostValidationError
+from yunohost.hook import hook_list, hook_exec
+
+logger = log.getActionLogger("yunohost.diagnosis")
+
+DIAGNOSIS_CACHE = "/var/cache/yunohost/diagnosis/"
+DIAGNOSIS_CONFIG_FILE = "/etc/yunohost/diagnosis.yml"
+DIAGNOSIS_SERVER = "diagnosis.yunohost.org"
+
+
def diagnosis_list():
    """Return the names of all available diagnosis categories."""
    names = [name for name, _path in _list_diagnosis_categories()]
    return {"categories": names}
+
+
def diagnosis_get(category, item):
    """Fetch one cached diagnosis report (or a single item of it).

    `item` may be a list of "key=value" criteria strings; it is converted to a
    dict before being matched against the report items' meta.
    """

    # Validate the requested category against the known ones
    known_categories = [name for name, _path in _list_diagnosis_categories()]
    if category not in known_categories:
        raise YunohostValidationError(
            "diagnosis_unknown_categories", categories=category
        )

    if isinstance(item, list):
        malformed = [criteria for criteria in item if "=" not in criteria]
        if malformed:
            raise YunohostValidationError(
                "Criterias should be of the form key=value (e.g. domain=yolo.test)"
            )

        # Convert the provided criteria into a nice dict
        parts = (criteria.split("=") for criteria in item)
        item = {p[0]: p[1] for p in parts}

    return Diagnoser.get_cached_report(category, item=item)
+
+
def diagnosis_show(
    categories=None, issues=False, full=False, share=False, human_readable=False
):
    """Display the cached diagnosis reports.

    Args:
        categories: list of category names to show (default: all of them)
        issues: only keep items whose status is WARNING or ERROR
        full: keep timestamps, meta/data, and ignored items
        share: paste the reports to yunopaste and show/return the URL
        human_readable: print a plain-text dump instead of returning a dict
    """

    if not os.path.exists(DIAGNOSIS_CACHE):
        logger.warning(m18n.n("diagnosis_never_ran_yet"))
        return

    # Get all the categories
    all_categories = _list_diagnosis_categories()
    all_categories_names = [category for category, _ in all_categories]

    # Check the requested category makes sense
    # (Bug fix: `categories=[]` was a mutable default argument; use None)
    if not categories:
        categories = all_categories_names
    else:
        unknown_categories = [c for c in categories if c not in all_categories_names]
        if unknown_categories:
            raise YunohostValidationError(
                "diagnosis_unknown_categories", categories=", ".join(unknown_categories)
            )

    # Fetch all reports
    all_reports = []
    for category in categories:

        try:
            report = Diagnoser.get_cached_report(category)
        except Exception as e:
            logger.error(m18n.n("diagnosis_failed", category=category, error=str(e)))
            continue

        Diagnoser.i18n(report, force_remove_html_tags=share or human_readable)

        add_ignore_flag_to_issues(report)
        if not full:
            # Strip internal bookkeeping and ignored items for the short view
            del report["timestamp"]
            del report["cached_for"]
            report["items"] = [item for item in report["items"] if not item["ignored"]]
            for item in report["items"]:
                del item["meta"]
                del item["ignored"]
                if "data" in item:
                    del item["data"]
        if issues:
            report["items"] = [
                item
                for item in report["items"]
                if item["status"] in ["WARNING", "ERROR"]
            ]
            # Ignore this category if no issue was found
            if not report["items"]:
                continue

        all_reports.append(report)

    if share:
        from yunohost.utils.yunopaste import yunopaste

        content = _dump_human_readable_reports(all_reports)
        url = yunopaste(content)

        logger.info(m18n.n("log_available_on_yunopaste", url=url))
        if Moulinette.interface.type == "api":
            return {"url": url}
        else:
            return
    elif human_readable:
        print(_dump_human_readable_reports(all_reports))
    else:
        return {"reports": all_reports}
+
+
+def _dump_human_readable_reports(reports):
+
+ output = ""
+
+ for report in reports:
+ output += "=================================\n"
+ output += "{description} ({id})\n".format(**report)
+ output += "=================================\n\n"
+ for item in report["items"]:
+ output += "[{status}] {summary}\n".format(**item)
+ for detail in item.get("details", []):
+ output += " - " + detail.replace("\n", "\n ") + "\n"
+ output += "\n"
+ output += "\n\n"
+
+ return output
+
+
def diagnosis_run(
    categories=None, force=False, except_if_never_ran_yet=False, email=False
):
    """Run the diagnosis hooks and cache their reports.

    Args:
        categories: list of category names to run (default: all of them)
        force: re-run even if a still-valid cached report exists
        except_if_never_ran_yet: silently skip if diagnosis never ran before
        email: send found issues to root by email afterwards
    """

    # email / except_if_never_ran_yet only make sense once a first run happened
    if (email or except_if_never_ran_yet) and not os.path.exists(DIAGNOSIS_CACHE):
        return

    # Get all the categories
    all_categories = _list_diagnosis_categories()
    all_categories_names = [category for category, _ in all_categories]

    # Check the requested category makes sense
    # (Bug fix: `categories=[]` was a mutable default argument; use None)
    if not categories:
        categories = all_categories_names
    else:
        unknown_categories = [c for c in categories if c not in all_categories_names]
        if unknown_categories:
            raise YunohostValidationError(
                "diagnosis_unknown_categories", categories=", ".join(unknown_categories)
            )

    issues = []
    # Call the hook ...
    diagnosed_categories = []
    for category in categories:
        logger.debug("Running diagnosis for %s ..." % category)
        path = [p for n, p in all_categories if n == category][0]

        try:
            code, report = hook_exec(path, args={"force": force}, env=None)
        except Exception:
            import traceback

            logger.error(
                m18n.n(
                    "diagnosis_failed_for_category",
                    category=category,
                    error="\n" + traceback.format_exc(),
                )
            )
        else:
            diagnosed_categories.append(category)
            if report != {}:
                issues.extend(
                    [
                        item
                        for item in report["items"]
                        if item["status"] in ["WARNING", "ERROR"]
                    ]
                )

    if email:
        _email_diagnosis_issues()
    if issues and Moulinette.interface.type == "cli":
        logger.warning(m18n.n("diagnosis_display_tip"))
+
+
def diagnosis_ignore(filter, list=False):
    # Thin CLI wrapper around _diagnosis_ignore. `filter` and `list` shadow
    # builtins, but the names match the CLI arguments and must stay as-is.
    return _diagnosis_ignore(add_filter=filter, list=list)
+
+
def diagnosis_unignore(filter):
    # Thin CLI wrapper removing a previously added ignore filter.
    return _diagnosis_ignore(remove_filter=filter)
+
+
def _diagnosis_ignore(add_filter=None, remove_filter=None, list=False):
    """
    This action is meant for the admin to ignore issues reported by the
    diagnosis system if they are known and understood by the admin. For
    example, the lack of ipv6 on an instance, or badly configured XMPP dns
    records if the admin doesn't care so much about XMPP. The point being that
    the diagnosis shouldn't keep complaining about those known and "expected"
    issues, and instead focus on new unexpected issues that could arise.

    For example, to ignore badly XMPP dnsrecords for domain yolo.test:

    yunohost diagnosis ignore --add-filter dnsrecords domain=yolo.test category=xmpp
                                           ^              ^                  ^
                                       the general     additional        other
                                       diagnosis       criterias         criteria
                                       category to     to target         to target
                                       act on          specific          specific
                                                       reports           reports
    Or to ignore all dnsrecords issues:

    yunohost diagnosis ignore --add-filter dnsrecords

    The filters are stored in the diagnosis configuration in a data structure like:

    ignore_filters: {
        "ip": [
            {"version": 6}   # Ignore all issues related to ipv6
        ],
        "dnsrecords": [
            {"domain": "yolo.test", "category": "xmpp"}, # Ignore all issues related to DNS xmpp records for yolo.test
            {}  # Ignore all issues about dnsrecords
        ]
    }
    """

    # Ignore filters are stored in the diagnosis configuration file
    configuration = _diagnosis_read_configuration()

    if list:
        return {"ignore_filters": configuration.get("ignore_filters", {})}

    def validate_filter_criterias(filter_):
        # Parse and validate one filter: [category, "key=value", ...]

        # Get all the categories
        all_categories = _list_diagnosis_categories()
        all_categories_names = [category for category, _ in all_categories]

        # Sanity checks for the provided arguments
        if len(filter_) == 0:
            raise YunohostValidationError(
                "You should provide at least one criteria being the diagnosis category to ignore"
            )
        category = filter_[0]
        if category not in all_categories_names:
            raise YunohostValidationError("%s is not a diagnosis category" % category)
        if any("=" not in criteria for criteria in filter_[1:]):
            raise YunohostValidationError(
                "Criterias should be of the form key=value (e.g. domain=yolo.test)"
            )

        # Convert the provided criteria into a nice dict
        criterias = {c.split("=")[0]: c.split("=")[1] for c in filter_[1:]}

        return category, criterias

    if add_filter:

        category, criterias = validate_filter_criterias(add_filter)

        # Fetch current issues for the requested category
        current_issues_for_this_category = diagnosis_show(
            categories=[category], issues=True, full=True
        )
        current_issues_for_this_category = current_issues_for_this_category["reports"][
            0
        ].get("items", {})

        # Accept the given filter only if the criteria effectively match an existing issue
        if not any(
            issue_matches_criterias(i, criterias)
            for i in current_issues_for_this_category
        ):
            raise YunohostError("No issues was found matching the given criteria.")

        # Make sure the subdicts/lists exists
        if "ignore_filters" not in configuration:
            configuration["ignore_filters"] = {}
        if category not in configuration["ignore_filters"]:
            configuration["ignore_filters"][category] = []

        # Don't store duplicate filters
        if criterias in configuration["ignore_filters"][category]:
            logger.warning("This filter already exists.")
            return

        configuration["ignore_filters"][category].append(criterias)
        _diagnosis_write_configuration(configuration)
        logger.success("Filter added")
        return

    if remove_filter:

        category, criterias = validate_filter_criterias(remove_filter)

        # Make sure the subdicts/lists exists
        if "ignore_filters" not in configuration:
            configuration["ignore_filters"] = {}
        if category not in configuration["ignore_filters"]:
            configuration["ignore_filters"][category] = []

        if criterias not in configuration["ignore_filters"][category]:
            raise YunohostValidationError("This filter does not exists.")

        configuration["ignore_filters"][category].remove(criterias)
        _diagnosis_write_configuration(configuration)
        logger.success("Filter removed")
        return
+
+
def _diagnosis_read_configuration():
    """Load the diagnosis configuration, or an empty dict if it doesn't exist yet."""
    if os.path.exists(DIAGNOSIS_CONFIG_FILE):
        return read_yaml(DIAGNOSIS_CONFIG_FILE)
    return {}
+
+
def _diagnosis_write_configuration(conf):
    # Persist the diagnosis configuration (ignore filters, etc.) to disk
    write_to_yaml(DIAGNOSIS_CONFIG_FILE, conf)
+
+
def issue_matches_criterias(issue, criterias):
    """
    e.g. an issue with:
       meta:
          domain: yolo.test
          category: xmpp

    matches the criterias {"domain": "yolo.test"}
    """
    meta = issue["meta"]
    # Every criteria key must exist in the issue's meta and its (stringified)
    # value must match; an empty criterias dict matches any issue.
    return all(
        key in meta and str(meta[key]) == value for key, value in criterias.items()
    )
+
+
def add_ignore_flag_to_issues(report):
    """
    Iterate over issues in a report, and flag them as ignored if they match an
    ignored filter from the configuration

    N.B. : for convenience. we want to make sure the "ignored" key is set for
    every item in the report
    """

    filters_for_category = (
        _diagnosis_read_configuration().get("ignore_filters", {}).get(report["id"], [])
    )

    for item in report["items"]:
        # Only actual issues (WARNING/ERROR) can be ignored; everything else
        # gets an explicit ignored=False
        is_issue = item["status"] in ["WARNING", "ERROR"]
        item["ignored"] = is_issue and any(
            issue_matches_criterias(item, criterias)
            for criterias in filters_for_category
        )
+
+
+############################################################
+
+
+class Diagnoser:
    def __init__(self, args, env, loggers):
        """Store hook args/env and resolve this diagnoser's cache file and
        human-readable description.

        Assumes `self.id_` is defined by the concrete diagnoser subclass /
        hook glue — TODO confirm.
        """

        # FIXME ? That stuff with custom loggers is weird ... (mainly inherited from the bash hooks, idk)
        self.logger_debug, self.logger_warning, self.logger_info = loggers
        self.env = env
        self.args = args or {}
        self.cache_file = Diagnoser.cache_file(self.id_)
        self.description = Diagnoser.get_description(self.id_)
+
    def cached_time_ago(self):
        # Seconds elapsed since the cached report was written. Returns a huge
        # sentinel value when there's no cache yet, so any cache_duration
        # threshold will trigger a fresh run.
        if not os.path.exists(self.cache_file):
            return 99999999
        return time.time() - os.path.getmtime(self.cache_file)
+
    def write_cache(self, report):
        # Persist the report as JSON, creating the cache dir on first use
        if not os.path.exists(DIAGNOSIS_CACHE):
            os.makedirs(DIAGNOSIS_CACHE)
        return write_to_json(self.cache_file, report)
+
    def diagnose(self):
        """Run this diagnoser and cache its report.

        Returns (exit_code, report): (0, {}) when a still-valid cache exists,
        (1, {}) when a dependency category has errors, otherwise (0, report).

        Assumes the subclass / hook glue defines `self.id_`,
        `self.cache_duration`, `self.dependencies` and `self.run()` —
        TODO confirm.
        """

        # Reuse the cached report if it's recent enough (unless forced)
        if (
            not self.args.get("force", False)
            and self.cached_time_ago() < self.cache_duration
        ):
            self.logger_debug("Cache still valid : %s" % self.cache_file)
            logger.info(
                m18n.n("diagnosis_cache_still_valid", category=self.description)
            )
            return 0, {}

        # Refuse to run if a dependency category has errors (or never ran)
        for dependency in self.dependencies:
            dep_report = Diagnoser.get_cached_report(dependency)

            if dep_report["timestamp"] == -1:  # No cache yet for this dep
                dep_errors = True
            else:
                dep_errors = [
                    item for item in dep_report["items"] if item["status"] == "ERROR"
                ]

            if dep_errors:
                logger.error(
                    m18n.n(
                        "diagnosis_cant_run_because_of_dep",
                        category=self.description,
                        dep=Diagnoser.get_description(dependency),
                    )
                )
                return 1, {}

        items = list(self.run())

        # Drop empty 'details' entries so they don't clutter the report
        for item in items:
            if "details" in item and not item["details"]:
                del item["details"]

        new_report = {"id": self.id_, "cached_for": self.cache_duration, "items": items}

        self.logger_debug("Updating cache %s" % self.cache_file)
        self.write_cache(new_report)
        Diagnoser.i18n(new_report)
        add_ignore_flag_to_issues(new_report)

        # Partition items by status and ignored flag to build the summary log
        errors = [
            item
            for item in new_report["items"]
            if item["status"] == "ERROR" and not item["ignored"]
        ]
        warnings = [
            item
            for item in new_report["items"]
            if item["status"] == "WARNING" and not item["ignored"]
        ]
        errors_ignored = [
            item
            for item in new_report["items"]
            if item["status"] == "ERROR" and item["ignored"]
        ]
        warning_ignored = [
            item
            for item in new_report["items"]
            if item["status"] == "WARNING" and item["ignored"]
        ]
        ignored_msg = (
            " "
            + m18n.n(
                "diagnosis_ignored_issues",
                nb_ignored=len(errors_ignored + warning_ignored),
            )
            if errors_ignored or warning_ignored
            else ""
        )

        # Log one summary line, severity depending on what was found
        if errors and warnings:
            logger.error(
                m18n.n(
                    "diagnosis_found_errors_and_warnings",
                    errors=len(errors),
                    warnings=len(warnings),
                    category=new_report["description"],
                )
                + ignored_msg
            )
        elif errors:
            logger.error(
                m18n.n(
                    "diagnosis_found_errors",
                    errors=len(errors),
                    category=new_report["description"],
                )
                + ignored_msg
            )
        elif warnings:
            logger.warning(
                m18n.n(
                    "diagnosis_found_warnings",
                    warnings=len(warnings),
                    category=new_report["description"],
                )
                + ignored_msg
            )
        else:
            logger.success(
                m18n.n("diagnosis_everything_ok", category=new_report["description"])
                + ignored_msg
            )

        return 0, new_report
+
    @staticmethod
    def cache_file(id_):
        # Path of the cached JSON report for the given category id
        return os.path.join(DIAGNOSIS_CACHE, "%s.json" % id_)
+
    @staticmethod
    def get_cached_report(id_, item=None, warn_if_no_cache=True):
        # Return the cached report dict for a category. If `item` is given,
        # return only the report item whose "meta" equals it ({} if no match).
        cache_file = Diagnoser.cache_file(id_)
        if not os.path.exists(cache_file):
            if warn_if_no_cache:
                logger.warning(m18n.n("diagnosis_no_cache", category=id_))
            # Placeholder report; timestamp == -1 means "never ran yet"
            report = {"id": id_, "cached_for": -1, "timestamp": -1, "items": []}
        else:
            report = read_json(cache_file)
            # The report timestamp is the cache file's modification time
            report["timestamp"] = int(os.path.getmtime(cache_file))

        if item:
            for report_item in report["items"]:
                if report_item.get("meta") == item:
                    return report_item
            return {}
        else:
            return report
+
    @staticmethod
    def get_description(id_):
        # Human-readable description for a category, resolved via i18n
        key = "diagnosis_description_" + id_
        # If no description available, fallback to id
        return m18n.n(key) if m18n.key_exists(key) else id_
+
+ @staticmethod
+ def i18n(report, force_remove_html_tags=False):
+
+ # "Render" the strings with m18n.n
+ # N.B. : we do those m18n.n right now instead of saving the already-translated report
+ # because we can't be sure we'll redisplay the infos with the same locale as it
+ # was generated ... e.g. if the diagnosing happened inside a cron job with locale EN
+ # instead of FR used by the actual admin...
+
+ report["description"] = Diagnoser.get_description(report["id"])
+
+ for item in report["items"]:
+
+ # For the summary and each details, we want to call
+ # m18n() on the string, with the appropriate data for string
+ # formatting which can come from :
+ # - infos super-specific to the summary/details (if it's a tuple(key,dict_with_info) and not just a string)
+ # - 'meta' info = parameters of the test (e.g. which domain/category for DNS conf record)
+ # - actual 'data' retrieved from the test (e.g. actual global IP, ...)
+
+ meta_data = item.get("meta", {}).copy()
+ meta_data.update(item.get("data", {}))
+
+ html_tags = re.compile(r"<[^>]+>")
+
+ def m18n_(info):
+ if not isinstance(info, tuple) and not isinstance(info, list):
+ info = (info, {})
+ info[1].update(meta_data)
+ s = m18n.n(info[0], **(info[1]))
+ # In cli, we remove the html tags
+ if Moulinette.interface.type != "api" or force_remove_html_tags:
+ s = s.replace("", "'").replace(" ", "'")
+ s = html_tags.sub("", s.replace("
", "\n"))
+ else:
+ s = s.replace("", "").replace(
+ "
", "
"
+ )
+ # Make it so that links open in new tabs
+ s = s.replace(
+ "URL: %s
Status code: %s
"
+ % (url, r.status_code)
+ )
+ if r.status_code == 400:
+ raise Exception("Diagnosis request was refused: %s" % r.content)
+
+ try:
+ r = r.json()
+ except Exception as e:
+ raise Exception(
+ "Failed to parse json from diagnosis server response.\nError: %s\nOriginal content: %s"
+ % (e, r.content)
+ )
+
+ return r
+
+
def _list_diagnosis_categories():
    """Return (name, hook_path) pairs for all diagnosis hooks, sorted by priority."""
    hooks_by_priority = hook_list("diagnosis", list_by="priority", show_info=True)[
        "hooks"
    ]

    pairs = []
    for _priority, group in sorted(hooks_by_priority.items(), key=lambda kv: int(kv[0])):
        pairs.extend((name, info["path"]) for name, info in group.items())

    return pairs
+
+
def _email_diagnosis_issues():
    """Email the current diagnosis issues (if any) to root via local SMTP."""
    from yunohost.domain import _get_maindomain

    maindomain = _get_maindomain()
    from_ = "diagnosis@%s (Automatic diagnosis on %s)" % (maindomain, maindomain)
    to_ = "root"
    subject_ = "Issues found by automatic diagnosis on %s" % maindomain

    disclaimer = "The automatic diagnosis on your YunoHost server identified some issues on your server. You will find a description of the issues below. You can manage those issues in the 'Diagnosis' section in your webadmin."

    # Nothing to send if there are no current issues
    issues = diagnosis_show(issues=True)["reports"]
    if not issues:
        return

    content = _dump_human_readable_reports(issues)

    # Raw RFC-822-style message: headers, disclaimer, separator, report dump
    message = """\
From: %s
To: %s
Subject: %s

%s

---

%s
""" % (
        from_,
        to_,
        subject_,
        disclaimer,
        content,
    )

    import smtplib

    # Send through the local mail server
    smtp = smtplib.SMTP("localhost")
    smtp.sendmail(from_, [to_], message.encode("utf-8"))
    smtp.quit()
diff --git a/src/yunohost/dns.py b/src/yunohost/dns.py
new file mode 100644
index 000000000..0581fa82c
--- /dev/null
+++ b/src/yunohost/dns.py
@@ -0,0 +1,1002 @@
+# -*- coding: utf-8 -*-
+
+""" License
+
+ Copyright (C) 2013 YunoHost
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published
+ by the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program; if not, see http://www.gnu.org/licenses
+
+"""
+
+""" yunohost_domain.py
+
+ Manage domains
+"""
+import os
+import re
+import time
+
+from difflib import SequenceMatcher
+from collections import OrderedDict
+
+from moulinette import m18n, Moulinette
+from moulinette.utils.log import getActionLogger
+from moulinette.utils.filesystem import read_file, write_to_file, read_toml
+
+from yunohost.domain import (
+ domain_list,
+ _assert_domain_exists,
+ domain_config_get,
+ _get_domain_settings,
+ _set_domain_settings,
+)
+from yunohost.utils.dns import dig, YNH_DYNDNS_DOMAINS
+from yunohost.utils.error import YunohostValidationError, YunohostError
+from yunohost.utils.network import get_public_ip
+from yunohost.log import is_unit_operation
+from yunohost.hook import hook_callback
+
+logger = getActionLogger("yunohost.domain")
+
+DOMAIN_REGISTRAR_LIST_PATH = "/usr/share/yunohost/other/registrar_list.toml"
+
+
def domain_dns_suggest(domain):
    """
    Generate the recommended DNS records for a domain, as a zone-file-like string

    Keyword argument:
        domain -- Domain name

    """

    _assert_domain_exists(domain)

    dns_conf = _build_dns_conf(domain)

    result = ""

    if dns_conf["basic"]:
        result += "; Basic ipv4/ipv6 records"
        for record in dns_conf["basic"]:
            result += "\n{name} {ttl} IN {type} {value}".format(**record)

    if dns_conf["mail"]:
        result += "\n\n"
        result += "; Mail"
        for record in dns_conf["mail"]:
            result += "\n{name} {ttl} IN {type} {value}".format(**record)
        result += "\n\n"

    if dns_conf["xmpp"]:
        result += "\n\n"
        result += "; XMPP"
        for record in dns_conf["xmpp"]:
            result += "\n{name} {ttl} IN {type} {value}".format(**record)

    if dns_conf["extra"]:
        # Fix: this separator was missing, which glued "; Extra" onto the
        # last XMPP record's line (all other sections emit it)
        result += "\n\n"
        result += "; Extra"
        for record in dns_conf["extra"]:
            result += "\n{name} {ttl} IN {type} {value}".format(**record)

    # Any remaining categories come from custom_dns_rules hooks (see _build_dns_conf)
    for name, record_list in dns_conf.items():
        if name not in ("basic", "xmpp", "mail", "extra") and record_list:
            result += "\n\n"
            result += "; " + name
            for record in record_list:
                result += "\n{name} {ttl} IN {type} {value}".format(**record)

    if Moulinette.interface.type == "cli":
        # FIXME Update this to point to our "dns push" doc
        logger.info(m18n.n("domain_dns_conf_is_just_a_recommendation"))

    return result
+
+
def _list_subdomains_of(parent_domain):
    """Return every declared domain that is a subdomain of parent_domain."""

    _assert_domain_exists(parent_domain)

    suffix = f".{parent_domain}"
    return [d for d in domain_list()["domains"] if d.endswith(suffix)]
+
+
def _build_dns_conf(base_domain, include_empty_AAAA_if_no_ipv6=False):
    """
    Internal function that returns a data structure containing the needed
    information to generate/adapt the dns configuration

    Arguments:
        base_domain -- The domain to build the conf for; its declared
                       subdomains are included as well (except for dyndns
                       domains, see below)
        include_empty_AAAA_if_no_ipv6 -- If True, emit AAAA records with a
                       None value when the server has no public ipv6

    The returned datastructure will have the following form:
    {
        "basic": [
            # if ipv4 available
            {"type": "A", "name": "@", "value": "123.123.123.123", "ttl": 3600},
            # if ipv6 available
            {"type": "AAAA", "name": "@", "value": "valid-ipv6", "ttl": 3600},
        ],
        "xmpp": [
            {"type": "SRV", "name": "_xmpp-client._tcp", "value": "0 5 5222 domain.tld.", "ttl": 3600},
            {"type": "SRV", "name": "_xmpp-server._tcp", "value": "0 5 5269 domain.tld.", "ttl": 3600},
            {"type": "CNAME", "name": "muc", "value": "@", "ttl": 3600},
            {"type": "CNAME", "name": "pubsub", "value": "@", "ttl": 3600},
            {"type": "CNAME", "name": "vjud", "value": "@", "ttl": 3600},
            {"type": "CNAME", "name": "xmpp-upload", "value": "@", "ttl": 3600},
        ],
        "mail": [
            {"type": "MX", "name": "@", "value": "10 domain.tld.", "ttl": 3600},
            {"type": "TXT", "name": "@", "value": "\"v=spf1 a mx ip4:123.123.123.123 ipv6:valid-ipv6 -all\"", "ttl": 3600 },
            {"type": "TXT", "name": "mail._domainkey", "value": "\"v=DKIM1; k=rsa; p=some-super-long-key\"", "ttl": 3600},
            {"type": "TXT", "name": "_dmarc", "value": "\"v=DMARC1; p=none\"", "ttl": 3600},
        ],
        "extra": [
            # if ipv4 available
            {"type": "A", "name": "*", "value": "123.123.123.123", "ttl": 3600},
            # if ipv6 available
            {"type": "AAAA", "name": "*", "value": "valid-ipv6", "ttl": 3600},
            {"type": "CAA", "name": "@", "value": "128 issue \"letsencrypt.org\"", "ttl": 3600},
        ],
        "example_of_a_custom_rule": [
            {"type": "SRV", "name": "_matrix", "value": "domain.tld.", "ttl": 3600}
        ],
    }
    """

    basic = []
    mail = []
    xmpp = []
    extra = []
    ipv4 = get_public_ip()
    ipv6 = get_public_ip(6)

    # If this is a ynh_dyndns_domain, we're not gonna include all the subdomains in the conf
    # Because dynette only accept a specific list of name/type
    # And the wildcard */A already covers the bulk of use cases
    if any(
        base_domain.endswith("." + ynh_dyndns_domain)
        for ynh_dyndns_domain in YNH_DYNDNS_DOMAINS
    ):
        subdomains = []
    else:
        subdomains = _list_subdomains_of(base_domain)

    # Per-domain exported settings (mail_in, mail_out, xmpp, ...) for the
    # base domain and each of its subdomains
    domains_settings = {
        domain: domain_config_get(domain, export=True)
        for domain in [base_domain] + subdomains
    }

    base_dns_zone = _get_dns_zone_for_domain(base_domain)

    for domain, settings in domains_settings.items():

        #   Domain           #   Base DNS zone   # Basename  #  Suffix  #
        # ------------------ # ----------------- # --------- # -------- #
        # domain.tld         # domain.tld        # @         #          #
        # sub.domain.tld     # domain.tld        # sub       # .sub     #
        # foo.sub.domain.tld # domain.tld        # foo.sub   # .foo.sub #
        # sub.domain.tld     # sub.domain.tld    # @         #          #
        # foo.sub.domain.tld # sub.domain.tld    # foo       # .foo     #

        basename = domain.replace(base_dns_zone, "").rstrip(".") or "@"
        suffix = f".{basename}" if basename != "@" else ""

        # FIXME: ttl is currently hard-coded; per-domain setting not wired up yet
        # ttl = settings["ttl"]
        ttl = 3600

        ###########################
        # Basic ipv4/ipv6 records #
        ###########################
        if ipv4:
            basic.append([basename, ttl, "A", ipv4])

        if ipv6:
            basic.append([basename, ttl, "AAAA", ipv6])
        elif include_empty_AAAA_if_no_ipv6:
            basic.append([basename, ttl, "AAAA", None])

        #########
        # Email #
        #########
        if settings["mail_in"]:
            mail.append([basename, ttl, "MX", f"10 {domain}."])

        if settings["mail_out"]:
            mail.append([basename, ttl, "TXT", '"v=spf1 a mx -all"'])

            # DKIM/DMARC record
            dkim_host, dkim_publickey = _get_DKIM(domain)

            if dkim_host:
                mail += [
                    [f"{dkim_host}{suffix}", ttl, "TXT", dkim_publickey],
                    [f"_dmarc{suffix}", ttl, "TXT", '"v=DMARC1; p=none"'],
                ]

        ########
        # XMPP #
        ########
        if settings["xmpp"]:
            xmpp += [
                [
                    f"_xmpp-client._tcp{suffix}",
                    ttl,
                    "SRV",
                    f"0 5 5222 {domain}.",
                ],
                [
                    f"_xmpp-server._tcp{suffix}",
                    ttl,
                    "SRV",
                    f"0 5 5269 {domain}.",
                ],
                [f"muc{suffix}", ttl, "CNAME", basename],
                [f"pubsub{suffix}", ttl, "CNAME", basename],
                [f"vjud{suffix}", ttl, "CNAME", basename],
                [f"xmpp-upload{suffix}", ttl, "CNAME", basename],
            ]

        #########
        # Extra #
        #########

        # Only recommend wildcard and CAA for the top level
        if domain == base_domain:
            if ipv4:
                extra.append([f"*{suffix}", ttl, "A", ipv4])

            if ipv6:
                extra.append([f"*{suffix}", ttl, "AAAA", ipv6])
            elif include_empty_AAAA_if_no_ipv6:
                extra.append([f"*{suffix}", ttl, "AAAA", None])

            extra.append([basename, ttl, "CAA", '128 issue "letsencrypt.org"'])

    ####################
    # Standard records #
    ####################

    # Convert the [name, ttl, type, value] quadruplets accumulated above
    # into the dict format documented in the docstring
    records = {
        "basic": [
            {"name": name, "ttl": ttl_, "type": type_, "value": value}
            for name, ttl_, type_, value in basic
        ],
        "xmpp": [
            {"name": name, "ttl": ttl_, "type": type_, "value": value}
            for name, ttl_, type_, value in xmpp
        ],
        "mail": [
            {"name": name, "ttl": ttl_, "type": type_, "value": value}
            for name, ttl_, type_, value in mail
        ],
        "extra": [
            {"name": name, "ttl": ttl_, "type": type_, "value": value}
            for name, ttl_, type_, value in extra
        ],
    }

    ##################
    # Custom records #
    ##################

    # Defined by custom hooks ships in apps for example ...

    hook_results = hook_callback("custom_dns_rules", args=[base_domain])
    for hook_name, results in hook_results.items():
        #
        # There can be multiple results per hook name, so results look like
        # {'/some/path/to/hook1':
        #       { 'state': 'succeed',
        #         'stdreturn': [{'type': 'SRV',
        #                        'name': 'stuff.foo.bar.',
        #                        'value': 'yoloswag',
        #                        'ttl': 3600}]
        #       },
        #  '/some/path/to/hook2':
        #       { ... },
        #  [...]
        #
        # Loop over the sub-results
        custom_records = [
            v["stdreturn"] for v in results.values() if v and v["stdreturn"]
        ]

        records[hook_name] = []
        for record_list in custom_records:
            # Check that record_list is indeed a list of dict
            # with the required keys
            if (
                not isinstance(record_list, list)
                or any(not isinstance(record, dict) for record in record_list)
                or any(
                    key not in record
                    for record in record_list
                    for key in ["name", "ttl", "type", "value"]
                )
            ):
                # Display an error, mainly for app packagers trying to implement a hook
                logger.warning(
                    "Ignored custom record from hook '%s' because the data is not a *list* of dict with keys name, ttl, type and value. Raw data : %s"
                    % (hook_name, record_list)
                )
                continue

            records[hook_name].extend(record_list)

    return records
+
+
+def _get_DKIM(domain):
+ DKIM_file = "/etc/dkim/{domain}.mail.txt".format(domain=domain)
+
+ if not os.path.isfile(DKIM_file):
+ return (None, None)
+
+ with open(DKIM_file) as f:
+ dkim_content = f.read()
+
+ # Gotta manage two formats :
+ #
+ # Legacy
+ # -----
+ #
+ # mail._domainkey IN TXT ( "v=DKIM1; k=rsa; "
+ # "p=" )
+ #
+ # New
+ # ------
+ #
+ # mail._domainkey IN TXT ( "v=DKIM1; h=sha256; k=rsa; "
+ # "p=" )
+
+ is_legacy_format = " h=sha256; " not in dkim_content
+
+ # Legacy DKIM format
+ if is_legacy_format:
+ dkim = re.match(
+ (
+ r"^(?P[a-z_\-\.]+)[\s]+([0-9]+[\s]+)?IN[\s]+TXT[\s]+"
+ r'[^"]*"v=(?P[^";]+);'
+ r'[\s"]*k=(?P[^";]+);'
+ r'[\s"]*p=(?P[^";]+)'
+ ),
+ dkim_content,
+ re.M | re.S,
+ )
+ else:
+ dkim = re.match(
+ (
+ r"^(?P[a-z_\-\.]+)[\s]+([0-9]+[\s]+)?IN[\s]+TXT[\s]+"
+ r'[^"]*"v=(?P[^";]+);'
+ r'[\s"]*h=(?P[^";]+);'
+ r'[\s"]*k=(?P[^";]+);'
+ r'[\s"]*p=(?P[^";]+)'
+ ),
+ dkim_content,
+ re.M | re.S,
+ )
+
+ if not dkim:
+ return (None, None)
+
+ if is_legacy_format:
+ return (
+ dkim.group("host"),
+ '"v={v}; k={k}; p={p}"'.format(
+ v=dkim.group("v"), k=dkim.group("k"), p=dkim.group("p")
+ ),
+ )
+ else:
+ return (
+ dkim.group("host"),
+ '"v={v}; h={h}; k={k}; p={p}"'.format(
+ v=dkim.group("v"),
+ h=dkim.group("h"),
+ k=dkim.group("k"),
+ p=dkim.group("p"),
+ ),
+ )
+
+
def _get_dns_zone_for_domain(domain):
    """
    Get the DNS zone (zone apex) of a domain

    Keyword arguments:
        domain -- The domain name

    Results are cached on disk for one hour because the NS lookups below
    are expensive when called repeatedly.
    """

    # First, check if domain is a nohost.me / noho.st / ynh.fr
    # This is mainly meant to speed up things for "dyndns update"
    # ... otherwise we end up constantly doing a bunch of dig requests
    for ynh_dyndns_domain in YNH_DYNDNS_DOMAINS:
        if domain.endswith("." + ynh_dyndns_domain):
            return ynh_dyndns_domain

    # Check cache
    cache_folder = "/var/cache/yunohost/dns_zones"
    cache_file = f"{cache_folder}/{domain}"
    cache_duration = 3600  # one hour
    if (
        os.path.exists(cache_file)
        and abs(os.path.getctime(cache_file) - time.time()) < cache_duration
    ):
        dns_zone = read_file(cache_file).strip()
        if dns_zone:
            return dns_zone

    # Check cache for parent domain
    # This is another trick to try to prevent this function from being
    # a bottleneck on system with 1 main domain + 10ish subdomains
    # when building the dns conf for the main domain (which will call domain_config_get, etc...)
    parent_domain = domain.split(".", 1)[1]
    if parent_domain in domain_list()["domains"]:
        parent_cache_file = f"{cache_folder}/{parent_domain}"
        if (
            os.path.exists(parent_cache_file)
            and abs(os.path.getctime(parent_cache_file) - time.time()) < cache_duration
        ):
            dns_zone = read_file(parent_cache_file).strip()
            if dns_zone:
                return dns_zone

    # For foo.bar.baz.gni we want to scan all the parent domains
    # (including the domain itself)
    # foo.bar.baz.gni
    #     bar.baz.gni
    #         baz.gni
    #             gni
    # Until we find the first one that has a NS record
    parent_list = [domain.split(".", i)[-1] for i, _ in enumerate(domain.split("."))]

    for parent in parent_list:

        # Check if there's a NS record for that domain
        answer = dig(parent, rdtype="NS", full_answers=True, resolvers="force_external")
        if answer[0] == "ok":
            # Found the zone apex: cache it for subsequent calls
            # (os.makedirs instead of shelling out to `mkdir -p`)
            os.makedirs(cache_folder, exist_ok=True)
            write_to_file(cache_file, parent)
            return parent

    # No NS record found anywhere: fall back on a best guess
    # (domain minus its leftmost label, or the domain itself)
    if len(parent_list) >= 2:
        zone = parent_list[-2]
    else:
        zone = parent_list[-1]

    logger.warning(
        f"Could not identify the dns zone for domain {domain}, returning {zone}"
    )
    return zone
+
+
def _get_registrar_config_section(domain):
    """
    Build the 'registrar' section of the domain config panel.

    Returns an OrderedDict of config-panel entries: an informational alert
    describing which registrar (if any) handles the domain's DNS zone,
    plus — when a Lexicon provider is found — the credential fields that
    provider requires (loaded from DOMAIN_REGISTRAR_LIST_PATH).
    """

    from lexicon.providers.auto import _relevant_provider_for_domain

    registrar_infos = {}

    dns_zone = _get_dns_zone_for_domain(domain)

    # If parent domain exists in yunohost
    parent_domain = domain.split(".", 1)[1]
    if parent_domain in domain_list()["domains"]:

        # Dirty hack to have a link on the webadmin
        if Moulinette.interface.type == "api":
            parent_domain_link = f"[{parent_domain}](#/domains/{parent_domain}/config)"
        else:
            parent_domain_link = parent_domain

        # NOTE(review): the 'parent_domain' kwarg is filled with the *child*
        # domain while 'parent_domain_link' points to the parent — presumably
        # matching the i18n string's placeholders; confirm against locales
        registrar_infos["registrar"] = OrderedDict(
            {
                "type": "alert",
                "style": "info",
                "ask": m18n.n(
                    "domain_dns_registrar_managed_in_parent_domain",
                    parent_domain=domain,
                    parent_domain_link=parent_domain_link,
                ),
                "value": "parent_domain",
            }
        )
        return OrderedDict(registrar_infos)

    # TODO big project, integrate yunohost's dynette as a registrar-like provider
    # TODO big project, integrate other dyndns providers such as netlib.re, or cf the list of dyndns providers supported by cloudron...
    if dns_zone in YNH_DYNDNS_DOMAINS:
        # Dyndns zones are managed by YunoHost itself: no registrar to configure
        registrar_infos["registrar"] = OrderedDict(
            {
                "type": "alert",
                "style": "success",
                "ask": m18n.n("domain_dns_registrar_yunohost"),
                "value": "yunohost",
            }
        )
        return OrderedDict(registrar_infos)

    try:
        # Let Lexicon guess the provider for this zone (raises ValueError if none)
        registrar = _relevant_provider_for_domain(dns_zone)[0]
    except ValueError:
        registrar_infos["registrar"] = OrderedDict(
            {
                "type": "alert",
                "style": "warning",
                "ask": m18n.n("domain_dns_registrar_not_supported"),
                "value": None,
            }
        )
    else:

        registrar_infos["registrar"] = OrderedDict(
            {
                "type": "alert",
                "style": "info",
                "ask": m18n.n("domain_dns_registrar_supported", registrar=registrar),
                "value": registrar,
            }
        )

        # Providers we have not validated ourselves get an extra warning banner
        TESTED_REGISTRARS = ["ovh", "gandi"]
        if registrar not in TESTED_REGISTRARS:
            registrar_infos["experimental_disclaimer"] = OrderedDict(
                {
                    "type": "alert",
                    "style": "danger",
                    "ask": m18n.n(
                        "domain_dns_registrar_experimental", registrar=registrar
                    ),
                }
            )

        # TODO : add a help tip with the link to the registar's API doc (c.f. Lexicon's README)
        # Expose the provider's credential fields as config questions,
        # normalizing missing 'default'/'optional' attributes
        registrar_list = read_toml(DOMAIN_REGISTRAR_LIST_PATH)
        registrar_credentials = registrar_list[registrar]
        for credential, infos in registrar_credentials.items():
            infos["default"] = infos.get("default", "")
            infos["optional"] = infos.get("optional", "False")
        registrar_infos.update(registrar_credentials)

    return OrderedDict(registrar_infos)
+
+
def _get_registar_settings(domain):
    """Return (registrar_name, credentials) from the domain's dns.registrar config section."""

    _assert_domain_exists(domain)

    settings = domain_config_get(domain, key="dns.registrar", export=True)

    registrar = settings.pop("registrar")
    # The disclaimer entry is informational only, not a credential
    settings.pop("experimental_disclaimer", None)

    return registrar, settings
+
+
@is_unit_operation()
def domain_dns_push(operation_logger, domain, dry_run=False, force=False, purge=False):
    """
    Send DNS records to the previously-configured registrar of the domain.

    Keyword arguments:
        domain -- Domain whose records should be pushed
        dry_run -- Only compute and return the planned changes, push nothing
        force -- Also delete/update records that were not set by YunoHost
        purge -- Delete all records for the domain (implies force)

    Returns the planned changes (dry run), an empty dict on full success,
    or a dict of warnings/errors on (partial) failure.
    """

    from lexicon.client import Client as LexiconClient
    from lexicon.config import ConfigResolver as LexiconConfigResolver

    registrar, registrar_credentials = _get_registar_settings(domain)

    _assert_domain_exists(domain)

    if not registrar or registrar == "None":  # yes it's None as a string
        raise YunohostValidationError("domain_dns_push_not_applicable", domain=domain)

    # FIXME: in the future, properly unify this with yunohost dyndns update
    if registrar == "yunohost":
        logger.info(m18n.n("domain_dns_registrar_yunohost"))
        return {}

    if registrar == "parent_domain":
        parent_domain = domain.split(".", 1)[1]
        # (fixed typo: was 'registar, ...', leaving 'registrar' unassigned)
        registrar, registrar_credentials = _get_registar_settings(parent_domain)
        if any(registrar_credentials.values()):
            raise YunohostValidationError(
                "domain_dns_push_managed_in_parent_domain",
                domain=domain,
                parent_domain=parent_domain,
            )
        else:
            raise YunohostValidationError(
                "domain_registrar_is_not_configured", domain=parent_domain
            )

    if not all(registrar_credentials.values()):
        raise YunohostValidationError(
            "domain_registrar_is_not_configured", domain=domain
        )

    base_dns_zone = _get_dns_zone_for_domain(domain)

    # Convert the generated conf into a format that matches what we'll fetch using the API
    # Makes it easier to compare "wanted records" with "current records on remote"
    wanted_records = []
    for records in _build_dns_conf(domain).values():
        for record in records:

            # Make sure the name is a FQDN
            name = (
                f"{record['name']}.{base_dns_zone}"
                if record["name"] != "@"
                else base_dns_zone
            )
            type_ = record["type"]
            content = record["value"]

            # Make sure the content is also a FQDN (with trailing . ?)
            if content == "@" and record["type"] == "CNAME":
                content = base_dns_zone + "."

            wanted_records.append(
                {"name": name, "type": type_, "ttl": record["ttl"], "content": content}
            )

    # FIXME Lexicon does not support CAA records
    # See https://github.com/AnalogJ/lexicon/issues/282 and https://github.com/AnalogJ/lexicon/pull/371
    # They say it's trivial to implement it!
    # And yet, it is still not done/merged
    # Update by Aleks: it works - at least with Gandi ?!
    # wanted_records = [record for record in wanted_records if record["type"] != "CAA"]

    if purge:
        wanted_records = []
        force = True

    # Construct the base data structure to use lexicon's API.

    base_config = {
        "provider_name": registrar,
        "domain": base_dns_zone,
        registrar: registrar_credentials,
    }

    # Ugly hack to be able to fetch all record types at once:
    # we initialize a LexiconClient with a dummy type "all"
    # (which lexicon doesnt actually understands)
    # then trigger ourselves the authentication + list_records
    # instead of calling .execute()
    query = (
        LexiconConfigResolver()
        .with_dict(dict_object=base_config)
        .with_dict(dict_object={"action": "list", "type": "all"})
    )
    client = LexiconClient(query)
    try:
        client.provider.authenticate()
    except Exception as e:
        raise YunohostValidationError(
            "domain_dns_push_failed_to_authenticate", domain=domain, error=str(e)
        )

    try:
        current_records = client.provider.list_records()
    except Exception as e:
        raise YunohostError("domain_dns_push_failed_to_list", error=str(e))

    managed_dns_records_hashes = _get_managed_dns_records_hashes(domain)

    # Keep only records for relevant types: A, AAAA, MX, TXT, CNAME, SRV
    relevant_types = ["A", "AAAA", "MX", "TXT", "CNAME", "SRV", "CAA"]
    current_records = [r for r in current_records if r["type"] in relevant_types]

    # Ignore records which are for a higher-level domain
    # i.e. we don't care about the records for domain.tld when pushing yuno.domain.tld
    current_records = [
        r
        for r in current_records
        if r["name"].endswith(f".{domain}") or r["name"] == domain
    ]

    for record in current_records:

        # Try to get rid of weird stuff like ".domain.tld" or "@.domain.tld"
        record["name"] = record["name"].strip("@").strip(".")

        # Some API return '@' in content and we shall convert it to absolute/fqdn
        record["content"] = (
            record["content"]
            .replace("@.", base_dns_zone + ".")
            .replace("@", base_dns_zone + ".")
        )

        # Normalize TXT content to always be wrapped in double quotes,
        # matching the format _build_dns_conf produces
        if record["type"] == "TXT":
            if not record["content"].startswith('"'):
                record["content"] = '"' + record["content"]
            if not record["content"].endswith('"'):
                record["content"] = record["content"] + '"'

        # Check if this record was previously set by YunoHost
        record["managed_by_yunohost"] = (
            _hash_dns_record(record) in managed_dns_records_hashes
        )

    # Step 0 : Get the list of unique (type, name)
    # And compare the current and wanted records
    #
    # i.e. we want this kind of stuff:
    #                         wanted             current
    # (A, .domain.tld)        1.2.3.4           1.2.3.4
    # (A, www.domain.tld)     1.2.3.4           5.6.7.8
    # (A, foobar.domain.tld)  1.2.3.4
    # (AAAA, .domain.tld)                      2001::abcd
    # (MX, .domain.tld)      10 domain.tld     [10 mx1.ovh.net, 20 mx2.ovh.net]
    # (TXT, .domain.tld)     "v=spf1 ..."      ["v=spf1", "foobar"]
    # (SRV, .domain.tld)                       0 5 5269 domain.tld
    changes = {"delete": [], "update": [], "create": [], "unchanged": []}

    type_and_names = sorted(
        set([(r["type"], r["name"]) for r in current_records + wanted_records])
    )
    comparison = {
        type_and_name: {"current": [], "wanted": []} for type_and_name in type_and_names
    }

    for record in current_records:
        comparison[(record["type"], record["name"])]["current"].append(record)

    for record in wanted_records:
        comparison[(record["type"], record["name"])]["wanted"].append(record)

    for type_and_name, records in comparison.items():

        #
        # Step 1 : compute a first "diff" where we remove records which are the same on both sides
        #
        wanted_contents = [r["content"] for r in records["wanted"]]
        current_contents = [r["content"] for r in records["current"]]

        current = [r for r in records["current"] if r["content"] not in wanted_contents]
        wanted = [r for r in records["wanted"] if r["content"] not in current_contents]

        #
        # Step 2 : simple case: 0 record on one side, 0 on the other
        # -> either nothing do (0/0) or creations (0/N) or deletions (N/0)
        #
        if len(current) == 0 and len(wanted) == 0:
            # No diff, nothing to do
            changes["unchanged"].extend(records["current"])
            continue

        elif len(wanted) == 0:
            changes["delete"].extend(current)
            continue

        elif len(current) == 0:
            changes["create"].extend(wanted)
            continue

        #
        # Step 3 : N record on one side, M on the other
        #
        # Fuzzy matching strategy:
        # For each wanted record, try to find a current record which looks like the wanted one
        # -> if found, trigger an update
        # -> if no match found, trigger a create
        #
        for record in wanted:

            def likeliness(r):
                # We compute this only on the first 100 chars, to have a high value even for completely different DKIM keys
                return SequenceMatcher(
                    None, r["content"][:100], record["content"][:100]
                ).ratio()

            matches = sorted(current, key=lambda r: likeliness(r), reverse=True)
            if matches and likeliness(matches[0]) > 0.50:
                match = matches[0]
                # Remove the match from 'current' so that it's not added to the removed stuff later
                current.remove(match)
                match["old_content"] = match["content"]
                match["content"] = record["content"]
                changes["update"].append(match)
            else:
                changes["create"].append(record)

        #
        # For all other remaining current records:
        # -> trigger deletions
        #
        for record in current:
            changes["delete"].append(record)

    def relative_name(name):
        # Strip the zone from a FQDN, e.g. "www.domain.tld." -> "www"
        name = name.strip(".")
        name = name.replace("." + base_dns_zone, "")
        name = name.replace(base_dns_zone, "@")
        return name

    def human_readable_record(action, record):
        # One-line, column-aligned summary of a record change for CLI output
        name = relative_name(record["name"])
        name = name[:20]
        t = record["type"]

        if not force and action in ["update", "delete"]:
            ignored = (
                ""
                if record["managed_by_yunohost"]
                else "(ignored, won't be changed by Yunohost unless forced)"
            )
        else:
            ignored = ""

        if action == "create":
            new_content = record.get("content", "(None)")[:30]
            return f"{name:>20} [{t:^5}] {new_content:^30}  {ignored}"
        elif action == "update":
            old_content = record.get("old_content", "(None)")[:30]
            new_content = record.get("content", "(None)")[:30]
            return (
                f"{name:>20} [{t:^5}] {old_content:^30} -> {new_content:^30}  {ignored}"
            )
        elif action == "unchanged":
            old_content = new_content = record.get("content", "(None)")[:30]
            return f"{name:>20} [{t:^5}] {old_content:^30}"
        else:
            old_content = record.get("content", "(None)")[:30]
            return f"{name:>20} [{t:^5}] {old_content:^30}  {ignored}"

    if dry_run:
        if Moulinette.interface.type == "api":
            for records in changes.values():
                for record in records:
                    record["name"] = relative_name(record["name"])
            return changes
        else:
            out = {"delete": [], "create": [], "update": [], "unchanged": []}
            for action in ["delete", "create", "update", "unchanged"]:
                for record in changes[action]:
                    out[action].append(human_readable_record(action, record))

            return out

    # If --force ain't used, we won't delete/update records not managed by yunohost
    if not force:
        for action in ["delete", "update"]:
            changes[action] = [r for r in changes[action] if r["managed_by_yunohost"]]

    def progress(info=""):
        # Simple textual progress bar logged between record pushes
        progress.nb += 1
        width = 20
        bar = int(progress.nb * width / progress.total)
        bar = "[" + "#" * bar + "." * (width - bar) + "]"
        if info:
            bar += " > " + info
        if progress.old == bar:
            return
        progress.old = bar
        logger.info(bar)

    progress.nb = 0
    progress.old = ""
    progress.total = len(changes["delete"] + changes["create"] + changes["update"])

    if progress.total == 0:
        logger.success(m18n.n("domain_dns_push_already_up_to_date"))
        return {}

    #
    # Actually push the records
    #

    operation_logger.start()
    logger.info(m18n.n("domain_dns_pushing"))

    new_managed_dns_records_hashes = [_hash_dns_record(r) for r in changes["unchanged"]]
    results = {"warnings": [], "errors": []}

    for action in ["delete", "create", "update"]:

        for record in changes[action]:

            # (renamed from 'relative_name' which shadowed the helper
            # function defined above)
            record_name = record["name"].replace(base_dns_zone, "").rstrip(".") or "@"
            progress(
                f"{action} {record['type']:^5} / {record_name}"
            )  # FIXME: i18n but meh

            # Apparently Lexicon yields us some 'id' during fetch
            # But wants 'identifier' during push ...
            if "id" in record:
                record["identifier"] = record["id"]
                del record["id"]

            if registrar == "godaddy":
                if record["name"] == base_dns_zone:
                    record["name"] = "@." + record["name"]
                if record["type"] in ["MX", "SRV", "CAA"]:
                    logger.warning(
                        f"Pushing {record['type']} records is not properly supported by Lexicon/Godaddy."
                    )
                    results["warnings"].append(
                        f"Pushing {record['type']} records is not properly supported by Lexicon/Godaddy."
                    )
                    continue

            record["action"] = action
            query = (
                LexiconConfigResolver()
                .with_dict(dict_object=base_config)
                .with_dict(dict_object=record)
            )

            try:
                result = LexiconClient(query).execute()
            except Exception as e:
                msg = m18n.n(
                    "domain_dns_push_record_failed",
                    action=action,
                    type=record["type"],
                    name=record["name"],
                    error=str(e),
                )
                logger.error(msg)
                results["errors"].append(msg)
            else:
                if result:
                    new_managed_dns_records_hashes.append(_hash_dns_record(record))
                else:
                    # (fixed "unkonwn" typo in the user-facing message)
                    msg = m18n.n(
                        "domain_dns_push_record_failed",
                        action=action,
                        type=record["type"],
                        name=record["name"],
                        error="unknown error?",
                    )
                    logger.error(msg)
                    results["errors"].append(msg)

    _set_managed_dns_records_hashes(domain, new_managed_dns_records_hashes)

    # Everything succeeded
    if len(results["errors"]) + len(results["warnings"]) == 0:
        logger.success(m18n.n("domain_dns_push_success"))
        return {}
    # Everything failed
    elif len(results["errors"]) + len(results["warnings"]) == progress.total:
        logger.error(m18n.n("domain_dns_push_failed"))
    else:
        logger.warning(m18n.n("domain_dns_push_partial_failure"))

    return results
+
+
def _get_managed_dns_records_hashes(domain: str) -> list:
    """Return the hashes of DNS records previously pushed by YunoHost for this domain."""
    settings = _get_domain_settings(domain)
    return settings.get("managed_dns_records_hashes", [])
+
+
def _set_managed_dns_records_hashes(domain: str, hashes: list) -> None:
    """Persist the list of record hashes that YunoHost manages for this domain."""
    settings = _get_domain_settings(domain)
    settings["managed_dns_records_hashes"] = hashes if hashes else []
    _set_domain_settings(domain, settings)
+
+
+def _hash_dns_record(record: dict) -> int:
+
+ fields = ["name", "type", "content"]
+ record_ = {f: record.get(f) for f in fields}
+
+ return hash(frozenset(record_.items()))
diff --git a/src/yunohost/domain.py b/src/yunohost/domain.py
index 42a4881ba..1f96ced8a 100644
--- a/src/yunohost/domain.py
+++ b/src/yunohost/domain.py
@@ -24,44 +24,85 @@
Manage domains
"""
import os
-import re
-import yaml
+from typing import Dict, Any
-from moulinette import m18n, msettings
+from moulinette import m18n, Moulinette
from moulinette.core import MoulinetteError
-from yunohost.utils.error import YunohostError
from moulinette.utils.log import getActionLogger
+from moulinette.utils.filesystem import write_to_file, read_yaml, write_to_yaml
-import yunohost.certificate
-
-from yunohost.regenconf import regen_conf
-from yunohost.utils.network import get_public_ip
+from yunohost.app import (
+ app_ssowatconf,
+ _installed_apps,
+ _get_app_settings,
+ _get_conflicting_apps,
+)
+from yunohost.regenconf import regen_conf, _force_clear_hashes, _process_regen_conf
+from yunohost.utils.config import ConfigPanel, Question
+from yunohost.utils.error import YunohostError, YunohostValidationError
from yunohost.log import is_unit_operation
-from yunohost.hook import hook_callback
-logger = getActionLogger('yunohost.domain')
+logger = getActionLogger("yunohost.domain")
+
+DOMAIN_CONFIG_PATH = "/usr/share/yunohost/other/config_domain.toml"
+DOMAIN_SETTINGS_DIR = "/etc/yunohost/domains"
+
+# Lazy dev caching to avoid re-query ldap every time we need the domain list
+domain_list_cache: Dict[str, Any] = {}
-def domain_list():
+def domain_list(exclude_subdomains=False):
"""
List domains
Keyword argument:
- filter -- LDAP filter used to search
- offset -- Starting number for domain fetching
- limit -- Maximum number of domain fetched
+ exclude_subdomains -- Filter out domains that are subdomains of other declared domains
"""
+ global domain_list_cache
+ if not exclude_subdomains and domain_list_cache:
+ return domain_list_cache
+
from yunohost.utils.ldap import _get_ldap_interface
ldap = _get_ldap_interface()
- result = ldap.search('ou=domains,dc=yunohost,dc=org', 'virtualdomain=*', ['virtualdomain'])
+ result = [
+ entry["virtualdomain"][0]
+ for entry in ldap.search(
+ "ou=domains,dc=yunohost,dc=org", "virtualdomain=*", ["virtualdomain"]
+ )
+ ]
result_list = []
for domain in result:
- result_list.append(domain['virtualdomain'][0])
+ if exclude_subdomains:
+ parent_domain = domain.split(".", 1)[1]
+ if parent_domain in result:
+ continue
- return {'domains': result_list}
+ result_list.append(domain)
+
+ def cmp_domain(domain):
+ # Keep the main part of the domain and the extension together
+ # eg: this.is.an.example.com -> ['example.com', 'an', 'is', 'this']
+ domain = domain.split(".")
+ domain[-1] = domain[-2] + domain.pop()
+ domain = list(reversed(domain))
+ return domain
+
+ result_list = sorted(result_list, key=cmp_domain)
+
+ # Don't cache answer if using exclude_subdomains
+ if exclude_subdomains:
+ return {"domains": result_list, "main": _get_maindomain()}
+
+ domain_list_cache = {"domains": result_list, "main": _get_maindomain()}
+ return domain_list_cache
+
+
def _assert_domain_exists(domain):
    # Raise a validation error if `domain` is not among the declared domains
    if domain not in domain_list()["domains"]:
        raise YunohostValidationError("domain_name_unknown", domain=domain)
@is_unit_operation()
@@ -77,208 +118,259 @@ def domain_add(operation_logger, domain, dyndns=False):
from yunohost.hook import hook_callback
from yunohost.app import app_ssowatconf
from yunohost.utils.ldap import _get_ldap_interface
+ from yunohost.certificate import _certificate_install_selfsigned
+
+ if domain.startswith("xmpp-upload."):
+ raise YunohostValidationError("domain_cannot_add_xmpp_upload")
ldap = _get_ldap_interface()
try:
- ldap.validate_uniqueness({'virtualdomain': domain})
+ ldap.validate_uniqueness({"virtualdomain": domain})
except MoulinetteError:
- raise YunohostError('domain_exists')
+ raise YunohostValidationError("domain_exists")
- operation_logger.start()
+ # Lower domain to avoid some edge cases issues
+ # See: https://forum.yunohost.org/t/invalid-domain-causes-diagnosis-web-to-fail-fr-on-demand/11765
+ domain = domain.lower()
+
+ # Non-latin characters (e.g. café.com => xn--caf-dma.com)
+ domain = domain.encode("idna").decode("utf-8")
# DynDNS domain
if dyndns:
- # Do not allow to subscribe to multiple dyndns domains...
- if os.path.exists('/etc/cron.d/yunohost-dyndns'):
- raise YunohostError('domain_dyndns_already_subscribed')
+ from yunohost.dyndns import _dyndns_provides, _guess_current_dyndns_domain
- from yunohost.dyndns import dyndns_subscribe, _dyndns_provides
+ # Do not allow to subscribe to multiple dyndns domains...
+ if _guess_current_dyndns_domain("dyndns.yunohost.org") != (None, None):
+ raise YunohostValidationError("domain_dyndns_already_subscribed")
# Check that this domain can effectively be provided by
# dyndns.yunohost.org. (i.e. is it a nohost.me / noho.st)
if not _dyndns_provides("dyndns.yunohost.org", domain):
- raise YunohostError('domain_dyndns_root_unknown')
+ raise YunohostValidationError("domain_dyndns_root_unknown")
+
+ operation_logger.start()
+
+ if dyndns:
+ from yunohost.dyndns import dyndns_subscribe
# Actually subscribe
dyndns_subscribe(domain=domain)
- try:
- yunohost.certificate._certificate_install_selfsigned([domain], False)
+ _certificate_install_selfsigned([domain], False)
+ try:
attr_dict = {
- 'objectClass': ['mailDomain', 'top'],
- 'virtualdomain': domain,
+ "objectClass": ["mailDomain", "top"],
+ "virtualdomain": domain,
}
- if not ldap.add('virtualdomain=%s,ou=domains' % domain, attr_dict):
- raise YunohostError('domain_creation_failed')
+ try:
+ ldap.add("virtualdomain=%s,ou=domains" % domain, attr_dict)
+ except Exception as e:
+ raise YunohostError("domain_creation_failed", domain=domain, error=e)
+ finally:
+ global domain_list_cache
+ domain_list_cache = {}
# Don't regen these conf if we're still in postinstall
- if os.path.exists('/etc/yunohost/installed'):
- regen_conf(names=['nginx', 'metronome', 'dnsmasq', 'postfix', 'rspamd'])
+ if os.path.exists("/etc/yunohost/installed"):
+        # Sometimes we have weird issues with the regenconf where some files
+        # appear as manually modified even though they weren't touched ...
+ # There are a few ideas why this happens (like backup/restore nginx
+ # conf ... which we shouldnt do ...). This in turns creates funky
+ # situation where the regenconf may refuse to re-create the conf
+ # (when re-creating a domain..)
+        # So here we force-clear the hash out of the regenconf if it exists.
+ # This is a pretty ad hoc solution and only applied to nginx
+ # because it's one of the major service, but in the long term we
+ # should identify the root of this bug...
+ _force_clear_hashes(["/etc/nginx/conf.d/%s.conf" % domain])
+ regen_conf(
+ names=["nginx", "metronome", "dnsmasq", "postfix", "rspamd", "mdns"]
+ )
app_ssowatconf()
- except Exception:
+ except Exception as e:
# Force domain removal silently
try:
- domain_remove(domain, True)
- except:
+ domain_remove(domain, force=True)
+ except Exception:
pass
- raise
+ raise e
- hook_callback('post_domain_add', args=[domain])
+ hook_callback("post_domain_add", args=[domain])
- logger.success(m18n.n('domain_created'))
+ logger.success(m18n.n("domain_created"))
@is_unit_operation()
-def domain_remove(operation_logger, domain, force=False):
+def domain_remove(operation_logger, domain, remove_apps=False, force=False):
"""
Delete domains
Keyword argument:
domain -- Domain to delete
- force -- Force the domain removal
+ remove_apps -- Remove applications installed on the domain
        force -- Force the domain removal and don't ask confirmation to
+ remove apps if remove_apps is specified
"""
from yunohost.hook import hook_callback
- from yunohost.app import app_ssowatconf
+ from yunohost.app import app_ssowatconf, app_info, app_remove
from yunohost.utils.ldap import _get_ldap_interface
- if not force and domain not in domain_list()['domains']:
- raise YunohostError('domain_unknown')
+ # the 'force' here is related to the exception happening in domain_add ...
+ # we don't want to check the domain exists because the ldap add may have
+ # failed
+ if not force:
+ _assert_domain_exists(domain)
# Check domain is not the main domain
if domain == _get_maindomain():
- raise YunohostError('domain_cannot_remove_main')
+ other_domains = domain_list()["domains"]
+ other_domains.remove(domain)
+
+ if other_domains:
+ raise YunohostValidationError(
+ "domain_cannot_remove_main",
+ domain=domain,
+ other_domains="\n * " + ("\n * ".join(other_domains)),
+ )
+ else:
+ raise YunohostValidationError(
+ "domain_cannot_remove_main_add_new_one", domain=domain
+ )
# Check if apps are installed on the domain
- for app in os.listdir('/etc/yunohost/apps/'):
- with open('/etc/yunohost/apps/' + app + '/settings.yml') as f:
- try:
- app_domain = yaml.load(f)['domain']
- except:
- continue
- else:
- if app_domain == domain:
- raise YunohostError('domain_uninstall_app_first')
+ apps_on_that_domain = []
+
+ for app in _installed_apps():
+ settings = _get_app_settings(app)
+ label = app_info(app)["name"]
+ if settings.get("domain") == domain:
+ apps_on_that_domain.append(
+ (
+ app,
+ ' - %s "%s" on https://%s%s'
+ % (app, label, domain, settings["path"])
+ if "path" in settings
+ else app,
+ )
+ )
+
+ if apps_on_that_domain:
+ if remove_apps:
+ if Moulinette.interface.type == "cli" and not force:
+ answer = Moulinette.prompt(
+ m18n.n(
+ "domain_remove_confirm_apps_removal",
+ apps="\n".join([x[1] for x in apps_on_that_domain]),
+ answers="y/N",
+ ),
+ color="yellow",
+ )
+ if answer.upper() != "Y":
+ raise YunohostError("aborting")
+
+ for app, _ in apps_on_that_domain:
+ app_remove(app)
+ else:
+ raise YunohostValidationError(
+ "domain_uninstall_app_first",
+ apps="\n".join([x[1] for x in apps_on_that_domain]),
+ )
operation_logger.start()
- ldap = _get_ldap_interface()
- if ldap.remove('virtualdomain=' + domain + ',ou=domains') or force:
- os.system('rm -rf /etc/yunohost/certs/%s' % domain)
- else:
- raise YunohostError('domain_deletion_failed')
- regen_conf(names=['nginx', 'metronome', 'dnsmasq', 'postfix'])
+ ldap = _get_ldap_interface()
+ try:
+ ldap.remove("virtualdomain=" + domain + ",ou=domains")
+ except Exception as e:
+ raise YunohostError("domain_deletion_failed", domain=domain, error=e)
+ finally:
+ global domain_list_cache
+ domain_list_cache = {}
+
+ stuff_to_delete = [
+ f"/etc/yunohost/certs/{domain}",
+ f"/etc/yunohost/dyndns/K{domain}.+*",
+ f"{DOMAIN_SETTINGS_DIR}/{domain}.yml",
+ ]
+
+ for stuff in stuff_to_delete:
+        os.system(f"rm -rf {stuff}")
+
+    # Sometimes we have weird issues with the regenconf where some files
+    # appear as manually modified even though they weren't touched ...
+ # There are a few ideas why this happens (like backup/restore nginx
+ # conf ... which we shouldnt do ...). This in turns creates funky
+ # situation where the regenconf may refuse to re-create the conf
+ # (when re-creating a domain..)
+ #
+    # So here we force-clear the hash out of the regenconf if it exists.
+ # This is a pretty ad hoc solution and only applied to nginx
+ # because it's one of the major service, but in the long term we
+ # should identify the root of this bug...
+ _force_clear_hashes(["/etc/nginx/conf.d/%s.conf" % domain])
+ # And in addition we even force-delete the file Otherwise, if the file was
+ # manually modified, it may not get removed by the regenconf which leads to
+ # catastrophic consequences of nginx breaking because it can't load the
+ # cert file which disappeared etc..
+ if os.path.exists("/etc/nginx/conf.d/%s.conf" % domain):
+ _process_regen_conf(
+ "/etc/nginx/conf.d/%s.conf" % domain, new_conf=None, save=True
+ )
+
+ regen_conf(names=["nginx", "metronome", "dnsmasq", "postfix", "rspamd", "mdns"])
app_ssowatconf()
- hook_callback('post_domain_remove', args=[domain])
+ hook_callback("post_domain_remove", args=[domain])
- logger.success(m18n.n('domain_deleted'))
+ logger.success(m18n.n("domain_deleted"))
-def domain_dns_conf(domain, ttl=None):
+@is_unit_operation()
+def domain_main_domain(operation_logger, new_main_domain=None):
"""
- Generate DNS configuration for a domain
+ Check the current main domain, or change it
Keyword argument:
- domain -- Domain name
- ttl -- Time to live
+ new_main_domain -- The new domain to be set as the main domain
"""
+ from yunohost.tools import _set_hostname
- ttl = 3600 if ttl is None else ttl
+ # If no new domain specified, we return the current main domain
+ if not new_main_domain:
+ return {"current_main_domain": _get_maindomain()}
- dns_conf = _build_dns_conf(domain, ttl)
+ # Check domain exists
+ _assert_domain_exists(new_main_domain)
- result = ""
+ operation_logger.related_to.append(("domain", new_main_domain))
+ operation_logger.start()
- result += "; Basic ipv4/ipv6 records"
- for record in dns_conf["basic"]:
- result += "\n{name} {ttl} IN {type} {value}".format(**record)
+ # Apply changes to ssl certs
+ try:
+ write_to_file("/etc/yunohost/current_host", new_main_domain)
+ global domain_list_cache
+ domain_list_cache = {}
+ _set_hostname(new_main_domain)
+ except Exception as e:
+ logger.warning("%s" % e, exc_info=1)
+ raise YunohostError("main_domain_change_failed")
- result += "\n\n"
- result += "; XMPP"
- for record in dns_conf["xmpp"]:
- result += "\n{name} {ttl} IN {type} {value}".format(**record)
+ # Generate SSOwat configuration file
+ app_ssowatconf()
- result += "\n\n"
- result += "; Mail"
- for record in dns_conf["mail"]:
- result += "\n{name} {ttl} IN {type} {value}".format(**record)
- result += "\n\n"
+ # Regen configurations
+ if os.path.exists("/etc/yunohost/installed"):
+ regen_conf()
- result += "; Extra"
- for record in dns_conf["extra"]:
- result += "\n{name} {ttl} IN {type} {value}".format(**record)
-
- for name, record_list in dns_conf.items():
- if name not in ("basic", "xmpp", "mail", "extra") and record_list:
- result += "\n\n"
- result += "; " + name
- for record in record_list:
- result += "\n{name} {ttl} IN {type} {value}".format(**record)
-
- is_cli = True if msettings.get('interface') == 'cli' else False
- if is_cli:
- logger.info(m18n.n("domain_dns_conf_is_just_a_recommendation"))
-
- return result
-
-
-def domain_cert_status(domain_list, full=False):
- return yunohost.certificate.certificate_status(domain_list, full)
-
-
-def domain_cert_install(domain_list, force=False, no_checks=False, self_signed=False, staging=False):
- return yunohost.certificate.certificate_install(domain_list, force, no_checks, self_signed, staging)
-
-
-def domain_cert_renew(domain_list, force=False, no_checks=False, email=False, staging=False):
- return yunohost.certificate.certificate_renew(domain_list, force, no_checks, email, staging)
-
-
-def _get_conflicting_apps(domain, path, ignore_app=None):
- """
- Return a list of all conflicting apps with a domain/path (it can be empty)
-
- Keyword argument:
- domain -- The domain for the web path (e.g. your.domain.tld)
- path -- The path to check (e.g. /coffee)
- ignore_app -- An optional app id to ignore (c.f. the change_url usecase)
- """
-
- domain, path = _normalize_domain_path(domain, path)
-
- # Abort if domain is unknown
- if domain not in domain_list()['domains']:
- raise YunohostError('domain_unknown')
-
- # This import cannot be put on top of file because it would create a
- # recursive import...
- from yunohost.app import app_map
-
- # Fetch apps map
- apps_map = app_map(raw=True)
-
- # Loop through all apps to check if path is taken by one of them
- conflicts = []
- if domain in apps_map:
- # Loop through apps
- for p, a in apps_map[domain].items():
- if a["id"] == ignore_app:
- continue
- if path == p:
- conflicts.append((p, a["id"], a["label"]))
- # We also don't want conflicts with other apps starting with
- # same name
- elif path.startswith(p) or p.startswith(path):
- conflicts.append((p, a["id"], a["label"]))
-
- return conflicts
+ logger.success(m18n.n("main_domain_changed"))
def domain_url_available(domain, path):
@@ -294,227 +386,140 @@ def domain_url_available(domain, path):
def _get_maindomain():
- with open('/etc/yunohost/current_host', 'r') as f:
+ with open("/etc/yunohost/current_host", "r") as f:
maindomain = f.readline().rstrip()
return maindomain
-def _set_maindomain(domain):
- with open('/etc/yunohost/current_host', 'w') as f:
- f.write(domain)
-
-
-def _normalize_domain_path(domain, path):
-
- # We want url to be of the format :
- # some.domain.tld/foo
-
- # Remove http/https prefix if it's there
- if domain.startswith("https://"):
- domain = domain[len("https://"):]
- elif domain.startswith("http://"):
- domain = domain[len("http://"):]
-
- # Remove trailing slashes
- domain = domain.rstrip("/").lower()
- path = "/" + path.strip("/")
-
- return domain, path
-
-
-def _build_dns_conf(domain, ttl=3600):
+def domain_config_get(domain, key="", full=False, export=False):
"""
- Internal function that will returns a data structure containing the needed
- information to generate/adapt the dns configuration
-
- The returned datastructure will have the following form:
- {
- "basic": [
- # if ipv4 available
- {"type": "A", "name": "@", "value": "123.123.123.123", "ttl": 3600},
- {"type": "A", "name": "*", "value": "123.123.123.123", "ttl": 3600},
- # if ipv6 available
- {"type": "AAAA", "name": "@", "value": "valid-ipv6", "ttl": 3600},
- {"type": "AAAA", "name": "*", "value": "valid-ipv6", "ttl": 3600},
- ],
- "xmpp": [
- {"type": "SRV", "name": "_xmpp-client._tcp", "value": "0 5 5222 domain.tld.", "ttl": 3600},
- {"type": "SRV", "name": "_xmpp-server._tcp", "value": "0 5 5269 domain.tld.", "ttl": 3600},
- {"type": "CNAME", "name": "muc", "value": "@", "ttl": 3600},
- {"type": "CNAME", "name": "pubsub", "value": "@", "ttl": 3600},
- {"type": "CNAME", "name": "vjud", "value": "@", "ttl": 3600}
- ],
- "mail": [
- {"type": "MX", "name": "@", "value": "10 domain.tld.", "ttl": 3600},
- {"type": "TXT", "name": "@", "value": "\"v=spf1 a mx ip4:123.123.123.123 ipv6:valid-ipv6 -all\"", "ttl": 3600 },
- {"type": "TXT", "name": "mail._domainkey", "value": "\"v=DKIM1; k=rsa; p=some-super-long-key\"", "ttl": 3600},
- {"type": "TXT", "name": "_dmarc", "value": "\"v=DMARC1; p=none\"", "ttl": 3600}
- ],
- "extra": [
- {"type": "CAA", "name": "@", "value": "128 issue \"letsencrypt.org\"", "ttl": 3600},
- ],
- "example_of_a_custom_rule": [
- {"type": "SRV", "name": "_matrix", "value": "domain.tld.", "ttl": 3600}
- ],
- }
+ Display a domain configuration
"""
- ipv4 = get_public_ip()
- ipv6 = get_public_ip(6)
-
- basic = []
-
- # Basic ipv4/ipv6 records
- if ipv4:
- basic += [
- ["@", ttl, "A", ipv4],
- ["*", ttl, "A", ipv4],
- ]
-
- if ipv6:
- basic += [
- ["@", ttl, "AAAA", ipv6],
- ["*", ttl, "AAAA", ipv6],
- ]
-
- # XMPP
- xmpp = [
- ["_xmpp-client._tcp", ttl, "SRV", "0 5 5222 %s." % domain],
- ["_xmpp-server._tcp", ttl, "SRV", "0 5 5269 %s." % domain],
- ["muc", ttl, "CNAME", "@"],
- ["pubsub", ttl, "CNAME", "@"],
- ["vjud", ttl, "CNAME", "@"],
- ]
-
- # SPF record
- spf_record = '"v=spf1 a mx'
- if ipv4:
- spf_record += ' ip4:{ip4}'.format(ip4=ipv4)
- if ipv6:
- spf_record += ' ip6:{ip6}'.format(ip6=ipv6)
- spf_record += ' -all"'
-
- # Email
- mail = [
- ["@", ttl, "MX", "10 %s." % domain],
- ["@", ttl, "TXT", spf_record],
- ]
-
- # DKIM/DMARC record
- dkim_host, dkim_publickey = _get_DKIM(domain)
-
- if dkim_host:
- mail += [
- [dkim_host, ttl, "TXT", dkim_publickey],
- ["_dmarc", ttl, "TXT", '"v=DMARC1; p=none"'],
- ]
-
- # Extra
- extra = [
- ["@", ttl, "CAA", '128 issue "letsencrypt.org"']
- ]
-
- # Official record
- records = {
- "basic": [{"name": name, "ttl": ttl, "type": type_, "value": value} for name, ttl, type_, value in basic],
- "xmpp": [{"name": name, "ttl": ttl, "type": type_, "value": value} for name, ttl, type_, value in xmpp],
- "mail": [{"name": name, "ttl": ttl, "type": type_, "value": value} for name, ttl, type_, value in mail],
- "extra": [{"name": name, "ttl": ttl, "type": type_, "value": value} for name, ttl, type_, value in extra],
- }
-
- # Custom records
- hook_results = hook_callback('custom_dns_rules', args=[domain])
- for hook_name, results in hook_results.items():
- #
- # There can be multiple results per hook name, so results look like
- # {'/some/path/to/hook1':
- # { 'state': 'succeed',
- # 'stdreturn': [{'type': 'SRV',
- # 'name': 'stuff.foo.bar.',
- # 'value': 'yoloswag',
- # 'ttl': 3600}]
- # },
- # '/some/path/to/hook2':
- # { ... },
- # [...]
- #
- # Loop over the sub-results
- custom_records = [v['stdreturn'] for v in results.values()
- if v and v['stdreturn']]
-
- records[hook_name] = []
- for record_list in custom_records:
- # Check that record_list is indeed a list of dict
- # with the required keys
- if not isinstance(record_list, list) \
- or any(not isinstance(record, dict) for record in record_list) \
- or any(key not in record for record in record_list for key in ["name", "ttl", "type", "value"]):
- # Display an error, mainly for app packagers trying to implement a hook
- logger.warning("Ignored custom record from hook '%s' because the data is not a *list* of dict with keys name, ttl, type and value. Raw data : %s" % (hook_name, record_list))
- continue
-
- records[hook_name].extend(record_list)
-
- return records
-
-
-def _get_DKIM(domain):
- DKIM_file = '/etc/dkim/{domain}.mail.txt'.format(domain=domain)
-
- if not os.path.isfile(DKIM_file):
- return (None, None)
-
- with open(DKIM_file) as f:
- dkim_content = f.read()
-
- # Gotta manage two formats :
- #
- # Legacy
- # -----
- #
- # mail._domainkey IN TXT ( "v=DKIM1; k=rsa; "
- # "p=" )
- #
- # New
- # ------
- #
- # mail._domainkey IN TXT ( "v=DKIM1; h=sha256; k=rsa; "
- # "p=" )
-
- is_legacy_format = " h=sha256; " not in dkim_content
-
- # Legacy DKIM format
- if is_legacy_format:
- dkim = re.match((
- r'^(?P[a-z_\-\.]+)[\s]+([0-9]+[\s]+)?IN[\s]+TXT[\s]+'
- '[^"]*"v=(?P[^";]+);'
- '[\s"]*k=(?P