mirror of
https://github.com/YunoHost/yunohost.git
synced 2024-09-03 20:06:10 +02:00
Merge branch 'permission_protection' into remove_legacy_settings
This commit is contained in:
commit
8e1e1e607b
132 changed files with 5700 additions and 3837 deletions
7
.github/PULL_REQUEST_TEMPLATE.md
vendored
7
.github/PULL_REQUEST_TEMPLATE.md
vendored
|
@ -13,10 +13,3 @@
|
|||
## How to test
|
||||
|
||||
...
|
||||
|
||||
## Validation
|
||||
|
||||
- [ ] Principle agreement 0/2 :
|
||||
- [ ] Quick review 0/1 :
|
||||
- [ ] Simple test 0/1 :
|
||||
- [ ] Deep review 0/1 :
|
||||
|
|
113
.gitlab-ci.yml
113
.gitlab-ci.yml
|
@ -1,101 +1,22 @@
|
|||
stages:
|
||||
- postinstall
|
||||
- build
|
||||
- install
|
||||
- tests
|
||||
- lint
|
||||
- doc
|
||||
|
||||
.tests:
|
||||
image: after-postinstall
|
||||
before_script:
|
||||
- apt-get install python-pip -y
|
||||
- mkdir -p .pip
|
||||
- pip install -U pip
|
||||
- hash -d pip
|
||||
- pip --cache-dir=.pip install pytest pytest-sugar pytest-mock requests-mock mock
|
||||
- pushd src/yunohost/tests
|
||||
- >
|
||||
if [ ! -d "./apps" ]; then
|
||||
git clone https://github.com/YunoHost/test_apps ./apps
|
||||
fi
|
||||
- cd apps
|
||||
- git pull > /dev/null 2>&1
|
||||
- popd
|
||||
- export PYTEST_ADDOPTS="--color=yes"
|
||||
cache:
|
||||
paths:
|
||||
- .pip
|
||||
- src/yunohost/tests/apps
|
||||
key: "$CI_JOB_STAGE-$CI_COMMIT_REF_SLUG"
|
||||
default:
|
||||
tags:
|
||||
- yunohost-ci
|
||||
# All jobs are interruptible by default
|
||||
interruptible: true
|
||||
|
||||
postinstall:
|
||||
image: before-postinstall
|
||||
stage: postinstall
|
||||
script:
|
||||
- yunohost tools postinstall -d domain.tld -p the_password --ignore-dyndns
|
||||
variables:
|
||||
YNH_BUILD_DIR: "ynh-build"
|
||||
|
||||
root-tests:
|
||||
extends: .tests
|
||||
stage: tests
|
||||
script:
|
||||
- py.test tests
|
||||
|
||||
test-apps:
|
||||
extends: .tests
|
||||
stage: tests
|
||||
script:
|
||||
- cd src/yunohost
|
||||
- py.test tests/test_apps.py
|
||||
|
||||
test-appscatalog:
|
||||
extends: .tests
|
||||
stage: tests
|
||||
script:
|
||||
- cd src/yunohost
|
||||
- py.test tests/test_appscatalog.py
|
||||
|
||||
test-appurl:
|
||||
extends: .tests
|
||||
stage: tests
|
||||
script:
|
||||
- cd src/yunohost
|
||||
- py.test tests/test_appurl.py
|
||||
|
||||
test-backuprestore:
|
||||
extends: .tests
|
||||
stage: tests
|
||||
script:
|
||||
- cd src/yunohost
|
||||
- py.test tests/test_backuprestore.py
|
||||
|
||||
test-changeurl:
|
||||
extends: .tests
|
||||
stage: tests
|
||||
script:
|
||||
- cd src/yunohost
|
||||
- py.test tests/test_changeurl.py
|
||||
|
||||
test-permission:
|
||||
extends: .tests
|
||||
stage: tests
|
||||
script:
|
||||
- cd src/yunohost
|
||||
- py.test tests/test_permission.py
|
||||
|
||||
test-settings:
|
||||
extends: .tests
|
||||
stage: tests
|
||||
script:
|
||||
- cd src/yunohost
|
||||
- py.test tests/test_settings.py
|
||||
|
||||
test-user-group:
|
||||
extends: .tests
|
||||
stage: tests
|
||||
script:
|
||||
- cd src/yunohost
|
||||
- py.test tests/test_user-group.py
|
||||
|
||||
test-regenconf:
|
||||
extends: .tests
|
||||
stage: tests
|
||||
script:
|
||||
- cd src/yunohost
|
||||
- py.test tests/test_regenconf.py
|
||||
include:
|
||||
- local: .gitlab/ci/build.gitlab-ci.yml
|
||||
- local: .gitlab/ci/install.gitlab-ci.yml
|
||||
- local: .gitlab/ci/test.gitlab-ci.yml
|
||||
- local: .gitlab/ci/lint.gitlab-ci.yml
|
||||
- local: .gitlab/ci/doc.gitlab-ci.yml
|
||||
|
|
52
.gitlab/ci/build.gitlab-ci.yml
Normal file
52
.gitlab/ci/build.gitlab-ci.yml
Normal file
|
@ -0,0 +1,52 @@
|
|||
.build-stage:
|
||||
stage: build
|
||||
image: "before-install"
|
||||
variables:
|
||||
YNH_SOURCE: "https://github.com/yunohost"
|
||||
before_script:
|
||||
- mkdir -p $YNH_BUILD_DIR
|
||||
artifacts:
|
||||
paths:
|
||||
- $YNH_BUILD_DIR/*.deb
|
||||
|
||||
.build_script: &build_script
|
||||
- cd $YNH_BUILD_DIR/$PACKAGE
|
||||
- VERSION=$(dpkg-parsechangelog -S Version 2>/dev/null)
|
||||
- VERSION_NIGHTLY="${VERSION}+$(date +%Y%m%d%H%M)"
|
||||
- dch --package "${PACKAGE}" --force-bad-version -v "${VERSION_NIGHTLY}" -D "unstable" --force-distribution "Daily build."
|
||||
- debuild --no-lintian -us -uc
|
||||
|
||||
########################################
|
||||
# BUILD DEB
|
||||
########################################
|
||||
|
||||
build-yunohost:
|
||||
extends: .build-stage
|
||||
variables:
|
||||
PACKAGE: "yunohost"
|
||||
script:
|
||||
- git ls-files | xargs tar -czf archive.tar.gz
|
||||
- mkdir -p $YNH_BUILD_DIR/$PACKAGE
|
||||
- cat archive.tar.gz | tar -xz -C $YNH_BUILD_DIR/$PACKAGE
|
||||
- rm archive.tar.gz
|
||||
- DEBIAN_FRONTEND=noninteractive apt --assume-yes -o Dpkg::Options::="--force-confold" build-dep $(pwd)/$YNH_BUILD_DIR/$PACKAGE
|
||||
- *build_script
|
||||
|
||||
|
||||
build-ssowat:
|
||||
extends: .build-stage
|
||||
variables:
|
||||
PACKAGE: "ssowat"
|
||||
script:
|
||||
- git clone $YNH_SOURCE/$PACKAGE -b $CI_COMMIT_REF_NAME $YNH_BUILD_DIR/$PACKAGE --depth 1 || git clone $YNH_SOURCE/$PACKAGE $YNH_BUILD_DIR/$PACKAGE --depth 1
|
||||
- DEBIAN_FRONTEND=noninteractive apt --assume-yes -o Dpkg::Options::="--force-confold" build-dep $(pwd)/$YNH_BUILD_DIR/$PACKAGE
|
||||
- *build_script
|
||||
|
||||
build-moulinette:
|
||||
extends: .build-stage
|
||||
variables:
|
||||
PACKAGE: "moulinette"
|
||||
script:
|
||||
- git clone $YNH_SOURCE/$PACKAGE -b $CI_COMMIT_REF_NAME $YNH_BUILD_DIR/$PACKAGE --depth 1 || git clone $YNH_SOURCE/$PACKAGE $YNH_BUILD_DIR/$PACKAGE --depth 1
|
||||
- DEBIAN_FRONTEND=noninteractive apt --assume-yes -o Dpkg::Options::="--force-confold" build-dep $(pwd)/$YNH_BUILD_DIR/$PACKAGE
|
||||
- *build_script
|
14
.gitlab/ci/doc.gitlab-ci.yml
Normal file
14
.gitlab/ci/doc.gitlab-ci.yml
Normal file
|
@ -0,0 +1,14 @@
|
|||
########################################
|
||||
# DOC
|
||||
########################################
|
||||
|
||||
generate-helpers-doc:
|
||||
stage: doc
|
||||
image: "before-install"
|
||||
needs: []
|
||||
script:
|
||||
- cd doc
|
||||
- python generate_helper_doc.py
|
||||
artifacts:
|
||||
paths:
|
||||
- doc/helpers.html
|
29
.gitlab/ci/install.gitlab-ci.yml
Normal file
29
.gitlab/ci/install.gitlab-ci.yml
Normal file
|
@ -0,0 +1,29 @@
|
|||
.install-stage:
|
||||
stage: install
|
||||
needs:
|
||||
- job: build-yunohost
|
||||
artifacts: true
|
||||
- job: build-ssowat
|
||||
artifacts: true
|
||||
- job: build-moulinette
|
||||
artifacts: true
|
||||
|
||||
########################################
|
||||
# INSTALL DEB
|
||||
########################################
|
||||
|
||||
upgrade:
|
||||
extends: .install-stage
|
||||
image: "after-install"
|
||||
script:
|
||||
- apt-get update -o Acquire::Retries=3
|
||||
- DEBIAN_FRONTEND=noninteractive SUDO_FORCE_REMOVE=yes apt --assume-yes -o Dpkg::Options::="--force-confold" --allow-downgrades install ./$YNH_BUILD_DIR/*.deb
|
||||
|
||||
|
||||
install-postinstall:
|
||||
extends: .install-stage
|
||||
image: "before-install"
|
||||
script:
|
||||
- apt-get update -o Acquire::Retries=3
|
||||
- DEBIAN_FRONTEND=noninteractive SUDO_FORCE_REMOVE=yes apt --assume-yes -o Dpkg::Options::="--force-confold" --allow-downgrades install ./$YNH_BUILD_DIR/*.deb
|
||||
- yunohost tools postinstall -d domain.tld -p the_password --ignore-dyndns
|
43
.gitlab/ci/lint.gitlab-ci.yml
Normal file
43
.gitlab/ci/lint.gitlab-ci.yml
Normal file
|
@ -0,0 +1,43 @@
|
|||
########################################
|
||||
# LINTER
|
||||
########################################
|
||||
# later we must fix lint and format-check jobs and remove "allow_failure"
|
||||
|
||||
lint27:
|
||||
stage: lint
|
||||
image: "before-install"
|
||||
needs: []
|
||||
allow_failure: true
|
||||
script:
|
||||
- tox -e py27-lint
|
||||
|
||||
lint37:
|
||||
stage: lint
|
||||
image: "before-install"
|
||||
needs: []
|
||||
allow_failure: true
|
||||
script:
|
||||
- tox -e py37-lint
|
||||
|
||||
invalidcode27:
|
||||
stage: lint
|
||||
image: "before-install"
|
||||
needs: []
|
||||
script:
|
||||
- tox -e py27-invalidcode
|
||||
|
||||
invalidcode37:
|
||||
stage: lint
|
||||
image: "before-install"
|
||||
allow_failure: true
|
||||
needs: []
|
||||
script:
|
||||
- tox -e py37-invalidcode
|
||||
|
||||
format-check:
|
||||
stage: lint
|
||||
image: "before-install"
|
||||
needs: []
|
||||
allow_failure: true
|
||||
script:
|
||||
- tox -e py37-black
|
120
.gitlab/ci/test.gitlab-ci.yml
Normal file
120
.gitlab/ci/test.gitlab-ci.yml
Normal file
|
@ -0,0 +1,120 @@
|
|||
.install_debs: &install_debs
|
||||
- apt-get update -o Acquire::Retries=3
|
||||
- DEBIAN_FRONTEND=noninteractive SUDO_FORCE_REMOVE=yes apt --assume-yes -o Dpkg::Options::="--force-confold" --allow-downgrades install ./$YNH_BUILD_DIR/*.deb
|
||||
|
||||
.test-stage:
|
||||
stage: tests
|
||||
image: "after-install"
|
||||
variables:
|
||||
PYTEST_ADDOPTS: "--color=yes"
|
||||
before_script:
|
||||
- *install_debs
|
||||
cache:
|
||||
paths:
|
||||
- src/yunohost/tests/apps
|
||||
key: "$CI_JOB_STAGE-$CI_COMMIT_REF_SLUG"
|
||||
needs:
|
||||
- job: build-yunohost
|
||||
artifacts: true
|
||||
- job: build-ssowat
|
||||
artifacts: true
|
||||
- job: build-moulinette
|
||||
artifacts: true
|
||||
- job: upgrade
|
||||
|
||||
|
||||
########################################
|
||||
# TESTS
|
||||
########################################
|
||||
|
||||
full-tests:
|
||||
stage: tests
|
||||
image: "before-install"
|
||||
variables:
|
||||
PYTEST_ADDOPTS: "--color=yes"
|
||||
before_script:
|
||||
- *install_debs
|
||||
- yunohost tools postinstall -d domain.tld -p the_password --ignore-dyndns
|
||||
script:
|
||||
- python -m pytest --cov=yunohost tests/ src/yunohost/tests/ --junitxml=report.xml
|
||||
needs:
|
||||
- job: build-yunohost
|
||||
artifacts: true
|
||||
- job: build-ssowat
|
||||
artifacts: true
|
||||
- job: build-moulinette
|
||||
artifacts: true
|
||||
artifacts:
|
||||
reports:
|
||||
junit: report.xml
|
||||
|
||||
root-tests:
|
||||
extends: .test-stage
|
||||
script:
|
||||
- python -m pytest tests
|
||||
|
||||
test-apps:
|
||||
extends: .test-stage
|
||||
script:
|
||||
- cd src/yunohost
|
||||
- python -m pytest tests/test_apps.py
|
||||
|
||||
test-appscatalog:
|
||||
extends: .test-stage
|
||||
script:
|
||||
- cd src/yunohost
|
||||
- python -m pytest tests/test_appscatalog.py
|
||||
|
||||
test-appurl:
|
||||
extends: .test-stage
|
||||
script:
|
||||
- cd src/yunohost
|
||||
- python -m pytest tests/test_appurl.py
|
||||
|
||||
test-apps-arguments-parsing:
|
||||
extends: .test-stage
|
||||
script:
|
||||
- cd src/yunohost
|
||||
- python -m pytest tests/test_apps_arguments_parsing.py
|
||||
|
||||
test-backuprestore:
|
||||
extends: .test-stage
|
||||
script:
|
||||
- cd src/yunohost
|
||||
- python -m pytest tests/test_backuprestore.py
|
||||
|
||||
test-changeurl:
|
||||
extends: .test-stage
|
||||
script:
|
||||
- cd src/yunohost
|
||||
- python -m pytest tests/test_changeurl.py
|
||||
|
||||
test-permission:
|
||||
extends: .test-stage
|
||||
script:
|
||||
- cd src/yunohost
|
||||
- python -m pytest tests/test_permission.py
|
||||
|
||||
test-settings:
|
||||
extends: .test-stage
|
||||
script:
|
||||
- cd src/yunohost
|
||||
- python -m pytest tests/test_settings.py
|
||||
|
||||
test-user-group:
|
||||
extends: .test-stage
|
||||
script:
|
||||
- cd src/yunohost
|
||||
- python -m pytest tests/test_user-group.py
|
||||
|
||||
test-regenconf:
|
||||
extends: .test-stage
|
||||
script:
|
||||
- cd src/yunohost
|
||||
- python -m pytest tests/test_regenconf.py
|
||||
|
||||
test-service:
|
||||
extends: .test-stage
|
||||
script:
|
||||
- cd src/yunohost
|
||||
- python -m pytest tests/test_service.py
|
12
.travis.yml
12
.travis.yml
|
@ -2,12 +2,18 @@ language: python
|
|||
|
||||
matrix:
|
||||
allow_failures:
|
||||
- env: TOXENV=lint
|
||||
- env: TOXENV=py27-lint
|
||||
- env: TOXENV=py37-lint
|
||||
- env: TOXENV=py37-invalidcode
|
||||
include:
|
||||
- python: 2.7
|
||||
env: TOXENV=py27
|
||||
env: TOXENV=py27-lint
|
||||
- python: 2.7
|
||||
env: TOXENV=lint
|
||||
env: TOXENV=py27-invalidcode
|
||||
- python: 3.7
|
||||
env: TOXENV=py37-lint
|
||||
- python: 3.7
|
||||
env: TOXENV=py37-invalidcode
|
||||
|
||||
install:
|
||||
- pip install tox
|
||||
|
|
|
@ -1005,7 +1005,6 @@ service:
|
|||
choices:
|
||||
- file
|
||||
- systemd
|
||||
default: file
|
||||
--test_status:
|
||||
help: Specify a custom bash command to check the status of the service. Note that it only makes sense to specify this if the corresponding systemd service does not return the proper information already.
|
||||
--test_conf:
|
||||
|
@ -1668,6 +1667,10 @@ log:
|
|||
--share:
|
||||
help: Share the full log using yunopaste
|
||||
action: store_true
|
||||
-i:
|
||||
full: --filter-irrelevant
|
||||
help: Do not show some lines deemed not relevant (like set +x or helper argument parsing)
|
||||
action: store_true
|
||||
|
||||
|
||||
#############################
|
||||
|
@ -1697,6 +1700,9 @@ diagnosis:
|
|||
--share:
|
||||
help: Share the logs using yunopaste
|
||||
action: store_true
|
||||
--human-readable:
|
||||
help: Show a human-readable output
|
||||
action: store_true
|
||||
|
||||
run:
|
||||
action_help: Run diagnosis
|
||||
|
@ -1711,6 +1717,9 @@ diagnosis:
|
|||
--except-if-never-ran-yet:
|
||||
help: Don't run anything if diagnosis never ran yet ... (this is meant to be used by the webadmin)
|
||||
action: store_true
|
||||
--email:
|
||||
help: Send an email to root with issues found (this is meant to be used by cron job)
|
||||
action: store_true
|
||||
|
||||
ignore:
|
||||
action_help: Configure some diagnosis results to be ignored and therefore not considered as actual issues
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
# Requires YunoHost version 3.3.1 or higher.
|
||||
ynh_wait_dpkg_free() {
|
||||
local try
|
||||
set +o xtrace # set +x
|
||||
# With seq 1 17, timeout will be almost 30 minutes
|
||||
for try in `seq 1 17`
|
||||
do
|
||||
|
@ -32,13 +33,16 @@ ynh_wait_dpkg_free() {
|
|||
then
|
||||
# If so, that a remaining of dpkg.
|
||||
ynh_print_err "E: dpkg was interrupted, you must manually run 'sudo dpkg --configure -a' to correct the problem."
|
||||
set -o xtrace # set -x
|
||||
return 1
|
||||
fi
|
||||
done 9<<< "$(ls -1 $dpkg_dir)"
|
||||
set -o xtrace # set -x
|
||||
return 0
|
||||
fi
|
||||
done
|
||||
echo "apt still used, but timeout reached !"
|
||||
set -o xtrace # set -x
|
||||
}
|
||||
|
||||
# Check either a package is installed or not
|
||||
|
@ -96,7 +100,7 @@ ynh_package_version() {
|
|||
# Requires YunoHost version 2.4.0.3 or higher.
|
||||
ynh_apt() {
|
||||
ynh_wait_dpkg_free
|
||||
LC_ALL=C DEBIAN_FRONTEND=noninteractive apt-get --assume-yes $@
|
||||
LC_ALL=C DEBIAN_FRONTEND=noninteractive apt-get --assume-yes --quiet -o=Acquire::Retries=3 -o=Dpkg::Use-Pty=0 $@
|
||||
}
|
||||
|
||||
# Update package index files
|
||||
|
@ -188,12 +192,10 @@ ynh_package_install_from_equivs () {
|
|||
(cd "$TMPDIR"
|
||||
LC_ALL=C equivs-build ./control 1> /dev/null
|
||||
dpkg --force-depends --install "./${pkgname}_${pkgversion}_all.deb" 2>&1)
|
||||
# If install fails we use "apt-get check" to try to debug and diagnose possible unmet dependencies
|
||||
# Note the use of { } which allows to group commands without starting a subshell (otherwise the ynh_die wouldn't exit the current shell).
|
||||
# Be careful with the syntax : the semicolon + space at the end is important!
|
||||
|
||||
ynh_package_install --fix-broken || \
|
||||
{ # If the installation failed
|
||||
# (the following is ran inside { } to not start a subshell otherwise ynh_die wouldnt exit the original process)
|
||||
# Get the list of dependencies from the deb
|
||||
local dependencies="$(dpkg --info "$TMPDIR/${pkgname}_${pkgversion}_all.deb" | grep Depends | \
|
||||
sed 's/^ Depends: //' | sed 's/,//g')"
|
||||
|
@ -258,15 +260,17 @@ ynh_install_app_dependencies () {
|
|||
# And we have packages from sury installed (7.0.33-10+weirdshiftafter instead of 7.0.33-0 on debian)
|
||||
if dpkg --list | grep "php7.0" | grep --quiet --invert-match "7.0.33-0+deb9"
|
||||
then
|
||||
# And sury ain't already installed
|
||||
if ! grep --line-number --recursive --quiet "sury" /etc/apt/sources.list*
|
||||
# And sury ain't already in sources.lists
|
||||
if ! grep --recursive --quiet "^ *deb.*sury" /etc/apt/sources.list*
|
||||
then
|
||||
# Re-add sury
|
||||
ynh_install_extra_repo --repo="https://packages.sury.org/php/ $(ynh_get_debian_release) main" --key="https://packages.sury.org/php/apt.gpg" --name=extra_php_version
|
||||
ynh_install_extra_repo --repo="https://packages.sury.org/php/ $(ynh_get_debian_release) main" --key="https://packages.sury.org/php/apt.gpg" --name=extra_php_version --priority=600
|
||||
|
||||
# Pin this sury repository to prevent sury of doing shit
|
||||
ynh_pin_repo --package="*" --pin="origin \"packages.sury.org\"" --priority=200 --name=extra_php_version
|
||||
ynh_pin_repo --package="php${$YNH_DEFAULT_PHP_VERSION}*" --pin="origin \"packages.sury.org\"" --priority=600 --name=extra_php_version --append
|
||||
for package_to_not_upgrade in "php" "php-fpm" "php-mysql" "php-xml" "php-zip" "php-mbstring" "php-ldap" "php-gd" "php-curl" "php-bz2" "php-json" "php-sqlite3" "php-intl" "openssl" "libssl1.1" "libssl-dev"
|
||||
do
|
||||
ynh_pin_repo --package="$package_to_not_upgrade" --pin="origin \"packages.sury.org\"" --priority="-1" --name=extra_php_version --append
|
||||
done
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
@ -331,8 +335,6 @@ ynh_remove_app_dependencies () {
|
|||
ynh_package_autopurge ${dep_app}-ynh-deps # Remove the fake package and its dependencies if they not still used.
|
||||
}
|
||||
|
||||
#=================================================
|
||||
|
||||
# Install packages from an extra repository properly.
|
||||
#
|
||||
# usage: ynh_install_extra_app_dependencies --repo="repo" --package="dep1 dep2" [--key=key_url] [--name=name]
|
||||
|
@ -438,7 +440,8 @@ ynh_install_extra_repo () {
|
|||
if [ -n "$key" ]
|
||||
then
|
||||
mkdir --parents "/etc/apt/trusted.gpg.d"
|
||||
wget --quiet "$key" --output-document=- | gpg --dearmor | $wget_append /etc/apt/trusted.gpg.d/$name.gpg > /dev/null
|
||||
# Timeout option is here to enforce the timeout on dns query and tcp connect (c.f. man wget)
|
||||
wget --timeout 900 --quiet "$key" --output-document=- | gpg --dearmor | $wget_append /etc/apt/trusted.gpg.d/$name.gpg > /dev/null
|
||||
fi
|
||||
|
||||
# Update the list of package with the new repo
|
||||
|
@ -464,8 +467,8 @@ ynh_remove_extra_repo () {
|
|||
|
||||
ynh_secure_remove "/etc/apt/sources.list.d/$name.list"
|
||||
ynh_secure_remove "/etc/apt/preferences.d/$name"
|
||||
ynh_secure_remove "/etc/apt/trusted.gpg.d/$name.gpg"
|
||||
ynh_secure_remove "/etc/apt/trusted.gpg.d/$name.asc"
|
||||
ynh_secure_remove "/etc/apt/trusted.gpg.d/$name.gpg" > /dev/null
|
||||
ynh_secure_remove "/etc/apt/trusted.gpg.d/$name.asc" > /dev/null
|
||||
|
||||
# Update the list of package to exclude the old repo
|
||||
ynh_package_update
|
||||
|
|
|
@ -134,7 +134,7 @@ EOF
|
|||
|
||||
ynh_systemd_action --service_name=fail2ban --action=reload --line_match="(Started|Reloaded) Fail2Ban Service" --log_path=systemd
|
||||
|
||||
local fail2ban_error="$(journalctl --unit=fail2ban | tail --lines=50 | grep "WARNING.*$app.*")"
|
||||
local fail2ban_error="$(journalctl --no-hostname --unit=fail2ban | tail --lines=50 | grep "WARNING.*$app.*")"
|
||||
if [[ -n "$fail2ban_error" ]]
|
||||
then
|
||||
ynh_print_err --message="Fail2ban failed to load the jail for $app"
|
||||
|
|
|
@ -80,10 +80,10 @@ ynh_handle_getopts_args () {
|
|||
arguments[arg]="${arguments[arg]//--${args_array[$option_flag]}-/--${args_array[$option_flag]}\\TOBEREMOVED\\-}"
|
||||
# And replace long option (value of the option_flag) by the short option, the option_flag itself
|
||||
# (e.g. for [u]=user, --user will be -u)
|
||||
# Replace long option with =
|
||||
arguments[arg]="${arguments[arg]//--${args_array[$option_flag]}/-${option_flag} }"
|
||||
# And long option without =
|
||||
arguments[arg]="${arguments[arg]//--${args_array[$option_flag]%=}/-${option_flag}}"
|
||||
# Replace long option with = (match the beginning of the argument)
|
||||
arguments[arg]="$(echo "${arguments[arg]}" | sed "s/^--${args_array[$option_flag]}/-${option_flag} /")"
|
||||
# And long option without = (match the whole line)
|
||||
arguments[arg]="$(echo "${arguments[arg]}" | sed "s/^--${args_array[$option_flag]%=}$/-${option_flag} /")"
|
||||
done
|
||||
done
|
||||
|
||||
|
|
|
@ -25,17 +25,17 @@ ynh_get_ram () {
|
|||
free=${free:-0}
|
||||
total=${total:-0}
|
||||
|
||||
local total_ram=$(vmstat --stats --unit M | grep "total memory" | awk '{print $1}')
|
||||
local total_swap=$(vmstat --stats --unit M | grep "total swap" | awk '{print $1}')
|
||||
local total_ram_swap=$(( total_ram + total_swap ))
|
||||
|
||||
local free_ram=$(vmstat --stats --unit M | grep "free memory" | awk '{print $1}')
|
||||
local free_swap=$(vmstat --stats --unit M | grep "free swap" | awk '{print $1}')
|
||||
local free_ram_swap=$(( free_ram + free_swap ))
|
||||
|
||||
# Use the total amount of ram
|
||||
if [ $free -eq 1 ]
|
||||
if [ $free -eq $total ]
|
||||
then
|
||||
ynh_print_warn --message="You have to choose --free or --total when using ynh_get_ram"
|
||||
ram=0
|
||||
# Use the total amount of ram
|
||||
elif [ $free -eq 1 ]
|
||||
then
|
||||
local free_ram=$(vmstat --stats --unit M | grep "free memory" | awk '{print $1}')
|
||||
local free_swap=$(vmstat --stats --unit M | grep "free swap" | awk '{print $1}')
|
||||
local free_ram_swap=$(( free_ram + free_swap ))
|
||||
|
||||
# Use the total amount of free ram
|
||||
local ram=$free_ram_swap
|
||||
if [ $ignore_swap -eq 1 ]
|
||||
|
@ -49,6 +49,10 @@ ynh_get_ram () {
|
|||
fi
|
||||
elif [ $total -eq 1 ]
|
||||
then
|
||||
local total_ram=$(vmstat --stats --unit M | grep "total memory" | awk '{print $1}')
|
||||
local total_swap=$(vmstat --stats --unit M | grep "total swap" | awk '{print $1}')
|
||||
local total_ram_swap=$(( total_ram + total_swap ))
|
||||
|
||||
local ram=$total_ram_swap
|
||||
if [ $ignore_swap -eq 1 ]
|
||||
then
|
||||
|
@ -59,9 +63,6 @@ ynh_get_ram () {
|
|||
# Use only the amount of free swap
|
||||
ram=$total_swap
|
||||
fi
|
||||
else
|
||||
ynh_print_warn --message="You have to choose --free or --total when using ynh_get_ram"
|
||||
ram=0
|
||||
fi
|
||||
|
||||
echo $ram
|
||||
|
|
|
@ -80,7 +80,7 @@ ynh_print_warn () {
|
|||
# Manage arguments with getopts
|
||||
ynh_handle_getopts_args "$@"
|
||||
|
||||
ynh_print_log "\e[93m\e[1m[WARN]\e[0m ${message}" >&2
|
||||
ynh_print_log "${message}" >&2
|
||||
}
|
||||
|
||||
# Print an error on stderr
|
||||
|
@ -97,7 +97,7 @@ ynh_print_err () {
|
|||
# Manage arguments with getopts
|
||||
ynh_handle_getopts_args "$@"
|
||||
|
||||
ynh_print_log "\e[91m\e[1m[ERR]\e[0m ${message}" >&2
|
||||
ynh_print_log "[Error] ${message}" >&2
|
||||
}
|
||||
|
||||
# Execute a command and print the result as an error
|
||||
|
@ -216,7 +216,7 @@ base_time=$(date +%s)
|
|||
# | arg: -m, --message= - The text to print
|
||||
# | arg: -w, --weight= - The weight for this progression. This value is 1 by default. Use a bigger value for a longer part of the script.
|
||||
# | arg: -t, --time - Print the execution time since the last call to this helper. Especially usefull to define weights. The execution time is given for the duration since the previous call. So the weight should be applied to this previous call.
|
||||
# | arg: -l, --last - Use for the last call of the helper, to fill te progression bar.
|
||||
# | arg: -l, --last - Use for the last call of the helper, to fill the progression bar.
|
||||
#
|
||||
# Requires YunoHost version 3.5.0 or higher.
|
||||
ynh_script_progression () {
|
||||
|
@ -332,7 +332,7 @@ ynh_debug () {
|
|||
|
||||
if [ -n "$message" ]
|
||||
then
|
||||
ynh_print_log "\e[34m\e[1m[DEBUG]\e[0m ${message}" >&2
|
||||
ynh_print_log "[Debug] ${message}" >&2
|
||||
fi
|
||||
|
||||
if [ "$trace" == "1" ]
|
||||
|
|
|
@ -44,8 +44,13 @@ ynh_mysql_execute_as_root() {
|
|||
ynh_handle_getopts_args "$@"
|
||||
database="${database:-}"
|
||||
|
||||
if [ -n "$database" ]
|
||||
then
|
||||
database="--database=$database"
|
||||
fi
|
||||
|
||||
ynh_mysql_connect_as --user="root" --password="$(cat $MYSQL_ROOT_PWD_FILE)" \
|
||||
--database="$database" <<< "$sql"
|
||||
$database <<< "$sql"
|
||||
}
|
||||
|
||||
# Execute a command from a file as root user
|
||||
|
@ -65,8 +70,14 @@ ynh_mysql_execute_file_as_root() {
|
|||
ynh_handle_getopts_args "$@"
|
||||
database="${database:-}"
|
||||
|
||||
if [ -n "$database" ]
|
||||
then
|
||||
database="--database=$database"
|
||||
fi
|
||||
|
||||
|
||||
ynh_mysql_connect_as --user="root" --password="$(cat $MYSQL_ROOT_PWD_FILE)" \
|
||||
--database="$database" < "$file"
|
||||
$database < "$file"
|
||||
}
|
||||
|
||||
# Create a database and grant optionnaly privilegies to a user
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
#!/bin/bash
|
||||
|
||||
n_version=6.5.1
|
||||
n_install_dir="/opt/node_n"
|
||||
node_version_path="$n_install_dir/n/versions/node"
|
||||
# N_PREFIX is the directory of n, it needs to be loaded as a environment variable.
|
||||
|
@ -16,8 +17,8 @@ ynh_install_n () {
|
|||
ynh_print_info --message="Installation of N - Node.js version management"
|
||||
# Build an app.src for n
|
||||
mkdir --parents "../conf"
|
||||
echo "SOURCE_URL=https://github.com/tj/n/archive/v4.1.0.tar.gz
|
||||
SOURCE_SUM=3983fa3f00d4bf85ba8e21f1a590f6e28938093abe0bb950aeea52b1717471fc" > "../conf/n.src"
|
||||
echo "SOURCE_URL=https://github.com/tj/n/archive/v${n_version}.tar.gz
|
||||
SOURCE_SUM=5833f15893b9951a9ed59487e87b6c181d96b83a525846255872c4f92f0d25dd" > "../conf/n.src"
|
||||
# Download and extract n
|
||||
ynh_setup_source --dest_dir="$n_install_dir/git" --source_id=n
|
||||
# Install n
|
||||
|
@ -28,14 +29,38 @@ SOURCE_SUM=3983fa3f00d4bf85ba8e21f1a590f6e28938093abe0bb950aeea52b1717471fc" > "
|
|||
# Load the version of node for an app, and set variables.
|
||||
#
|
||||
# ynh_use_nodejs has to be used in any app scripts before using node for the first time.
|
||||
# This helper will provide alias and variables to use in your scripts.
|
||||
#
|
||||
# 2 variables are available:
|
||||
# - $nodejs_path: The absolute path of node for the chosen version.
|
||||
# To use npm or node, use the alias `ynh_npm` and `ynh_node`
|
||||
# Those alias will use the correct version installed for the app
|
||||
# For example: use `ynh_npm install` instead of `npm install`
|
||||
#
|
||||
# With `sudo` or `ynh_exec_as`, use instead the fallback variables `$ynh_npm` and `$ynh_node`
|
||||
# And propagate $PATH to sudo with $ynh_node_load_PATH
|
||||
# Exemple: `ynh_exec_as $app $ynh_node_load_PATH $ynh_npm install`
|
||||
#
|
||||
# $PATH contains the path of the requested version of node.
|
||||
# However, $PATH is duplicated into $node_PATH to outlast any manipulation of $PATH
|
||||
# You can use the variable `$ynh_node_load_PATH` to quickly load your node version
|
||||
# in $PATH for an usage into a separate script.
|
||||
# Exemple: $ynh_node_load_PATH $final_path/script_that_use_npm.sh`
|
||||
#
|
||||
#
|
||||
# Finally, to start a nodejs service with the correct version, 2 solutions
|
||||
# Either the app is dependent of node or npm, but does not called it directly.
|
||||
# In such situation, you need to load PATH
|
||||
# `Environment="__NODE_ENV_PATH__"`
|
||||
# `ExecStart=__FINALPATH__/my_app`
|
||||
# You will replace __NODE_ENV_PATH__ with $ynh_node_load_PATH
|
||||
#
|
||||
# Or node start the app directly, then you don't need to load the PATH variable
|
||||
# `ExecStart=__YNH_NODE__ my_app run`
|
||||
# You will replace __YNH_NODE__ with $ynh_node
|
||||
#
|
||||
#
|
||||
# 2 other variables are also available
|
||||
# - $nodejs_path: The absolute path to node binaries for the chosen version.
|
||||
# - $nodejs_version: Just the version number of node for this app. Stored as 'nodejs_version' in settings.yml.
|
||||
# And 2 alias stored in variables:
|
||||
# - $nodejs_use_version: An old variable, not used anymore. Keep here to not break old apps
|
||||
# NB: $PATH will contain the path to node, it has to be propagated to any other shell which needs to use it.
|
||||
# That's means it has to be added to any systemd script.
|
||||
#
|
||||
# usage: ynh_use_nodejs
|
||||
#
|
||||
|
@ -43,15 +68,26 @@ SOURCE_SUM=3983fa3f00d4bf85ba8e21f1a590f6e28938093abe0bb950aeea52b1717471fc" > "
|
|||
ynh_use_nodejs () {
|
||||
nodejs_version=$(ynh_app_setting_get --app=$app --key=nodejs_version)
|
||||
|
||||
nodejs_use_version="echo \"Deprecated command, should be removed\""
|
||||
|
||||
# Get the absolute path of this version of node
|
||||
nodejs_path="$node_version_path/$nodejs_version/bin"
|
||||
|
||||
# Allow alias to be used into bash script
|
||||
shopt -s expand_aliases
|
||||
|
||||
# Create an alias for the specific version of node and a variable as fallback
|
||||
ynh_node="$nodejs_path/node"
|
||||
alias ynh_node="$ynh_node"
|
||||
# And npm
|
||||
ynh_npm="$nodejs_path/npm"
|
||||
alias ynh_npm="$ynh_npm"
|
||||
|
||||
# Load the path of this version of node in $PATH
|
||||
if [[ :$PATH: != *":$nodejs_path"* ]]; then
|
||||
PATH="$nodejs_path:$PATH"
|
||||
fi
|
||||
node_PATH="$PATH"
|
||||
# Create an alias to easily load the PATH
|
||||
ynh_node_load_PATH="PATH=$node_PATH"
|
||||
}
|
||||
|
||||
# Install a specific version of nodejs
|
||||
|
@ -64,6 +100,8 @@ ynh_use_nodejs () {
|
|||
# n (Node version management) uses the PATH variable to store the path of the version of node it is going to use.
|
||||
# That's how it changes the version
|
||||
#
|
||||
# Refer to ynh_use_nodejs for more information about available commands and variables
|
||||
#
|
||||
# Requires YunoHost version 2.7.12 or higher.
|
||||
ynh_install_nodejs () {
|
||||
# Use n, https://github.com/tj/n to manage the nodejs versions
|
||||
|
@ -88,7 +126,10 @@ ynh_install_nodejs () {
|
|||
test -x /usr/bin/npm && mv /usr/bin/npm /usr/bin/npm_n
|
||||
|
||||
# If n is not previously setup, install it
|
||||
if ! test $(n --version > /dev/null 2>&1)
|
||||
if ! $n_install_dir/bin/n --version > /dev/null 2>&1
|
||||
then
|
||||
ynh_install_n
|
||||
elif dpkg --compare-versions "$($n_install_dir/bin/n --version)" lt $n_version
|
||||
then
|
||||
ynh_install_n
|
||||
fi
|
||||
|
|
|
@ -1,37 +1,37 @@
|
|||
#!/bin/bash
|
||||
|
||||
readonly YNH_DEFAULT_PHP_VERSION=7.0
|
||||
# Declare the actual php version to use.
|
||||
# A packager willing to use another version of php can override the variable into its _common.sh.
|
||||
readonly YNH_DEFAULT_PHP_VERSION=7.3
|
||||
# Declare the actual PHP version to use.
|
||||
# A packager willing to use another version of PHP can override the variable into its _common.sh.
|
||||
YNH_PHP_VERSION=${YNH_PHP_VERSION:-$YNH_DEFAULT_PHP_VERSION}
|
||||
|
||||
# Create a dedicated php-fpm config
|
||||
# Create a dedicated PHP-FPM config
|
||||
#
|
||||
# usage 1: ynh_add_fpm_config [--phpversion=7.X] [--use_template] [--package=packages] [--dedicated_service]
|
||||
# | arg: -v, --phpversion= - Version of php to use.
|
||||
# | arg: -v, --phpversion= - Version of PHP to use.
|
||||
# | arg: -t, --use_template - Use this helper in template mode.
|
||||
# | arg: -p, --package= - Additionnal php packages to install
|
||||
# | arg: -d, --dedicated_service - Use a dedicated php-fpm service instead of the common one.
|
||||
# | arg: -p, --package= - Additionnal PHP packages to install
|
||||
# | arg: -d, --dedicated_service - Use a dedicated PHP-FPM service instead of the common one.
|
||||
#
|
||||
# -----------------------------------------------------------------------------
|
||||
#
|
||||
# usage 2: ynh_add_fpm_config [--phpversion=7.X] --usage=usage --footprint=footprint [--package=packages] [--dedicated_service]
|
||||
# | arg: -v, --phpversion= - Version of php to use.
|
||||
# | arg: -v, --phpversion= - Version of PHP to use.
|
||||
# | arg: -f, --footprint= - Memory footprint of the service (low/medium/high).
|
||||
# low - Less than 20Mb of ram by pool.
|
||||
# medium - Between 20Mb and 40Mb of ram by pool.
|
||||
# high - More than 40Mb of ram by pool.
|
||||
# Or specify exactly the footprint, the load of the service as Mb by pool instead of having a standard value.
|
||||
# low - Less than 20 MB of RAM by pool.
|
||||
# medium - Between 20 MB and 40 MB of RAM by pool.
|
||||
# high - More than 40 MB of RAM by pool.
|
||||
# Or specify exactly the footprint, the load of the service as MB by pool instead of having a standard value.
|
||||
# To have this value, use the following command and stress the service.
|
||||
# watch -n0.5 ps -o user,cmd,%cpu,rss -u APP
|
||||
#
|
||||
# | arg: -u, --usage= - Expected usage of the service (low/medium/high).
|
||||
# low - Personal usage, behind the sso.
|
||||
# low - Personal usage, behind the SSO.
|
||||
# medium - Low usage, few people or/and publicly accessible.
|
||||
# high - High usage, frequently visited website.
|
||||
#
|
||||
# | arg: -p, --package= - Additionnal php packages to install for a specific version of php
|
||||
# | arg: -d, --dedicated_service - Use a dedicated php-fpm service instead of the common one.
|
||||
# | arg: -p, --package= - Additionnal PHP packages to install for a specific version of PHP
|
||||
# | arg: -d, --dedicated_service - Use a dedicated PHP-FPM service instead of the common one.
|
||||
#
|
||||
#
|
||||
# The footprint of the service will be used to defined the maximum footprint we can allow, which is half the maximum RAM.
|
||||
|
@ -85,7 +85,7 @@ ynh_add_fpm_config () {
|
|||
# Set the default PHP-FPM version by default
|
||||
phpversion="${phpversion:-$YNH_PHP_VERSION}"
|
||||
|
||||
# If the requested php version is not the default version for YunoHost
|
||||
# If the requested PHP version is not the default version for YunoHost
|
||||
if [ "$phpversion" != "$YNH_DEFAULT_PHP_VERSION" ]
|
||||
then
|
||||
# If the argument --package is used, add the packages to ynh_install_php to install them from sury
|
||||
|
@ -95,7 +95,7 @@ ynh_add_fpm_config () {
|
|||
else
|
||||
local additionnal_packages=""
|
||||
fi
|
||||
# Install this specific version of php.
|
||||
# Install this specific version of PHP.
|
||||
ynh_install_php --phpversion="$phpversion" "$additionnal_packages"
|
||||
elif [ -n "$package" ]
|
||||
then
|
||||
|
@ -118,7 +118,7 @@ ynh_add_fpm_config () {
|
|||
fpm_service="php5-fpm"
|
||||
fi
|
||||
|
||||
# Create the directory for fpm pools
|
||||
# Create the directory for FPM pools
|
||||
mkdir --parents "$fpm_config_dir/pool.d"
|
||||
|
||||
ynh_app_setting_set --app=$app --key=fpm_config_dir --value="$fpm_config_dir"
|
||||
|
@ -127,7 +127,7 @@ ynh_add_fpm_config () {
|
|||
ynh_app_setting_set --app=$app --key=phpversion --value=$phpversion
|
||||
finalphpconf="$fpm_config_dir/pool.d/$app.conf"
|
||||
|
||||
# Migrate from mutual php service to dedicated one.
|
||||
# Migrate from mutual PHP service to dedicated one.
|
||||
if [ $dedicated_service -eq 1 ]
|
||||
then
|
||||
local old_fpm_config_dir="/etc/php/$phpversion/fpm"
|
||||
|
@ -137,9 +137,9 @@ ynh_add_fpm_config () {
|
|||
ynh_print_info --message="Migrate to a dedicated php-fpm service for $app."
|
||||
# Create a backup of the old file before migration
|
||||
ynh_backup_if_checksum_is_different --file="$old_fpm_config_dir/pool.d/$app.conf"
|
||||
# Remove the old php config file
|
||||
# Remove the old PHP config file
|
||||
ynh_secure_remove --file="$old_fpm_config_dir/pool.d/$app.conf"
|
||||
# Reload php to release the socket and allow the dedicated service to use it
|
||||
# Reload PHP to release the socket and allow the dedicated service to use it
|
||||
ynh_systemd_action --service_name=php${phpversion}-fpm --action=reload
|
||||
fi
|
||||
fi
|
||||
|
@ -148,21 +148,27 @@ ynh_add_fpm_config () {
|
|||
|
||||
if [ $use_template -eq 1 ]
|
||||
then
|
||||
# Usage 1, use the template in ../conf/php-fpm.conf
|
||||
cp ../conf/php-fpm.conf "$finalphpconf"
|
||||
# Usage 1, use the template in conf/php-fpm.conf
|
||||
local phpfpm_path="../conf/php-fpm.conf"
|
||||
if [ ! -e "$phpfpm_path" ]; then
|
||||
phpfpm_path="../settings/conf/php-fpm.conf" # Into the restore script, the PHP-FPM template is not at the same place
|
||||
fi
|
||||
# Make sure now that the template indeed exists
|
||||
[ -e "$phpfpm_path" ] || ynh_die --message="Unable to find template to configure PHP-FPM."
|
||||
cp "$phpfpm_path" "$finalphpconf"
|
||||
ynh_replace_string --match_string="__NAMETOCHANGE__" --replace_string="$app" --target_file="$finalphpconf"
|
||||
ynh_replace_string --match_string="__FINALPATH__" --replace_string="$final_path" --target_file="$finalphpconf"
|
||||
ynh_replace_string --match_string="__USER__" --replace_string="$app" --target_file="$finalphpconf"
|
||||
ynh_replace_string --match_string="__PHPVERSION__" --replace_string="$phpversion" --target_file="$finalphpconf"
|
||||
|
||||
else
|
||||
# Usage 2, generate a php-fpm config file with ynh_get_scalable_phpfpm
|
||||
# Usage 2, generate a PHP-FPM config file with ynh_get_scalable_phpfpm
|
||||
|
||||
# Store settings
|
||||
ynh_app_setting_set --app=$app --key=fpm_footprint --value=$footprint
|
||||
ynh_app_setting_set --app=$app --key=fpm_usage --value=$usage
|
||||
|
||||
# Define the values to use for the configuration of php.
|
||||
# Define the values to use for the configuration of PHP.
|
||||
ynh_get_scalable_phpfpm --usage=$usage --footprint=$footprint
|
||||
|
||||
# Copy the default file
|
||||
|
@ -175,7 +181,7 @@ ynh_add_fpm_config () {
|
|||
ynh_replace_string --match_string="^group = .*" --replace_string="group = $app" --target_file="$finalphpconf"
|
||||
ynh_replace_string --match_string=".*chdir = .*" --replace_string="chdir = $final_path" --target_file="$finalphpconf"
|
||||
|
||||
# Configure fpm children
|
||||
# Configure FPM children
|
||||
ynh_replace_string --match_string=".*pm = .*" --replace_string="pm = $php_pm" --target_file="$finalphpconf"
|
||||
ynh_replace_string --match_string=".*pm.max_children = .*" --replace_string="pm.max_children = $php_max_children" --target_file="$finalphpconf"
|
||||
ynh_replace_string --match_string=".*pm.max_requests = .*" --replace_string="pm.max_requests = 500" --target_file="$finalphpconf"
|
||||
|
@ -232,7 +238,7 @@ ynh_add_fpm_config () {
|
|||
ynh_replace_string --match_string="^[; ]*syslog.ident *=.*" --replace_string="syslog.ident = php-fpm-$app" --target_file="$globalphpconf"
|
||||
ynh_replace_string --match_string="^[; ]*include *=.*" --replace_string="include = $finalphpconf" --target_file="$globalphpconf"
|
||||
|
||||
# Create a config for a dedicated php-fpm service for the app
|
||||
# Create a config for a dedicated PHP-FPM service for the app
|
||||
echo "[Unit]
|
||||
Description=PHP $phpversion FastCGI Process Manager for $app
|
||||
After=network.target
|
||||
|
@ -247,7 +253,7 @@ ExecReload=/bin/kill -USR2 \$MAINPID
|
|||
WantedBy=multi-user.target
|
||||
" > ../conf/$fpm_service
|
||||
|
||||
# Create this dedicated php-fpm service
|
||||
# Create this dedicated PHP-FPM service
|
||||
ynh_add_systemd_config --service=$fpm_service --template=$fpm_service
|
||||
# Integrate the service in YunoHost admin panel
|
||||
yunohost service add $fpm_service --log /var/log/php/fpm-php.$app.log --log_type file --description "Php-fpm dedicated to $app"
|
||||
|
@ -256,12 +262,12 @@ WantedBy=multi-user.target
|
|||
# Restart the service, as this service is either stopped or only for this app
|
||||
ynh_systemd_action --service_name=$fpm_service --action=restart
|
||||
else
|
||||
# Reload php, to not impact other parts of the system using php
|
||||
# Reload PHP, to not impact other parts of the system using PHP
|
||||
ynh_systemd_action --service_name=$fpm_service --action=reload
|
||||
fi
|
||||
}
|
||||
|
||||
# Remove the dedicated php-fpm config
|
||||
# Remove the dedicated PHP-FPM config
|
||||
#
|
||||
# usage: ynh_remove_fpm_config
|
||||
#
|
||||
|
@ -271,13 +277,13 @@ ynh_remove_fpm_config () {
|
|||
local fpm_service=$(ynh_app_setting_get --app=$app --key=fpm_service)
|
||||
local dedicated_service=$(ynh_app_setting_get --app=$app --key=fpm_dedicated_service)
|
||||
dedicated_service=${dedicated_service:-0}
|
||||
# Get the version of php used by this app
|
||||
# Get the version of PHP used by this app
|
||||
local phpversion=$(ynh_app_setting_get $app phpversion)
|
||||
|
||||
# Assume default PHP-FPM version by default
|
||||
phpversion="${phpversion:-$YNH_DEFAULT_PHP_VERSION}"
|
||||
|
||||
# Assume default php files if not set
|
||||
# Assume default PHP files if not set
|
||||
if [ -z "$fpm_config_dir" ]
|
||||
then
|
||||
fpm_config_dir="/etc/php/$YNH_DEFAULT_PHP_VERSION/fpm"
|
||||
|
@ -286,34 +292,37 @@ ynh_remove_fpm_config () {
|
|||
|
||||
if [ $dedicated_service -eq 1 ]
|
||||
then
|
||||
# Remove the dedicated service php-fpm service for the app
|
||||
# Remove the dedicated service PHP-FPM service for the app
|
||||
ynh_remove_systemd_config --service=$fpm_service
|
||||
# Remove the global php-fpm conf
|
||||
# Remove the global PHP-FPM conf
|
||||
ynh_secure_remove --file="$fpm_config_dir/php-fpm-$app.conf"
|
||||
# Remove the service from the list of services known by Yunohost
|
||||
# Remove the service from the list of services known by YunoHost
|
||||
yunohost service remove $fpm_service
|
||||
elif ynh_package_is_installed --package="php${phpversion}-fpm"; then
|
||||
ynh_systemd_action --service_name=$fpm_service --action=reload
|
||||
fi
|
||||
|
||||
ynh_secure_remove --file="$fpm_config_dir/pool.d/$app.conf"
|
||||
ynh_exec_warn_less ynh_secure_remove --file="$fpm_config_dir/conf.d/20-$app.ini"
|
||||
if [ -e $fpm_config_dir/conf.d/20-$app.ini ]
|
||||
then
|
||||
ynh_secure_remove --file="$fpm_config_dir/conf.d/20-$app.ini"
|
||||
fi
|
||||
|
||||
# If the php version used is not the default version for YunoHost
|
||||
# If the PHP version used is not the default version for YunoHost
|
||||
if [ "$phpversion" != "$YNH_DEFAULT_PHP_VERSION" ]
|
||||
then
|
||||
# Remove this specific version of php
|
||||
# Remove this specific version of PHP
|
||||
ynh_remove_php
|
||||
fi
|
||||
}
|
||||
|
||||
# Install another version of php.
|
||||
# Install another version of PHP.
|
||||
#
|
||||
# [internal]
|
||||
#
|
||||
# usage: ynh_install_php --phpversion=phpversion [--package=packages]
|
||||
# | arg: -v, --phpversion= - Version of php to install.
|
||||
# | arg: -p, --package= - Additionnal php packages to install
|
||||
# | arg: -v, --phpversion= - Version of PHP to install.
|
||||
# | arg: -p, --package= - Additionnal PHP packages to install
|
||||
#
|
||||
# Requires YunoHost version 3.8.1 or higher.
|
||||
ynh_install_php () {
|
||||
|
@ -340,30 +349,32 @@ ynh_install_php () {
|
|||
# Do not add twice the same line
|
||||
if ! grep --quiet "$YNH_APP_INSTANCE_NAME:" "/etc/php/ynh_app_version"
|
||||
then
|
||||
# Store the ID of this app and the version of php requested for it
|
||||
# Store the ID of this app and the version of PHP requested for it
|
||||
echo "$YNH_APP_INSTANCE_NAME:$phpversion" | tee --append "/etc/php/ynh_app_version"
|
||||
fi
|
||||
|
||||
# Add an extra repository for those packages
|
||||
ynh_install_extra_repo --repo="https://packages.sury.org/php/ $(ynh_get_debian_release) main" --key="https://packages.sury.org/php/apt.gpg" --priority=995 --name=extra_php_version
|
||||
ynh_install_extra_repo --repo="https://packages.sury.org/php/ $(ynh_get_debian_release) main" --key="https://packages.sury.org/php/apt.gpg" --priority=995 --name=extra_php_version --priority=600
|
||||
|
||||
# Install requested dependencies from this extra repository.
|
||||
# Install php-fpm first, otherwise php will install apache as a dependency.
|
||||
# Install PHP-FPM first, otherwise PHP will install apache as a dependency.
|
||||
ynh_add_app_dependencies --package="php${phpversion}-fpm"
|
||||
ynh_add_app_dependencies --package="php$phpversion php${phpversion}-common $package"
|
||||
|
||||
# Set the default php version back as the default version for php-cli.
|
||||
# Set the default PHP version back as the default version for php-cli.
|
||||
update-alternatives --set php /usr/bin/php$YNH_DEFAULT_PHP_VERSION
|
||||
|
||||
# Pin this extra repository after packages are installed to prevent sury of doing shit
|
||||
ynh_pin_repo --package="*" --pin="origin \"packages.sury.org\"" --priority=200 --name=extra_php_version
|
||||
ynh_pin_repo --package="php${YNH_DEFAULT_PHP_VERSION}*" --pin="origin \"packages.sury.org\"" --priority=600 --name=extra_php_version --append
|
||||
for package_to_not_upgrade in "php" "php-fpm" "php-mysql" "php-xml" "php-zip" "php-mbstring" "php-ldap" "php-gd" "php-curl" "php-bz2" "php-json" "php-sqlite3" "php-intl" "openssl" "libssl1.1" "libssl-dev"
|
||||
do
|
||||
ynh_pin_repo --package="$package_to_not_upgrade" --pin="origin \"packages.sury.org\"" --priority="-1" --name=extra_php_version --append
|
||||
done
|
||||
|
||||
# Advertise service in admin panel
|
||||
yunohost service add php${phpversion}-fpm --log "/var/log/php${phpversion}-fpm.log"
|
||||
}
|
||||
|
||||
# Remove the specific version of php used by the app.
|
||||
# Remove the specific version of PHP used by the app.
|
||||
#
|
||||
# [internal]
|
||||
#
|
||||
|
@ -371,7 +382,7 @@ ynh_install_php () {
|
|||
#
|
||||
# Requires YunoHost version 3.8.1 or higher.
|
||||
ynh_remove_php () {
|
||||
# Get the version of php used by this app
|
||||
# Get the version of PHP used by this app
|
||||
local phpversion=$(ynh_app_setting_get $app phpversion)
|
||||
|
||||
if [ "$phpversion" == "$YNH_DEFAULT_PHP_VERSION" ] || [ -z "$phpversion" ]
|
||||
|
@ -389,7 +400,7 @@ ynh_remove_php () {
|
|||
# Remove the line for this app
|
||||
sed --in-place "/$YNH_APP_INSTANCE_NAME:$phpversion/d" "/etc/php/ynh_app_version"
|
||||
|
||||
# If no other app uses this version of php, remove it.
|
||||
# If no other app uses this version of PHP, remove it.
|
||||
if ! grep --quiet "$phpversion" "/etc/php/ynh_app_version"
|
||||
then
|
||||
# Remove the service from the admin panel
|
||||
|
@ -397,26 +408,26 @@ ynh_remove_php () {
|
|||
yunohost service remove php${phpversion}-fpm
|
||||
fi
|
||||
|
||||
# Purge php dependencies for this version.
|
||||
# Purge PHP dependencies for this version.
|
||||
ynh_package_autopurge "php$phpversion php${phpversion}-fpm php${phpversion}-common"
|
||||
fi
|
||||
}
|
||||
|
||||
# Define the values to configure php-fpm
|
||||
# Define the values to configure PHP-FPM
|
||||
#
|
||||
# [internal]
|
||||
#
|
||||
# usage: ynh_get_scalable_phpfpm --usage=usage --footprint=footprint [--print]
|
||||
# | arg: -f, --footprint= - Memory footprint of the service (low/medium/high).
|
||||
# low - Less than 20Mb of ram by pool.
|
||||
# medium - Between 20Mb and 40Mb of ram by pool.
|
||||
# high - More than 40Mb of ram by pool.
|
||||
# Or specify exactly the footprint, the load of the service as Mb by pool instead of having a standard value.
|
||||
# low - Less than 20 MB of RAM by pool.
|
||||
# medium - Between 20 MB and 40 MB of RAM by pool.
|
||||
# high - More than 40 MB of RAM by pool.
|
||||
# Or specify exactly the footprint, the load of the service as MB by pool instead of having a standard value.
|
||||
# To have this value, use the following command and stress the service.
|
||||
# watch -n0.5 ps -o user,cmd,%cpu,rss -u APP
|
||||
#
|
||||
# | arg: -u, --usage= - Expected usage of the service (low/medium/high).
|
||||
# low - Personal usage, behind the sso.
|
||||
# low - Personal usage, behind the SSO.
|
||||
# medium - Low usage, few people or/and publicly accessible.
|
||||
# high - High usage, frequently visited website.
|
||||
#
|
||||
|
@ -487,7 +498,7 @@ ynh_get_scalable_phpfpm () {
|
|||
|
||||
# Define pm.max_children
|
||||
# The value of pm.max_children is the total amount of ram divide by 2 and divide again by the footprint of a pool for this app.
|
||||
# So if php-fpm start the maximum of children, it won't exceed half of the ram.
|
||||
# So if PHP-FPM start the maximum of children, it won't exceed half of the ram.
|
||||
php_max_children=$(( $max_ram / 2 / $footprint ))
|
||||
# If process manager is set as static, use half less children.
|
||||
# Used as static, there's always as many children as the value of pm.max_children
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
#!/bin/bash
|
||||
|
||||
PSQL_ROOT_PWD_FILE=/etc/yunohost/psql
|
||||
PSQL_VERSION=11
|
||||
|
||||
# Open a connection as a user
|
||||
#
|
||||
|
@ -45,8 +46,13 @@ ynh_psql_execute_as_root() {
|
|||
ynh_handle_getopts_args "$@"
|
||||
database="${database:-}"
|
||||
|
||||
if [ -n "$database" ]
|
||||
then
|
||||
database="--database=$database"
|
||||
fi
|
||||
|
||||
ynh_psql_connect_as --user="postgres" --password="$(cat $PSQL_ROOT_PWD_FILE)" \
|
||||
--database="$database" <<<"$sql"
|
||||
$database <<<"$sql"
|
||||
}
|
||||
|
||||
# Execute a command from a file as root user
|
||||
|
@ -66,8 +72,13 @@ ynh_psql_execute_file_as_root() {
|
|||
ynh_handle_getopts_args "$@"
|
||||
database="${database:-}"
|
||||
|
||||
if [ -n "$database" ]
|
||||
then
|
||||
database="--database=$database"
|
||||
fi
|
||||
|
||||
ynh_psql_connect_as --user="postgres" --password="$(cat $PSQL_ROOT_PWD_FILE)" \
|
||||
--database="$database" <"$file"
|
||||
$database <"$file"
|
||||
}
|
||||
|
||||
# Create a database and grant optionnaly privilegies to a user
|
||||
|
@ -212,7 +223,7 @@ ynh_psql_drop_user() {
|
|||
# usage: ynh_psql_setup_db --db_user=user --db_name=name [--db_pwd=pwd]
|
||||
# | arg: -u, --db_user= - Owner of the database
|
||||
# | arg: -n, --db_name= - Name of the database
|
||||
# | arg: -p, --db_pwd= - Password of the database. If not given, a password will be generated
|
||||
# | arg: -p, --db_pwd= - Password of the database. If not provided, a password will be generated
|
||||
#
|
||||
# After executing this helper, the password of the created database will be available in $db_pwd
|
||||
# It will also be stored as "psqlpwd" into the app settings.
|
||||
|
@ -228,12 +239,14 @@ ynh_psql_setup_db() {
|
|||
# Manage arguments with getopts
|
||||
ynh_handle_getopts_args "$@"
|
||||
|
||||
local new_db_pwd=$(ynh_string_random) # Generate a random password
|
||||
# If $db_pwd is not given, use new_db_pwd instead for db_pwd
|
||||
db_pwd="${db_pwd:-$new_db_pwd}"
|
||||
|
||||
if ! ynh_psql_user_exists --user=$db_user; then
|
||||
local new_db_pwd=$(ynh_string_random) # Generate a random password
|
||||
# If $db_pwd is not provided, use new_db_pwd instead for db_pwd
|
||||
db_pwd="${db_pwd:-$new_db_pwd}"
|
||||
|
||||
ynh_psql_create_user "$db_user" "$db_pwd"
|
||||
elif [ -z $db_pwd ]; then
|
||||
ynh_die --message="The user $db_user exists, please provide his password"
|
||||
fi
|
||||
|
||||
ynh_psql_create_db "$db_name" "$db_user" # Create the database
|
||||
|
@ -273,46 +286,46 @@ ynh_psql_remove_db() {
|
|||
}
|
||||
|
||||
# Create a master password and set up global settings
|
||||
# It also make sure that postgresql is installed and running
|
||||
# Please always call this script in install and restore scripts
|
||||
#
|
||||
# usage: ynh_psql_test_if_first_run
|
||||
#
|
||||
# Requires YunoHost version 2.7.13 or higher.
|
||||
ynh_psql_test_if_first_run() {
|
||||
if [ -f "$PSQL_ROOT_PWD_FILE" ]
|
||||
|
||||
# Make sure postgresql is indeed installed
|
||||
dpkg --list | grep -q "ii postgresql-$PSQL_VERSION" || ynh_die "postgresql-$PSQL_VERSION is not installed !?"
|
||||
|
||||
# Check for some weird issue where postgresql could be installed but etc folder would not exist ...
|
||||
[ -e "/etc/postgresql/$PSQL_VERSION" ] || ynh_die "It looks like postgresql was not properly configured ? /etc/postgresql/$PSQL_VERSION is missing ... Could be due to a locale issue, c.f.https://serverfault.com/questions/426989/postgresql-etc-postgresql-doesnt-exist"
|
||||
|
||||
# Make sure postgresql is started and enabled
|
||||
# (N.B. : to check the active state, we check the cluster state because
|
||||
# postgresql could be flagged as active even though the cluster is in
|
||||
# failed state because of how the service is configured..)
|
||||
systemctl is-active postgresql@$PSQL_VERSION-main -q || ynh_systemd_action --service_name=postgresql --action=restart
|
||||
systemctl is-enabled postgresql -q || systemctl enable postgresql --quiet
|
||||
|
||||
# If this is the very first time, we define the root password
|
||||
# and configure a few things
|
||||
if [ ! -f "$PSQL_ROOT_PWD_FILE" ]
|
||||
then
|
||||
ynh_print_info --message="PostgreSQL is already installed, no need to create master password"
|
||||
return
|
||||
local pg_hba=/etc/postgresql/$PSQL_VERSION/main/pg_hba.conf
|
||||
|
||||
local psql_root_password="$(ynh_string_random)"
|
||||
echo "$psql_root_password" >$PSQL_ROOT_PWD_FILE
|
||||
sudo --login --user=postgres psql -c"ALTER user postgres WITH PASSWORD '$psql_root_password'" postgres
|
||||
|
||||
# force all user to connect to local databases using hashed passwords
|
||||
# https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html#EXAMPLE-PG-HBA.CONF
|
||||
# Note: we can't use peer since YunoHost create users with nologin
|
||||
# See: https://github.com/YunoHost/yunohost/blob/unstable/data/helpers.d/user
|
||||
ynh_replace_string --match_string="local\(\s*\)all\(\s*\)all\(\s*\)peer" --replace_string="local\1all\2all\3md5" --target_file="$pg_hba"
|
||||
|
||||
# Integrate postgresql service in yunohost
|
||||
yunohost service add postgresql --log "/var/log/postgresql/"
|
||||
|
||||
ynh_systemd_action --service_name=postgresql --action=reload
|
||||
fi
|
||||
|
||||
local psql_root_password="$(ynh_string_random)"
|
||||
echo "$psql_root_password" >$PSQL_ROOT_PWD_FILE
|
||||
|
||||
if [ -e /etc/postgresql/9.4/ ]
|
||||
then
|
||||
local pg_hba=/etc/postgresql/9.4/main/pg_hba.conf
|
||||
local logfile=/var/log/postgresql/postgresql-9.4-main.log
|
||||
elif [ -e /etc/postgresql/9.6/ ]
|
||||
then
|
||||
local pg_hba=/etc/postgresql/9.6/main/pg_hba.conf
|
||||
local logfile=/var/log/postgresql/postgresql-9.6-main.log
|
||||
else
|
||||
ynh_die "postgresql shoud be 9.4 or 9.6"
|
||||
fi
|
||||
|
||||
ynh_systemd_action --service_name=postgresql --action=start
|
||||
|
||||
sudo --login --user=postgres psql -c"ALTER user postgres WITH PASSWORD '$psql_root_password'" postgres
|
||||
|
||||
# force all user to connect to local databases using hashed passwords
|
||||
# https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html#EXAMPLE-PG-HBA.CONF
|
||||
# Note: we can't use peer since YunoHost create users with nologin
|
||||
# See: https://github.com/YunoHost/yunohost/blob/unstable/data/helpers.d/user
|
||||
ynh_replace_string --match_string="local\(\s*\)all\(\s*\)all\(\s*\)peer" --replace_string="local\1all\2all\3md5" --target_file="$pg_hba"
|
||||
|
||||
# Advertise service in admin panel
|
||||
yunohost service add postgresql --log "$logfile"
|
||||
|
||||
systemctl enable postgresql
|
||||
ynh_systemd_action --service_name=postgresql --action=reload
|
||||
}
|
||||
|
|
|
@ -153,39 +153,74 @@ ynh_webpath_register () {
|
|||
|
||||
# Create a new permission for the app
|
||||
#
|
||||
# example: ynh_permission_create --permission admin --url /admin --additional_urls 'domain.tld/otherurl /superadmin' --allowed alice bob --label 'My app admin'
|
||||
# example 1: ynh_permission_create --permission=admin --url=/admin --additional_urls=domain.tld/admin /superadmin --allowed=alice bob \
|
||||
# --label="My app admin" --show_tile=true
|
||||
#
|
||||
# usage: ynh_permission_create --permission "permission" [--url "url"] [--additional_urls "second-url" [ "other-url" ]] [--auth_header true|false]
|
||||
# [--allowed group1 [ group2 ]] [--label "label"] [--show_tile true|false]
|
||||
# [--protected true|false]
|
||||
# | arg: permission - the name for the permission (by default a permission named "main" already exist)
|
||||
# | arg: url - (optional) URL for which access will be allowed/forbidden
|
||||
# | arg: additional_urls - (optional) List of additional URL for which access will be allowed/forbidden
|
||||
# | arg: auth_header - (optional) Define for the URL of this permission, if SSOwat pass the authentication header to the application. Default is true
|
||||
# | arg: allowed - (optional) A list of group/user to allow for the permission
|
||||
# | arg: label - (optional) Define a name for the permission. This label will be shown on the SSO and in the admin.
|
||||
# | Default is "APP_LABEL (permission name)".
|
||||
# | arg: show_tile - (optional) Define if a tile will be shown in the SSO
|
||||
# | arg: protected - (optional) Define if this permission is protected. If it is protected the administrator
|
||||
# This first example will create a new permission with the following effects:
|
||||
# - A tile named "My app admin" in the SSO will be available for the users alice and bob. This tile will point to the relative url '/admin'.
|
||||
# - Only the users alice and bob will have access to the following URLs: /admin, domain.tld/admin, /superadmin
|
||||
#
|
||||
#
|
||||
# example 2: ynh_permission_create --permission=api --url=domain.tld/api --auth_header=false --allowed=visitors \
|
||||
# --label="MyApp API" --protected=true
|
||||
#
|
||||
# This example will create a new protected permission, so the admin won't be able to add/remove the visitors group of this permission.
|
||||
# For an API which needs to always stay public, this avoids the admin accidentally breaking anything.
|
||||
# With this permission, all clients will be allowed to access the URL 'domain.tld/api'.
|
||||
# Note that in this case no tile will be shown in the SSO.
|
||||
# Note that the auth_header parameter is set to 'false', so no authentication header will be passed to the application.
|
||||
# Generally an API is requested by another application; enabling auth_header brings no advantage and can cause issues in some cases.
|
||||
# So in this case it's better to disable this option for APIs.
|
||||
#
|
||||
#
|
||||
# usage: ynh_permission_create --permission="permission" [--url="url"] [--additional_urls="second-url" [ "third-url" ]] [--auth_header=true|false]
|
||||
# [--allowed=group1 [ group2 ]] [--label="label"] [--show_tile=true|false]
|
||||
# [--protected=true|false]
|
||||
# | arg: -p, permission= - the name for the permission (by default a permission named "main" already exists)
|
||||
# | arg: -u, url= - (optional) URL for which access will be allowed/forbidden.
|
||||
# | Note that if 'show_tile' is enabled, this URL will be the URL of the tile.
|
||||
# | arg: -A, additional_urls= - (optional) List of additional URLs for which access will be allowed/forbidden
|
||||
# | arg: -h, auth_header= - (optional) Define, for the URLs of this permission, whether SSOwat passes the authentication header to the application. Default is true
|
||||
# | arg: -a, allowed= - (optional) A list of groups/users to allow for the permission
|
||||
# | arg: -l, label= - (optional) Define a name for the permission. This label will be shown on the SSO and in the admin.
|
||||
# | Default is "APP_LABEL (permission name)".
|
||||
# | arg: -t, show_tile= - (optional) Define if a tile will be shown in the SSO. If yes the name of the tile will be the 'label' parameter.
|
||||
# | Default is false (for the permission different than 'main').
|
||||
# | arg: -P, protected= - (optional) Define if this permission is protected. If it is protected the administrator
|
||||
# | won't be able to add or remove the visitors group of this permission.
|
||||
# | By default it's 'true' (for the permission different than 'main').
|
||||
# | By default it's 'false'
|
||||
#
|
||||
# If provided, 'url' is assumed to be relative to the app domain/path if they
|
||||
# If provided, 'url' or 'additional_urls' is assumed to be relative to the app domain/path if they
|
||||
# start with '/'. For example:
|
||||
# / -> domain.tld/app
|
||||
# /admin -> domain.tld/app/admin
|
||||
# domain.tld/app/api -> domain.tld/app/api
|
||||
#
|
||||
# 'url' can be later treated as a regex if it starts with "re:".
|
||||
# 'url' or 'additional_urls' can be treated as a PCRE (not lua) regex if it starts with "re:".
|
||||
# For example:
|
||||
# re:/api/[A-Z]*$ -> domain.tld/app/api/[A-Z]*$
|
||||
# re:domain.tld/app/api/[A-Z]*$ -> domain.tld/app/api/[A-Z]*$
|
||||
#
|
||||
# Note that the parameters 'url' and 'additional_urls' are globally the same. The only differences are:
|
||||
# - 'url' is a single URL whereas 'additional_urls' can be a list of URLs. There is no limit on 'additional_urls'.
|
||||
# - 'url' is used as the URL of the tile in the SSO (if enabled with the 'show_tile' parameter)
|
||||
#
|
||||
#
|
||||
# About the authentication header (auth_header parameter).
|
||||
# By default the SSO passes the following HTTP headers (linked to the authenticated user) to the application:
|
||||
# - "Auth-User": username
|
||||
# - "Remote-User": username
|
||||
# - "Email": user email
|
||||
#
|
||||
# Generally this feature is useful to automatically authenticate the user in the application, but in some cases the application doesn't work with these headers and they need to be disabled for the application to work correctly.
|
||||
# See https://github.com/YunoHost/issues/issues/1420 for more information
|
||||
#
|
||||
#
|
||||
# Requires YunoHost version 3.7.0 or higher.
|
||||
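# A hypothetical extra illustration (not from the original doc): combining a
# PCRE additional_url with a protected permission, using the standard all_users group:
#   ynh_permission_create --permission="files" --url="/files" \
#       --additional_urls="re:/files/[0-9]+/download$" \
#       --allowed="all_users" --label="My app files" --protected=true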
ynh_permission_create() {
|
||||
# Declare an array to define the options of this helper.
|
||||
local legacy_args=puAhaltP
|
||||
declare -A args_array=( [p]=permission= [u]=url= [A]=additional_urls= [h]=auth_header= [a]=allowed= [l]=label= [t]=show_tile= [P]=protected= )
|
||||
local -A args_array=( [p]=permission= [u]=url= [A]=additional_urls= [h]=auth_header= [a]=allowed= [l]=label= [t]=show_tile= [P]=protected= )
|
||||
local permission
|
||||
local url
|
||||
local additional_urls
|
||||
|
@ -210,7 +245,13 @@ ynh_permission_create() {
|
|||
|
||||
if [[ -n $additional_urls ]]
|
||||
then
|
||||
additional_urls=",additional_urls=['${additional_urls//';'/"','"}']"
|
||||
# Convert a list from getopts to python list
|
||||
# Note that getopts separates the args with ';'
|
||||
# For example:
|
||||
# --additional_urls /urlA /urlB
|
||||
# will be:
|
||||
# additional_urls=['/urlA', '/urlB']
|
||||
additional_urls=",additional_urls=['${additional_urls//;/\',\'}']"
|
||||
fi
|
||||
|
||||
if [[ -n $auth_header ]]
|
||||
|
@ -223,8 +264,15 @@ ynh_permission_create() {
|
|||
fi
|
||||
fi
|
||||
|
||||
if [[ -n $allowed ]]; then
|
||||
allowed=",allowed=['${allowed//';'/"','"}']"
|
||||
if [[ -n $allowed ]]
|
||||
then
|
||||
# Convert a list from getopts to python list
|
||||
# Note that getopts separates the args with ';'
|
||||
# For example:
|
||||
# --allowed alice bob
|
||||
# will be:
|
||||
# allowed=['alice', 'bob']
|
||||
allowed=",allowed=['${allowed//;/\',\'}']"
|
||||
fi
|
||||
|
||||
if [[ -n ${label:-} ]]; then
|
||||
|
@ -233,16 +281,20 @@ ynh_permission_create() {
|
|||
label=",label='$YNH_APP_LABEL ($permission)'"
|
||||
fi
|
||||
|
||||
if [[ -n ${show_tile:-} ]]; then
|
||||
if [ $show_tile == "true" ]; then
|
||||
if [[ -n ${show_tile:-} ]]
|
||||
then
|
||||
if [ $show_tile == "true" ]
|
||||
then
|
||||
show_tile=",show_tile=True"
|
||||
else
|
||||
show_tile=",show_tile=False"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ -n ${protected:-} ]]; then
|
||||
if [ $protected == "true" ]; then
|
||||
if [[ -n ${protected:-} ]]
|
||||
then
|
||||
if [ $protected == "true" ]
|
||||
then
|
||||
protected=",protected=True"
|
||||
else
|
||||
protected=",protected=False"
|
||||
|
@ -289,21 +341,21 @@ ynh_permission_exists() {
|
|||
|
||||
# Redefine the url associated to a permission
|
||||
#
|
||||
# usage: ynh_permission_url --permission "permission" [--url "url"] [--add_url "new-url" [ "other-new-url" ]] [--remove_url "old-url" [ "other-old-url"]]
|
||||
# [--auth_header true|false][--clear_urls]
|
||||
# | arg: permission - the name for the permission (by default a permission named "main" is removed automatically when the app is removed)
|
||||
# | arg: url - (optional) URL for which access will be allowed/forbidden.
|
||||
# usage: ynh_permission_url --permission "permission" [--url="url"] [--add_url="new-url" [ "other-new-url" ]] [--remove_url="old-url" [ "other-old-url" ]]
|
||||
# [--auth_header=true|false] [--clear_urls]
|
||||
# | arg: -p, permission= - the name for the permission (by default a permission named "main" is removed automatically when the app is removed)
|
||||
# | arg: -u, url= - (optional) URL for which access will be allowed/forbidden.
|
||||
# | Note that if you want to remove the url you can pass an empty string as argument ("").
|
||||
# | arg: add_url - (optional) List of additional url to add for which access will be allowed/forbidden.
|
||||
# | arg: remove_url - (optional) List of additional url to remove for which access will be allowed/forbidden
|
||||
# | arg: auth_header - (optional) Define for the URL of this permission, if SSOwat pass the authentication header to the application
|
||||
# | arg: clear_urls - (optional) Clean all urls (url and additional_urls)
|
||||
# | arg: -a, add_url= - (optional) List of additional urls to add for which access will be allowed/forbidden.
|
||||
# | arg: -r, remove_url= - (optional) List of additional urls to remove for which access will be allowed/forbidden
|
||||
# | arg: -h, auth_header= - (optional) Define, for the URLs of this permission, whether SSOwat passes the authentication header to the application
|
||||
# | arg: -c, clear_urls - (optional) Clear all urls (url and additional_urls)
|
||||
#
|
||||
# Requires YunoHost version 3.7.0 or higher.
|
||||
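# A hypothetical extra illustration (not from the original doc): add an extra
# URL to the main permission and disable the authentication headers for that permission:
#   ynh_permission_url --permission="main" --add_url="/public" --auth_header=false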
ynh_permission_url() {
|
||||
# Declare an array to define the options of this helper.
|
||||
local legacy_args=puarhc
|
||||
declare -A args_array=([p]=permission= [u]=url= [a]=add_url= [r]=remove_url= [h]=auth_header= [c]=clear_urls)
|
||||
local -A args_array=( [p]=permission= [u]=url= [a]=add_url= [r]=remove_url= [h]=auth_header= [c]=clear_urls )
|
||||
local permission
|
||||
local url
|
||||
local add_url
|
||||
|
@ -324,15 +376,30 @@ ynh_permission_url() {
|
|||
|
||||
if [[ -n $add_url ]]
|
||||
then
|
||||
add_url=",add_url=['${add_url//';'/"','"}']"
|
||||
# Convert a list from getopts to python list
|
||||
# Note that getopts separates the args with ';'
|
||||
# For example:
|
||||
# --add_url /urlA /urlB
|
||||
# will be:
|
||||
# add_url=['/urlA', '/urlB']
|
||||
add_url=",add_url=['${add_url//;/\',\'}']"
|
||||
fi
|
||||
|
||||
if [[ -n $remove_url ]]; then
|
||||
remove_url=",remove_url=['${remove_url//';'/"','"}']"
|
||||
if [[ -n $remove_url ]]
|
||||
then
|
||||
# Convert a list from getopts to python list
|
||||
# Note that getopts separates the args with ';'
|
||||
# For example:
|
||||
# --remove_url /urlA /urlB
|
||||
# will be:
|
||||
# remove_url=['/urlA', '/urlB']
|
||||
remove_url=",remove_url=['${remove_url//;/\',\'}']"
|
||||
fi
|
||||
|
||||
if [[ -n $auth_header ]]; then
|
||||
if [ $auth_header == "true" ]; then
|
||||
if [[ -n $auth_header ]]
|
||||
then
|
||||
if [ $auth_header == "true" ]
|
||||
then
|
||||
auth_header=",auth_header=True"
|
||||
else
|
||||
auth_header=",auth_header=False"
|
||||
|
@ -350,21 +417,21 @@ ynh_permission_url() {
|
|||
|
||||
# Update a permission for the app
|
||||
#
|
||||
# usage: ynh_permission_update --permission "permission" [--add "group" ["group" ...]] [--remove "group" ["group" ...]]
|
||||
# [--label "label"] [--show_tile true|false] [--protected true|false]
|
||||
# | arg: permission - the name for the permission (by default a permission named "main" already exist)
|
||||
# | arg: add - the list of group or users to enable add to the permission
|
||||
# | arg: remove - the list of group or users to remove from the permission
|
||||
# | arg: label - (optional) Define a name for the permission. This label will be shown on the SSO and in the admin.
|
||||
# | arg: show_tile - (optional) Define if a tile will be shown in the SSO
|
||||
# | arg: protected - (optional) Define if this permission is protected. If it is protected the administrator
|
||||
# usage: ynh_permission_update --permission "permission" [--add="group" ["group" ...]] [--remove="group" ["group" ...]]
|
||||
# [--label="label"] [--show_tile=true|false] [--protected=true|false]
|
||||
# | arg: -p, permission= - the name for the permission (by default a permission named "main" already exists)
|
||||
# | arg: -a, add= - the list of groups or users to add to the permission
|
||||
# | arg: -r, remove= - the list of groups or users to remove from the permission
|
||||
# | arg: -l, label= - (optional) Define a name for the permission. This label will be shown on the SSO and in the admin.
|
||||
# | arg: -t, show_tile= - (optional) Define if a tile will be shown in the SSO
|
||||
# | arg: -P, protected= - (optional) Define if this permission is protected. If it is protected the administrator
|
||||
# | won't be able to add or remove the visitors group of this permission.
|
||||
#
|
||||
# Requires YunoHost version 3.7.0 or higher.
|
||||
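# A hypothetical extra illustration (not from the original doc): grant the
# admin permission to bob, revoke it from alice, and relabel the tile:
#   ynh_permission_update --permission="admin" --add="bob" --remove="alice" --label="Admin panel"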
ynh_permission_update() {
|
||||
# Declare an array to define the options of this helper.
|
||||
local legacy_args=parlsp
|
||||
declare -A args_array=( [p]=permission= [a]=add= [r]=remove= [l]=label= [t]=show_tile= [P]=protected= )
|
||||
local legacy_args=parltP
|
||||
local -A args_array=( [p]=permission= [a]=add= [r]=remove= [l]=label= [t]=show_tile= [P]=protected= )
|
||||
local permission
|
||||
local add
|
||||
local remove
|
||||
|
@ -380,10 +447,22 @@ ynh_permission_update() {
|
|||
|
||||
if [[ -n $add ]]
|
||||
then
|
||||
# Convert a list from getopts to python list
|
||||
# Note that getopts separates the args with ';'
|
||||
# For example:
|
||||
# --add alice bob
|
||||
# will be:
|
||||
# add=['alice', 'bob']
|
||||
add=",add=['${add//';'/"','"}']"
|
||||
fi
|
||||
if [[ -n $remove ]]
|
||||
then
|
||||
# Convert a list from getopts to python list
|
||||
# Note that getopts separates the args with ';'
|
||||
# For example:
|
||||
# --remove alice bob
|
||||
# will be:
|
||||
# remove=['alice', 'bob']
|
||||
remove=",remove=['${remove//';'/"','"}']"
|
||||
fi
|
||||
|
||||
|
@ -392,8 +471,10 @@ ynh_permission_update() {
|
|||
label=",label='$label'"
|
||||
fi
|
||||
|
||||
if [[ -n $show_tile ]]; then
|
||||
if [ $show_tile == "true" ]; then
|
||||
if [[ -n $show_tile ]]
|
||||
then
|
||||
if [ $show_tile == "true" ]
|
||||
then
|
||||
show_tile=",show_tile=True"
|
||||
else
|
||||
show_tile=",show_tile=False"
|
||||
|
@ -401,7 +482,8 @@ ynh_permission_update() {
|
|||
fi
|
||||
|
||||
if [[ -n $protected ]]; then
|
||||
if [ $protected == "true" ]; then
|
||||
if [ $protected == "true" ]
|
||||
then
|
||||
protected=",protected=True"
|
||||
else
|
||||
protected=",protected=False"
|
||||
|
|
|
@ -3,8 +3,10 @@
|
|||
# Create a dedicated systemd config
|
||||
#
|
||||
# usage: ynh_add_systemd_config [--service=service] [--template=template]
|
||||
# usage: ynh_add_systemd_config [--service=service] [--template=template] [--others_var="list of others variables to replace"]
|
||||
# | arg: -s, --service= - Service name (optional, $app by default)
|
||||
# | arg: -t, --template= - Name of template file (optional, this is 'systemd' by default, meaning ./conf/systemd.service will be used as template)
|
||||
# | arg: -v, --others_var= - Space-separated list of other variables to replace. For example: 'var_1 var_2 ...'
|
||||
#
|
||||
# This will use the template ../conf/<templatename>.service
|
||||
# to generate a systemd config, by replacing the following keywords
|
||||
|
@ -14,17 +16,23 @@
|
|||
# __APP__ by $app
|
||||
# __FINALPATH__ by $final_path
|
||||
#
|
||||
# And dynamic variables (from the example above):
|
||||
# __VAR_1__ by $var_1
|
||||
# __VAR_2__ by $var_2
|
||||
#
|
||||
# Requires YunoHost version 2.7.11 or higher.
|
||||
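# A hypothetical extra illustration (not from the original doc): an app whose
# systemd template contains a __PORT__ placeholder, with $port defined earlier
# in the install script:
#   ynh_add_systemd_config --service="$app" --others_var="port"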
ynh_add_systemd_config () {
|
||||
# Declare an array to define the options of this helper.
|
||||
local legacy_args=st
|
||||
local -A args_array=( [s]=service= [t]=template= )
|
||||
local legacy_args=stv
|
||||
local -A args_array=( [s]=service= [t]=template= [v]=others_var= )
|
||||
local service
|
||||
local template
|
||||
local others_var
|
||||
# Manage arguments with getopts
|
||||
ynh_handle_getopts_args "$@"
|
||||
local service="${service:-$app}"
|
||||
local template="${template:-systemd.service}"
|
||||
others_var="${others_var:-}"
|
||||
|
||||
finalsystemdconf="/etc/systemd/system/$service.service"
|
||||
ynh_backup_if_checksum_is_different --file="$finalsystemdconf"
|
||||
|
@ -38,10 +46,19 @@ ynh_add_systemd_config () {
|
|||
if [ -n "${app:-}" ]; then
|
||||
ynh_replace_string --match_string="__APP__" --replace_string="$app" --target_file="$finalsystemdconf"
|
||||
fi
|
||||
|
||||
# Replace all other variables given as arguments
|
||||
for var_to_replace in $others_var
|
||||
do
|
||||
# ${var_to_replace^^} converts the name of the variable to upper case
|
||||
# ${!var_to_replace} gets the content of the variable named $var_to_replace
|
||||
ynh_replace_string --match_string="__${var_to_replace^^}__" --replace_string="${!var_to_replace}" --target_file="$finalsystemdconf"
|
||||
done
|
||||
|
||||
ynh_store_file_checksum --file="$finalsystemdconf"
|
||||
|
||||
chown root: "$finalsystemdconf"
|
||||
systemctl enable $service
|
||||
systemctl enable $service --quiet
|
||||
systemctl daemon-reload
|
||||
}
|
||||
|
||||
|
@ -64,7 +81,7 @@ ynh_remove_systemd_config () {
|
|||
if [ -e "$finalsystemdconf" ]
|
||||
then
|
||||
ynh_systemd_action --service_name=$service --action=stop
|
||||
systemctl disable $service
|
||||
systemctl disable $service --quiet
|
||||
ynh_secure_remove --file="$finalsystemdconf"
|
||||
systemctl daemon-reload
|
||||
fi
|
||||
|
@ -128,7 +145,7 @@ ynh_systemd_action() {
|
|||
if ! systemctl $action $service_name
|
||||
then
|
||||
# Show syslog for this service
|
||||
ynh_exec_err journalctl --no-pager --lines=$length --unit=$service_name
|
||||
ynh_exec_err journalctl --quiet --no-hostname --no-pager --lines=$length --unit=$service_name
|
||||
# If a log is specified for this service, show also the content of this log
|
||||
if [ -e "$log_path" ]
|
||||
then
|
||||
|
@ -166,7 +183,7 @@ ynh_systemd_action() {
|
|||
then
|
||||
ynh_print_warn --message="The service $service_name didn't fully executed the action ${action} before the timeout."
|
||||
ynh_print_warn --message="Please find here an extract of the end of the log of the service $service_name:"
|
||||
ynh_exec_warn journalctl --no-pager --lines=$length --unit=$service_name
|
||||
ynh_exec_warn journalctl --quiet --no-hostname --no-pager --lines=$length --unit=$service_name
|
||||
if [ -e "$log_path" ]
|
||||
then
|
||||
ynh_print_warn --message="\-\-\-"
|
||||
|
|
|
@ -35,7 +35,10 @@ ynh_exit_properly () {
|
|||
ynh_clean_setup # Call the function to do specific cleaning for the app.
|
||||
fi
|
||||
|
||||
ynh_die # Exit with error status
|
||||
# Exit with error status
|
||||
# We don't call ynh_die basically to avoid unnecessary 10-ish
|
||||
# debug lines about parsing args and stuff just to exit 1..
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Exits if an error occurs during the execution of the script.
|
||||
|
@ -141,7 +144,13 @@ ynh_setup_source () {
|
|||
then # Use the local source file if it is present
|
||||
cp $local_src $src_filename
|
||||
else # If not, download the source
|
||||
local out=`wget --no-verbose --output-document=$src_filename $src_url 2>&1` || ynh_print_err --message="$out"
|
||||
# NB. we have to declare the var as local first,
|
||||
# otherwise "local foo=$(false) || echo 'pwet'" doesn't work
|
||||
# because local always returns 0 ...
|
||||
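# Illustration of the gotcha described above (not part of the original helper):
#   local out=$(false) || echo "never printed"   # exit status of 'local' is 0
#   local out; out=$(false) || echo "printed"    # the substitution's status is kept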
local out
|
||||
# Timeout option is here to enforce the timeout on dns query and tcp connect (c.f. man wget)
|
||||
out=$(wget --tries 3 --no-dns-cache --timeout 900 --no-verbose --output-document=$src_filename $src_url 2>&1) \
|
||||
|| ynh_die --message="$out"
|
||||
fi
|
||||
|
||||
# Check the control sum
|
||||
|
|
|
@ -10,7 +10,8 @@ source /usr/share/yunohost/helpers
|
|||
backup_dir="${1}/conf/ldap"
|
||||
|
||||
# Backup the configuration
|
||||
ynh_backup "/etc/ldap/slapd.conf" "${backup_dir}/slapd.conf"
|
||||
ynh_backup "/etc/ldap/ldap.conf" "${backup_dir}/ldap.conf"
|
||||
ynh_backup "/etc/ldap/slapd.ldif" "${backup_dir}/slapd.ldif"
|
||||
slapcat -b cn=config -l "${backup_dir}/cn=config.master.ldif"
|
||||
|
||||
# Backup the database
|
||||
|
|
|
@ -1,13 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Exit hook on subcommand error or unset variable
|
||||
set -eu
|
||||
|
||||
# Source YNH helpers
|
||||
source /usr/share/yunohost/helpers
|
||||
|
||||
# Backup destination
|
||||
backup_dir="${1}/conf/ynh/mysql"
|
||||
|
||||
# Save MySQL root password
|
||||
ynh_backup "/etc/yunohost/mysql" "${backup_dir}/root_pwd"
|
|
@ -55,14 +55,63 @@ do_pre_regen() {
|
|||
fi
|
||||
|
||||
# add cron job for diagnosis to be run at 7h and 19h + a random delay between
|
||||
# 0 and 10min, meant to avoid every instances running their diagnosis at
|
||||
# 0 and 20min, meant to avoid every instance running its diagnosis at
|
||||
# exactly the same time, which may overload the diagnosis server.
|
||||
mkdir -p $pending_dir/etc/cron.d/
|
||||
cat > $pending_dir/etc/cron.d/yunohost-diagnosis << EOF
|
||||
SHELL=/bin/bash
|
||||
0 7,19 * * * root : YunoHost Diagnosis; sleep \$((RANDOM\\%600)); yunohost diagnosis run > /dev/null
|
||||
0 7,19 * * * root : YunoHost Automatic Diagnosis; sleep \$((RANDOM\\%1200)); yunohost diagnosis run --email > /dev/null 2>/dev/null || echo "Running the automatic diagnosis failed miserably"
|
||||
EOF
|
||||
|
||||
# legacy stuff to avoid yunohost reporting etckeeper as manually modified
|
||||
# (this makes sure that the hash is null / the file is flagged as to-delete)
|
||||
mkdir -p $pending_dir/etc/etckeeper
|
||||
touch $pending_dir/etc/etckeeper/etckeeper.conf
|
||||
|
||||
# Skip ntp if inside a container (inspired by the conf of systemd-timesyncd)
|
||||
mkdir -p ${pending_dir}/etc/systemd/system/ntp.service.d/
|
||||
echo "
|
||||
[Unit]
|
||||
ConditionCapability=CAP_SYS_TIME
|
||||
ConditionVirtualization=!container
|
||||
" > ${pending_dir}/etc/systemd/system/ntp.service.d/ynh-override.conf
|
||||
|
||||
# Make nftables conflict with yunohost-firewall
|
||||
mkdir -p ${pending_dir}/etc/systemd/system/nftables.service.d/
|
||||
cat > ${pending_dir}/etc/systemd/system/nftables.service.d/ynh-override.conf << EOF
|
||||
[Unit]
|
||||
# yunohost-firewall and nftables conflict with each other
|
||||
Conflicts=yunohost-firewall.service
|
||||
ConditionFileIsExecutable=!/etc/init.d/yunohost-firewall
|
||||
ConditionPathExists=!/etc/systemd/system/multi-user.target.wants/yunohost-firewall.service
|
||||
EOF
|
||||
}
|
||||
|
||||
do_post_regen() {
|
||||
regen_conf_files=$1
|
||||
|
||||
######################
|
||||
# Enforce permissions #
|
||||
######################
|
||||
|
||||
# Certs
|
||||
# We do this with find because there could be a lot of them...
|
||||
chown -R root:ssl-cert /etc/yunohost/certs
|
||||
chmod 750 /etc/yunohost/certs
|
||||
find /etc/yunohost/certs/ -type f -exec chmod 640 {} \;
|
||||
find /etc/yunohost/certs/ -type d -exec chmod 750 {} \;
|
||||
|
||||
# Misc configuration / state files
|
||||
chown root:root $(ls /etc/yunohost/{*.yml,*.yaml,*.json,mysql,psql} 2>/dev/null)
|
||||
chmod 600 $(ls /etc/yunohost/{*.yml,*.yaml,*.json,mysql,psql} 2>/dev/null)
|
||||
|
||||
# Apps folder, custom hooks folder
|
||||
[[ ! -e /etc/yunohost/hooks.d ]] || (chown root /etc/yunohost/hooks.d && chmod 700 /etc/yunohost/hooks.d)
|
||||
[[ ! -e /etc/yunohost/apps ]] || (chown root /etc/yunohost/apps && chmod 700 /etc/yunohost/apps)
|
||||
|
||||
# Propagates changes in systemd service config overrides
|
||||
[[ ! "$regen_conf_files" =~ "ntp.service.d/ynh-override.conf" ]] || { systemctl daemon-reload; systemctl restart ntp; }
|
||||
[[ ! "$regen_conf_files" =~ "nftables.service.d/ynh-override.conf" ]] || systemctl daemon-reload
|
||||
}
|
||||
|
||||
_update_services() {
|
||||
|
@ -74,7 +123,7 @@ with open('services.yml') as f:
|
|||
new_services = yaml.load(f)
|
||||
|
||||
with open('/etc/yunohost/services.yml') as f:
|
||||
services = yaml.load(f)
|
||||
services = yaml.load(f) or {}
|
||||
|
||||
updated = False
|
||||
|
||||
|
@ -132,6 +181,7 @@ case "$1" in
|
|||
do_pre_regen $4
|
||||
;;
|
||||
post)
|
||||
do_post_regen $4
|
||||
;;
|
||||
init)
|
||||
do_init_regen
|
||||
|
|
|
@ -69,12 +69,11 @@ do_init_regen() {
|
|||
-out "${ssl_dir}/certs/yunohost_crt.pem" \
|
||||
-batch >>$LOGFILE 2>&1
|
||||
|
||||
last_cert=$(ls $ssl_dir/newcerts/*.pem | sort -V | tail -n 1)
|
||||
chmod 640 "${ssl_dir}/certs/yunohost_key.pem"
|
||||
chmod 640 "$last_cert"
|
||||
chmod 640 "${ssl_dir}/certs/yunohost_crt.pem"
|
||||
|
||||
cp "${ssl_dir}/certs/yunohost_key.pem" "$ynh_key"
|
||||
cp "$last_cert" "$ynh_crt"
|
||||
cp "${ssl_dir}/certs/yunohost_crt.pem" "$ynh_crt"
|
||||
ln -sf "$ynh_crt" /etc/ssl/certs/yunohost_crt.pem
|
||||
ln -sf "$ynh_key" /etc/ssl/private/yunohost_key.pem
|
||||
fi
|
||||
|
|
|
@ -12,27 +12,52 @@ do_init_regen() {
|
|||
|
||||
do_pre_regen ""
|
||||
|
||||
# fix some permissions
|
||||
chown root:openldap /etc/ldap/slapd.conf
|
||||
systemctl daemon-reload
|
||||
|
||||
_regenerate_slapd_conf
|
||||
|
||||
# Enforce permissions
|
||||
chown root:openldap /etc/ldap/slapd.ldif
|
||||
chown -R openldap:openldap /etc/ldap/schema/
|
||||
usermod -aG ssl-cert openldap
|
||||
|
||||
# check the slapd config file at first
|
||||
slaptest -Q -u -f /etc/ldap/slapd.conf
|
||||
|
||||
# regenerate LDAP config directory from slapd.conf
|
||||
rm -Rf /etc/ldap/slapd.d
|
||||
mkdir /etc/ldap/slapd.d
|
||||
slaptest -f /etc/ldap/slapd.conf -F /etc/ldap/slapd.d/ 2>&1
|
||||
chown -R openldap:openldap /etc/ldap/slapd.d/
|
||||
|
||||
service slapd restart
|
||||
}
|
||||
|
||||
_regenerate_slapd_conf() {
|
||||
|
||||
# Validate the new slapd config
|
||||
# To do so, we have to use the .ldif to generate the config directory
|
||||
# so we use a temporary directory slapd_new.d
|
||||
rm -Rf /etc/ldap/slapd_new.d
|
||||
mkdir /etc/ldap/slapd_new.d
|
||||
slapadd -n0 -l /etc/ldap/slapd.ldif -F /etc/ldap/slapd_new.d/ 2>&1
|
||||
# Actual validation (-Q is for quiet, -u is for dry-run)
|
||||
slaptest -Q -u -F /etc/ldap/slapd_new.d
|
||||
|
||||
# "Commit" / apply the new config (meaning we delete the old one and replace
|
||||
# it with the new one)
|
||||
rm -Rf /etc/ldap/slapd.d
|
||||
mv /etc/ldap/slapd_new.d /etc/ldap/slapd.d
|
||||
|
||||
chown -R openldap:openldap /etc/ldap/slapd.d/
|
||||
}
|
||||
|
||||
do_pre_regen() {
|
||||
pending_dir=$1
|
||||
|
||||
cd /usr/share/yunohost/templates/slapd
|
||||
# remove temporary backup file
|
||||
rm -f "$tmp_backup_dir_file"
|
||||
|
||||
# Define if we need to migrate from hdb to mdb
|
||||
curr_backend=$(grep '^database' /etc/ldap/slapd.conf 2>/dev/null | awk '{print $2}')
|
||||
if [ -e /etc/ldap/slapd.conf ] && [ -n "$curr_backend" ] && \
|
||||
[ $curr_backend != 'mdb' ]; then
|
||||
backup_dir="/var/backups/dc=yunohost,dc=org-${curr_backend}-$(date +%s)"
|
||||
mkdir -p "$backup_dir"
|
||||
slapcat -b dc=yunohost,dc=org -l "${backup_dir}/dc=yunohost-dc=org.ldif"
|
||||
echo "$backup_dir" > "$tmp_backup_dir_file"
|
||||
fi
|
||||
|
||||
# create needed directories
|
||||
ldap_dir="${pending_dir}/etc/ldap"
|
||||
|
@ -40,28 +65,15 @@ do_pre_regen() {
|
|||
mkdir -p "$ldap_dir" "$schema_dir"
|
||||
|
||||
# remove legacy configuration file
|
||||
[ ! -f /etc/ldap/slapd-yuno.conf ] \
|
||||
|| touch "${pending_dir}/etc/ldap/slapd-yuno.conf"
|
||||
[ ! -f /etc/ldap/slapd-yuno.conf ] || touch "${ldap_dir}/slapd-yuno.conf"
|
||||
[ ! -f /etc/ldap/slapd.conf ] || touch "${ldap_dir}/slapd.conf"
|
||||
[ ! -f /etc/ldap/schema/yunohost.schema ] || touch "${schema_dir}/yunohost.schema"
|
||||
|
||||
# remove temporary backup file
|
||||
rm -f "$tmp_backup_dir_file"
|
||||
|
||||
# retrieve current and new backends
|
||||
curr_backend=$(grep '^database' /etc/ldap/slapd.conf 2>/dev/null | awk '{print $2}')
|
||||
new_backend=$(grep '^database' slapd.conf | awk '{print $2}')
|
||||
|
||||
# save current database before any conf changes
|
||||
if [[ -n "$curr_backend" && "$curr_backend" != "$new_backend" ]]; then
|
||||
backup_dir="/var/backups/dc=yunohost,dc=org-${curr_backend}-$(date +%s)"
|
||||
mkdir -p "$backup_dir"
|
||||
slapcat -b dc=yunohost,dc=org \
|
||||
-l "${backup_dir}/dc=yunohost-dc=org.ldif"
|
||||
echo "$backup_dir" > "$tmp_backup_dir_file"
|
||||
fi
|
||||
cd /usr/share/yunohost/templates/slapd
|
||||
|
||||
# copy configuration files
|
||||
cp -a ldap.conf slapd.conf "$ldap_dir"
|
||||
cp -a sudo.schema mailserver.schema yunohost.schema "$schema_dir"
|
||||
cp -a ldap.conf slapd.ldif "$ldap_dir"
|
||||
cp -a sudo.ldif mailserver.ldif permission.ldif "$schema_dir"
|
||||
|
||||
mkdir -p ${pending_dir}/etc/systemd/system/slapd.service.d/
|
||||
cp systemd-override.conf ${pending_dir}/etc/systemd/system/slapd.service.d/ynh-override.conf
|
||||
|
@ -72,19 +84,13 @@ do_pre_regen() {
|
|||
do_post_regen() {
|
||||
regen_conf_files=$1
|
||||
|
||||
# ensure that slapd.d exists
|
||||
mkdir -p /etc/ldap/slapd.d
|
||||
|
||||
# fix some permissions
|
||||
echo "Making sure we have the right permissions needed ..."
|
||||
echo "Enforce permissions on ldap/slapd directories and certs ..."
|
||||
# The openldap user should be in the ssl-cert group to let it access the certificate for TLS
|
||||
usermod -aG ssl-cert openldap
|
||||
chown root:openldap /etc/ldap/slapd.conf
|
||||
chown root:openldap /etc/ldap/slapd.ldif
|
||||
chown -R openldap:openldap /etc/ldap/schema/
|
||||
chown -R openldap:openldap /etc/ldap/slapd.d/
|
||||
chown -R root:ssl-cert /etc/yunohost/certs/yunohost.org/
|
||||
chmod o-rwx /etc/yunohost/certs/yunohost.org/
|
||||
chmod -R g+rx /etc/yunohost/certs/yunohost.org/
|
||||
|
||||
# If we changed the systemd ynh-override conf
|
||||
if echo "$regen_conf_files" | sed 's/,/\n/g' | grep -q "^/etc/systemd/system/slapd.service.d/ynh-override.conf$"
|
||||
|
@ -95,29 +101,17 @@ do_post_regen() {
|
|||
|
||||
[ -z "$regen_conf_files" ] && exit 0
|
||||
|
||||
# check the slapd config file at first
|
||||
slaptest -Q -u -f /etc/ldap/slapd.conf
|
||||
# regenerate LDAP config directory from slapd.conf
|
||||
echo "Regenerate LDAP config directory from slapd.ldif"
|
||||
_regenerate_slapd_conf
|
||||
|
||||
# check if a backup should be restored
|
||||
# If there's a backup, re-import its data
|
||||
backup_dir=$(cat "$tmp_backup_dir_file" 2>/dev/null || true)
|
||||
if [[ -n "$backup_dir" && -f "${backup_dir}/dc=yunohost-dc=org.ldif" ]]; then
|
||||
# regenerate LDAP config directory and import database as root
|
||||
# since the admin user may be unavailable
|
||||
echo "Regenerate LDAP config directory and import the database using slapadd"
|
||||
sh -c "rm -Rf /etc/ldap/slapd.d;
|
||||
mkdir /etc/ldap/slapd.d;
|
||||
slaptest -f /etc/ldap/slapd.conf -F /etc/ldap/slapd.d;
|
||||
chown -R openldap:openldap /etc/ldap/slapd.d;
|
||||
slapadd -F /etc/ldap/slapd.d -b dc=yunohost,dc=org \
|
||||
-l '${backup_dir}/dc=yunohost-dc=org.ldif';
|
||||
chown -R openldap:openldap /var/lib/ldap" 2>&1
|
||||
else
|
||||
# regenerate LDAP config directory from slapd.conf
|
||||
echo "Regenerate LDAP config directory from slapd.conf"
|
||||
rm -Rf /etc/ldap/slapd.d
|
||||
mkdir /etc/ldap/slapd.d
|
||||
slaptest -f /etc/ldap/slapd.conf -F /etc/ldap/slapd.d/ 2>&1
|
||||
chown -R openldap:openldap /etc/ldap/slapd.d/
|
||||
echo "Import the database using slapadd"
|
||||
slapadd -F /etc/ldap/slapd.d -b dc=yunohost,dc=org -l "${backup_dir}/dc=yunohost-dc=org.ldif"
|
||||
chown -R openldap:openldap /var/lib/ldap 2>&1
|
||||
fi
|
||||
|
||||
echo "Running slapdindex"
|
||||
|
@ -157,6 +151,9 @@ case "$1" in
|
|||
init)
|
||||
do_init_regen
|
||||
;;
|
||||
apply_config)
|
||||
do_post_regen /etc/ldap/slapd.ldif
|
||||
;;
|
||||
*)
|
||||
echo "hook called with unknown argument \`$1'" >&2
|
||||
exit 1
|
||||
|
|
|
@ -14,7 +14,6 @@ do_pre_regen() {
|
|||
|
||||
# retrieve variables
|
||||
main_domain=$(cat /etc/yunohost/current_host)
|
||||
domain_list=$(yunohost domain list --output-as plain --quiet)
|
||||
|
||||
# install main conf file
|
||||
cat metronome.cfg.lua \
|
||||
|
@ -22,7 +21,7 @@ do_pre_regen() {
|
|||
> "${metronome_dir}/metronome.cfg.lua"
|
||||
|
||||
# add domain conf files
|
||||
for domain in $domain_list; do
|
||||
for domain in $YNH_DOMAINS; do
|
||||
cat domain.tpl.cfg.lua \
|
||||
| sed "s/{{ domain }}/${domain}/g" \
|
||||
> "${metronome_conf_dir}/${domain}.cfg.lua"
|
||||
|
@ -33,7 +32,7 @@ do_pre_regen() {
|
|||
| awk '/^[^\.]+\.[^\.]+.*\.cfg\.lua$/ { print $1 }')
|
||||
for file in $conf_files; do
|
||||
domain=${file%.cfg.lua}
|
||||
[[ $domain_list =~ $domain ]] \
|
||||
[[ $YNH_DOMAINS =~ $domain ]] \
|
||||
|| touch "${metronome_conf_dir}/${file}"
|
||||
done
|
||||
}
|
||||
|
@ -43,6 +42,9 @@ do_post_regen() {
|
|||
|
||||
# retrieve variables
|
||||
main_domain=$(cat /etc/yunohost/current_host)
|
||||
|
||||
# FIXME : small optimization to do to avoid calling a yunohost command ...
|
||||
# maybe another env variable like YNH_MAIN_DOMAINS idk
|
||||
domain_list=$(yunohost domain list --exclude-subdomains --output-as plain --quiet)
|
||||
|
||||
# create metronome directories for domains
|
||||
|
@ -55,6 +57,9 @@ do_post_regen() {
|
|||
done
|
||||
|
||||
# fix some permissions
|
||||
|
||||
# metronome should be in ssl-cert group to let it access SSL certificates
|
||||
usermod -aG ssl-cert metronome
|
||||
chown -R metronome: /var/lib/metronome/
|
||||
chown -R metronome: /etc/metronome/conf.d/
|
||||
|
||||
|
|
|
@ -26,6 +26,9 @@ do_init_regen() {
|
|||
ynh_render_template "security.conf.inc" "${nginx_conf_dir}/security.conf.inc"
|
||||
ynh_render_template "yunohost_admin.conf" "${nginx_conf_dir}/yunohost_admin.conf"
|
||||
|
||||
mkdir -p $nginx_conf_dir/default.d/
|
||||
cp "redirect_to_admin.conf" $nginx_conf_dir/default.d/
|
||||
|
||||
# Restart nginx if conf looks good, otherwise display error and exit unhappy
|
||||
nginx -t 2>/dev/null || { nginx -t; exit 1; }
|
||||
systemctl restart nginx || { journalctl --no-pager --lines=10 -u nginx >&2; exit 1; }
|
||||
|
@ -47,14 +50,15 @@ do_pre_regen() {
|
|||
|
||||
# retrieve variables
|
||||
main_domain=$(cat /etc/yunohost/current_host)
|
||||
domain_list=$(yunohost domain list --output-as plain --quiet)
|
||||
|
||||
# Support different strategy for security configurations
|
||||
export compatibility="$(yunohost settings get 'security.nginx.compatibility')"
|
||||
ynh_render_template "security.conf.inc" "${nginx_conf_dir}/security.conf.inc"
|
||||
|
||||
cert_status=$(yunohost domain cert-status --json)
|
||||
|
||||
# add domain conf files
|
||||
for domain in $domain_list; do
|
||||
for domain in $YNH_DOMAINS; do
|
||||
domain_conf_dir="${nginx_conf_dir}/${domain}.d"
|
||||
mkdir -p "$domain_conf_dir"
|
||||
mail_autoconfig_dir="${pending_dir}/var/www/.well-known/${domain}/autoconfig/mail/"
|
||||
|
@ -62,7 +66,7 @@ do_pre_regen() {
|
|||
|
||||
# NGINX server configuration
|
||||
export domain
|
||||
export domain_cert_ca=$(yunohost domain cert-status $domain --json \
|
||||
export domain_cert_ca=$(echo $cert_status \
|
||||
| jq ".certificates.\"$domain\".CA_type" \
|
||||
| tr -d '"')
|
||||
|
||||
|
@ -76,13 +80,15 @@ do_pre_regen() {
|
|||
done
|
||||
|
||||
ynh_render_template "yunohost_admin.conf" "${nginx_conf_dir}/yunohost_admin.conf"
|
||||
mkdir -p $nginx_conf_dir/default.d/
|
||||
cp "redirect_to_admin.conf" $nginx_conf_dir/default.d/
|
||||
|
||||
# remove old domain conf files
|
||||
conf_files=$(ls -1 /etc/nginx/conf.d \
|
||||
| awk '/^[^\.]+\.[^\.]+.*\.conf$/ { print $1 }')
|
||||
for file in $conf_files; do
|
||||
domain=${file%.conf}
|
||||
[[ $domain_list =~ $domain ]] \
|
||||
[[ $YNH_DOMAINS =~ $domain ]] \
|
||||
|| touch "${nginx_conf_dir}/${file}"
|
||||
done
|
||||
|
||||
|
@ -90,7 +96,7 @@ do_pre_regen() {
|
|||
autoconfig_files=$(ls -1 /var/www/.well-known/*/autoconfig/mail/config-v1.1.xml 2>/dev/null || true)
|
||||
for file in $autoconfig_files; do
|
||||
domain=$(basename $(readlink -f $(dirname $file)/../..))
|
||||
[[ $domain_list =~ $domain ]] \
|
||||
[[ $YNH_DOMAINS =~ $domain ]] \
|
||||
|| (mkdir -p "$(dirname ${pending_dir}/${file})" && touch "${pending_dir}/${file}")
|
||||
done
|
||||
|
||||
|
@ -104,16 +110,13 @@ do_post_regen() {
|
|||
|
||||
[ -z "$regen_conf_files" ] && exit 0
|
||||
|
||||
# retrieve variables
|
||||
domain_list=$(yunohost domain list --output-as plain --quiet)
|
||||
|
||||
# create NGINX conf directories for domains
|
||||
for domain in $domain_list; do
|
||||
for domain in $YNH_DOMAINS; do
|
||||
mkdir -p "/etc/nginx/conf.d/${domain}.d"
|
||||
done
|
||||
|
||||
# Get rid of legacy lets encrypt snippets
|
||||
for domain in $domain_list; do
|
||||
for domain in $YNH_DOMAINS; do
|
||||
# If the legacy letsencrypt / acme-challenge domain-specific snippet is still there
|
||||
if [ -e /etc/nginx/conf.d/${domain}.d/000-acmechallenge.conf ]
|
||||
then
|
||||
|
|
|
@ -20,18 +20,17 @@ do_pre_regen() {
|
|||
|
||||
# prepare main.cf conf file
|
||||
main_domain=$(cat /etc/yunohost/current_host)
|
||||
domain_list=$(yunohost domain list --output-as plain --quiet | tr '\n' ' ')
|
||||
|
||||
# Support different strategy for security configurations
|
||||
export compatibility="$(yunohost settings get 'security.postfix.compatibility')"
|
||||
|
||||
export main_domain
|
||||
export domain_list
|
||||
export domain_list="$YNH_DOMAINS"
|
||||
ynh_render_template "main.cf" "${postfix_dir}/main.cf"
|
||||
|
||||
cat postsrsd \
|
||||
| sed "s/{{ main_domain }}/${main_domain}/g" \
|
||||
| sed "s/{{ domain_list }}/${domain_list}/g" \
|
||||
| sed "s/{{ domain_list }}/${YNH_DOMAINS}/g" \
|
||||
> "${default_dir}/postsrsd"
|
||||
|
||||
# adapt it for IPv4-only hosts
|
||||
|
|
|
@ -25,11 +25,8 @@ do_post_regen() {
|
|||
mkdir -p /etc/dkim
|
||||
chown _rspamd /etc/dkim
|
||||
|
||||
# retrieve domain list
|
||||
domain_list=$(yunohost domain list --output-as plain --quiet)
|
||||
|
||||
# create DKIM key for domains
|
||||
for domain in $domain_list; do
|
||||
for domain in $YNH_DOMAINS; do
|
||||
domain_key="/etc/dkim/${domain}.mail.key"
|
||||
[ ! -f "$domain_key" ] && {
|
||||
# We use a 1024 bit size because nsupdate doesn't seem to be able to
|
||||
|
@ -45,6 +42,8 @@ do_post_regen() {
|
|||
chown _rspamd /etc/dkim/*.mail.key
|
||||
chmod 400 /etc/dkim/*.mail.key
|
||||
|
||||
[ ! -e /var/log/rspamd ] || chown -R _rspamd:_rspamd /var/log/rspamd
|
||||
|
||||
regen_conf_files=$1
|
||||
[ -z "$regen_conf_files" ] && exit 0
|
||||
|
||||
|
|
|
@ -15,6 +15,18 @@ do_pre_regen() {
|
|||
do_post_regen() {
|
||||
regen_conf_files=$1
|
||||
|
||||
# mysql is supposed to be an alias to mariadb... but in some weird cases it is not
|
||||
# c.f. https://forum.yunohost.org/t/mysql-ne-fonctionne-pas/11661
|
||||
# Playing with enable/disable allows recreating the proper symlinks.
|
||||
if [ ! -e /etc/systemd/system/mysql.service ]
|
||||
then
|
||||
systemctl stop mysql -q
|
||||
systemctl disable mysql -q
|
||||
systemctl disable mariadb -q
|
||||
systemctl enable mariadb -q
|
||||
systemctl is-active mariadb -q || systemctl start mariadb
|
||||
fi
|
||||
|
||||
if [ ! -f /etc/yunohost/mysql ]; then
|
||||
|
||||
# ensure that mysql is running
|
||||
|
|
|
@ -1,36 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
do_pre_regen() {
|
||||
pending_dir=$1
|
||||
|
||||
cd /usr/share/yunohost/templates/glances
|
||||
|
||||
install -D -m 644 glances.default "${pending_dir}/etc/default/glances"
|
||||
}
|
||||
|
||||
do_post_regen() {
|
||||
regen_conf_files=$1
|
||||
|
||||
[[ -z "$regen_conf_files" ]] \
|
||||
|| service glances restart
|
||||
}
|
||||
|
||||
FORCE=${2:-0}
|
||||
DRY_RUN=${3:-0}
|
||||
|
||||
case "$1" in
|
||||
pre)
|
||||
do_pre_regen $4
|
||||
;;
|
||||
post)
|
||||
do_post_regen $4
|
||||
;;
|
||||
*)
|
||||
echo "hook called with unknown argument \`$1'" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
exit 0
|
|
@ -26,10 +26,9 @@ do_pre_regen() {
|
|||
ynh_validate_ip4 "$ipv4" || ipv4='127.0.0.1'
|
||||
ipv6=$(curl -s -6 https://ip6.yunohost.org 2>/dev/null || true)
|
||||
ynh_validate_ip6 "$ipv6" || ipv6=''
|
||||
domain_list=$(yunohost domain list --output-as plain --quiet)
|
||||
|
||||
# add domain conf files
|
||||
for domain in $domain_list; do
|
||||
for domain in $YNH_DOMAINS; do
|
||||
cat domain.tpl \
|
||||
| sed "s/{{ domain }}/${domain}/g" \
|
||||
| sed "s/{{ ip }}/${ipv4}/g" \
|
||||
|
@ -42,7 +41,7 @@ do_pre_regen() {
|
|||
conf_files=$(ls -1 /etc/dnsmasq.d \
|
||||
| awk '/^[^\.]+\.[^\.]+.*$/ { print $1 }')
|
||||
for domain in $conf_files; do
|
||||
[[ $domain_list =~ $domain ]] \
|
||||
[[ $YNH_DOMAINS =~ $domain ]] \
|
||||
|| touch "${dnsmasq_dir}/${domain}"
|
||||
done
|
||||
}
|
||||
|
@ -65,8 +64,21 @@ do_post_regen() {
|
|||
systemctl restart resolvconf
|
||||
fi
|
||||
|
||||
[[ -z "$regen_conf_files" ]] \
|
||||
|| service dnsmasq restart
|
||||
# Some stupid things like rabbitmq-server used by onlyoffice won't work if
|
||||
# the *short* hostname doesn't exist in /etc/hosts -_-
|
||||
short_hostname=$(hostname -s)
|
||||
grep -q "127.0.0.1.*$short_hostname" /etc/hosts || echo -e "\n127.0.0.1\t$short_hostname" >>/etc/hosts
|
||||
|
||||
[[ -n "$regen_conf_files" ]] || return
|
||||
|
||||
# Remove / disable services likely to conflict with dnsmasq
|
||||
for SERVICE in systemd-resolved bind9
|
||||
do
|
||||
systemctl is-enabled $SERVICE &>/dev/null && systemctl disable $SERVICE 2>/dev/null
|
||||
systemctl is-active $SERVICE &>/dev/null && systemctl stop $SERVICE
|
||||
done
|
||||
|
||||
systemctl restart dnsmasq
|
||||
}
|
||||
|
||||
FORCE=${2:-0}
|
||||
|
|
|
@ -1,9 +1,11 @@
|
|||
#!/usr/bin/env python
|
||||
|
||||
import os
|
||||
import json
|
||||
import subprocess
|
||||
|
||||
from moulinette.utils.process import check_output
|
||||
from moulinette.utils.filesystem import read_file
|
||||
from moulinette.utils.filesystem import read_file, read_json, write_to_json
|
||||
from yunohost.diagnosis import Diagnoser
|
||||
from yunohost.utils.packages import ynh_packages_version
|
||||
|
||||
|
@ -32,7 +34,7 @@ class BaseSystemDiagnoser(Diagnoser):
|
|||
|
||||
# Also possibly the board name
|
||||
if os.path.exists("/proc/device-tree/model"):
|
||||
model = read_file('/proc/device-tree/model').strip()
|
||||
model = read_file('/proc/device-tree/model').strip().replace('\x00', '')
|
||||
hardware["data"]["model"] = model
|
||||
hardware["details"] = ["diagnosis_basesystem_hardware_board"]
|
||||
|
||||
|
@ -74,5 +76,75 @@ class BaseSystemDiagnoser(Diagnoser):
|
|||
details=ynh_version_details)
|
||||
|
||||
|
||||
if self.is_vulnerable_to_meltdown():
|
||||
yield dict(meta={"test": "meltdown"},
|
||||
status="ERROR",
|
||||
summary="diagnosis_security_vulnerable_to_meltdown",
|
||||
details=["diagnosis_security_vulnerable_to_meltdown_details"]
|
||||
)
|
||||
|
||||
def is_vulnerable_to_meltdown(self):
|
||||
# meltdown CVE: https://security-tracker.debian.org/tracker/CVE-2017-5754
|
||||
|
||||
# We use a cache file to avoid re-running the script so many times,
|
||||
# which can be expensive (up to around 5 seconds on ARM)
|
||||
# and make the admin appear to be slow (c.f. the calls to diagnosis
|
||||
# from the webadmin)
|
||||
#
|
||||
# The cache is in /tmp and shall disappear upon reboot
|
||||
# *or* we compare it to dpkg.log modification time
|
||||
# such that it's re-run if there were package upgrades
|
||||
# (e.g. from yunohost)
|
||||
cache_file = "/tmp/yunohost-meltdown-diagnosis"
|
||||
dpkg_log = "/var/log/dpkg.log"
|
||||
if os.path.exists(cache_file):
|
||||
if not os.path.exists(dpkg_log) or os.path.getmtime(cache_file) > os.path.getmtime(dpkg_log):
|
||||
self.logger_debug("Using cached results for meltdown checker, from %s" % cache_file)
|
||||
return read_json(cache_file)[0]["VULNERABLE"]
|
||||
|
||||
# script taken from https://github.com/speed47/spectre-meltdown-checker
|
||||
# script commit id is stored directly in the script
|
||||
SCRIPT_PATH = "/usr/lib/moulinette/yunohost/vendor/spectre-meltdown-checker/spectre-meltdown-checker.sh"
|
||||
|
||||
# '--variant 3' corresponds to Meltdown
|
||||
# example output from the script:
|
||||
# [{"NAME":"MELTDOWN","CVE":"CVE-2017-5754","VULNERABLE":false,"INFOS":"PTI mitigates the vulnerability"}]
|
||||
try:
|
||||
self.logger_debug("Running meltdown vulnerability checker")
|
||||
call = subprocess.Popen("bash %s --batch json --variant 3" %
|
||||
SCRIPT_PATH, shell=True,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE)
|
||||
|
||||
# TODO / FIXME : here we are ignoring error messages ...
|
||||
# in particular on RPi2 and other hardware, the script complains about
|
||||
# "missing some kernel info (see -v), accuracy might be reduced"
|
||||
# Dunno what to do about that but we probably don't want to harass
|
||||
# users with this warning ...
|
||||
output, err = call.communicate()
|
||||
assert call.returncode in (0, 2, 3), "Return code: %s" % call.returncode
|
||||
|
||||
# If there are multiple lines, sounds like there was some messages
|
||||
# in stdout that are not json >.> ... Try to get the actual json
|
||||
# stuff which should be the last line
|
||||
output = output.strip()
|
||||
if "\n" in output:
|
||||
self.logger_debug("Original meltdown checker output : %s" % output)
|
||||
output = output.split("\n")[-1]
|
||||
|
||||
CVEs = json.loads(output)
|
||||
assert len(CVEs) == 1
|
||||
assert CVEs[0]["NAME"] == "MELTDOWN"
|
||||
except Exception as e:
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
self.logger_warning("Something wrong happened when trying to diagnose Meltdown vunerability, exception: %s" % e)
|
||||
raise Exception("Command output for failed meltdown check: '%s'" % output)
|
||||
|
||||
self.logger_debug("Writing results from meltdown checker to cache file, %s" % cache_file)
|
||||
write_to_json(cache_file, CVEs)
|
||||
return CVEs[0]["VULNERABLE"]
|
||||
|
||||
|
||||
def main(args, env, loggers):
|
||||
return BaseSystemDiagnoser(args, env, loggers).diagnose()
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
#!/usr/bin/env python
|
||||
|
||||
import re
|
||||
import os
|
||||
import random
|
||||
|
||||
|
@ -10,6 +11,7 @@ from moulinette.utils.filesystem import read_file
|
|||
from yunohost.diagnosis import Diagnoser
|
||||
from yunohost.utils.network import get_network_interfaces
|
||||
|
||||
|
||||
class IPDiagnoser(Diagnoser):
|
||||
|
||||
id_ = os.path.splitext(os.path.basename(__file__))[0].split("-")[1]
|
||||
|
@ -72,8 +74,9 @@ class IPDiagnoser(Diagnoser):
|
|||
ipv6 = self.get_public_ip(6) if can_ping_ipv6 else None
|
||||
|
||||
network_interfaces = get_network_interfaces()
|
||||
|
||||
def get_local_ip(version):
|
||||
local_ip = {iface:addr[version].split("/")[0]
|
||||
local_ip = {iface: addr[version].split("/")[0]
|
||||
for iface, addr in network_interfaces.items() if version in addr}
|
||||
if not local_ip:
|
||||
return None
|
||||
|
@ -92,7 +95,7 @@ class IPDiagnoser(Diagnoser):
|
|||
data={"global": ipv6, "local": get_local_ip("ipv6")},
|
||||
status="SUCCESS" if ipv6 else "WARNING",
|
||||
summary="diagnosis_ip_connected_ipv6" if ipv6 else "diagnosis_ip_no_ipv6",
|
||||
details=["diagnosis_ip_global", "diagnosis_ip_local"] if ipv6 else None)
|
||||
details=["diagnosis_ip_global", "diagnosis_ip_local"] if ipv6 else ["diagnosis_ip_no_ipv6_tip"])
|
||||
|
||||
# TODO / FIXME : add some attempt to detect ISP (using whois ?) ?
|
||||
|
||||
|
@ -105,9 +108,17 @@ class IPDiagnoser(Diagnoser):
|
|||
return False
|
||||
|
||||
# If we are indeed connected in ipv4 or ipv6, we should find a default route
|
||||
routes = check_output("ip -%s route" % protocol).split("\n")
|
||||
if not any(r.startswith("default") for r in routes):
|
||||
return False
|
||||
routes = check_output("ip -%s route show table all" % protocol).split("\n")
|
||||
|
||||
def is_default_route(r):
|
||||
# Typically the default route starts with "default"
|
||||
# But of course IPv6 is more complex ... e.g. on internet cube there's
|
||||
# no default route but a /3 which acts as a default-like route...
|
||||
# e.g. 2000:/3 dev tun0 ...
|
||||
return r.startswith("default") or (":" in r and re.match(r".*/[0-3]$", r.split()[0]))
|
||||
if not any(is_default_route(r) for r in routes):
|
||||
self.logger_debug("No default route for IPv%s, so assuming there's no IP address for that version" % protocol)
|
||||
return None
|
||||
|
||||
# We use the resolver file as a list of well-known, trustable (ie not google ;)) IPs that we can ping
|
||||
resolver_file = "/usr/share/yunohost/templates/dnsmasq/plain/resolv.dnsmasq.conf"
|
||||
|
|
|
@ -1,13 +1,19 @@
|
|||
#!/usr/bin/env python
|
||||
|
||||
import os
|
||||
import re
|
||||
|
||||
from moulinette.utils.filesystem import read_file
|
||||
from datetime import datetime, timedelta
|
||||
from publicsuffix import PublicSuffixList
|
||||
|
||||
from moulinette.utils.process import check_output
|
||||
|
||||
from yunohost.utils.network import dig
|
||||
from yunohost.diagnosis import Diagnoser
|
||||
from yunohost.domain import domain_list, _build_dns_conf, _get_maindomain
|
||||
|
||||
YNH_DYNDNS_DOMAINS = ['nohost.me', 'noho.st', 'ynh.fr']
|
||||
|
||||
|
||||
class DNSRecordsDiagnoser(Diagnoser):
|
||||
|
||||
|
@ -17,24 +23,22 @@ class DNSRecordsDiagnoser(Diagnoser):
|
|||
|
||||
def run(self):
|
||||
|
||||
resolvers = read_file("/etc/resolv.dnsmasq.conf").split("\n")
|
||||
ipv4_resolvers = [r.split(" ")[1] for r in resolvers if r.startswith("nameserver") and ":" not in r]
|
||||
# FIXME some day ... handle ipv4-only and ipv6-only servers. For now we assume we have at least ipv4
|
||||
assert ipv4_resolvers != [], "Uhoh, need at least one IPv4 DNS resolver ..."
|
||||
|
||||
self.resolver = ipv4_resolvers[0]
|
||||
main_domain = _get_maindomain()
|
||||
|
||||
all_domains = domain_list()["domains"]
|
||||
for domain in all_domains:
|
||||
self.logger_debug("Diagnosing DNS conf for %s" % domain)
|
||||
is_subdomain = domain.split(".",1)[1] in all_domains
|
||||
is_subdomain = domain.split(".", 1)[1] in all_domains
|
||||
for report in self.check_domain(domain, domain == main_domain, is_subdomain=is_subdomain):
|
||||
yield report
|
||||
|
||||
# FIXME : somewhere, should implement a check for reverse DNS ...
|
||||
|
||||
# FIXME / TODO : somewhere, could also implement a check for domain expiring soon
|
||||
# Check if a domain bought by the user will expire soon
|
||||
psl = PublicSuffixList()
|
||||
domains_from_registrar = [psl.get_public_suffix(domain) for domain in all_domains]
|
||||
domains_from_registrar = [domain for domain in domains_from_registrar if "." in domain]
|
||||
domains_from_registrar = set(domains_from_registrar) - set(YNH_DYNDNS_DOMAINS + ["netlib.re"])
|
||||
for report in self.check_expiration_date(domains_from_registrar):
|
||||
yield report
|
||||
|
||||
def check_domain(self, domain, is_main_domain, is_subdomain):
|
||||
|
||||
|
@ -67,7 +71,6 @@ class DNSRecordsDiagnoser(Diagnoser):
|
|||
results[id_] = "WRONG"
|
||||
discrepancies.append(("diagnosis_dns_discrepancy", r))
|
||||
|
||||
|
||||
def its_important():
|
||||
# Every mail DNS records are important for main domain
|
||||
# For other domain, we only report it as a warning for now...
|
||||
|
@ -95,7 +98,13 @@ class DNSRecordsDiagnoser(Diagnoser):
|
|||
summary=summary)
|
||||
|
||||
if discrepancies:
|
||||
output["details"] = ["diagnosis_dns_point_to_doc"] + discrepancies
|
||||
# For ynh-managed domains (nohost.me etc...), tell people to try to "yunohost dyndns update --force"
|
||||
if any(domain.endswith(ynh_dyndns_domain) for ynh_dyndns_domain in YNH_DYNDNS_DOMAINS):
|
||||
output["details"] = ["diagnosis_dns_try_dyndns_update_force"]
|
||||
# Otherwise point to the documentation
|
||||
else:
|
||||
output["details"] = ["diagnosis_dns_point_to_doc"]
|
||||
output["details"] += discrepancies
|
||||
|
||||
yield output
|
||||
|
||||
|
@ -128,7 +137,7 @@ class DNSRecordsDiagnoser(Diagnoser):
|
|||
if r["name"] == "@":
|
||||
current = {part for part in current if not part.startswith("ip4:") and not part.startswith("ip6:")}
|
||||
return expected == current
|
||||
elif r["type"] == "MX":
|
||||
elif r["type"] == "MX":
|
||||
# For MX, we want to ignore the priority
|
||||
expected = r["value"].split()[-1]
|
||||
current = r["current"].split()[-1]
|
||||
|
@ -136,6 +145,92 @@ class DNSRecordsDiagnoser(Diagnoser):
|
|||
else:
|
||||
return r["current"] == r["value"]
|
||||
|
||||
def check_expiration_date(self, domains):
|
||||
"""
|
||||
Alert if expiration date of a domain is soon
|
||||
"""
|
||||
|
||||
details = {
|
||||
"not_found": [],
|
||||
"error": [],
|
||||
"warning": [],
|
||||
"success": []
|
||||
}
|
||||
|
||||
for domain in domains:
|
||||
expire_date = self.get_domain_expiration(domain)
|
||||
|
||||
if isinstance(expire_date, str):
|
||||
status_ns, _ = dig(domain, "NS", resolvers="force_external")
|
||||
status_a, _ = dig(domain, "A", resolvers="force_external")
|
||||
if "ok" not in [status_ns, status_a]:
|
||||
details["not_found"].append((
|
||||
"diagnosis_domain_%s_details" % (expire_date),
|
||||
{"domain": domain}))
|
||||
else:
|
||||
self.logger_debug("Dyndns domain: %s" % (domain))
|
||||
continue
|
||||
|
||||
expire_in = expire_date - datetime.now()
|
||||
|
||||
alert_type = "success"
|
||||
if expire_in <= timedelta(15):
|
||||
alert_type = "error"
|
||||
elif expire_in <= timedelta(45):
|
||||
alert_type = "warning"
|
||||
|
||||
args = {
|
||||
"domain": domain,
|
||||
"days": expire_in.days - 1,
|
||||
"expire_date": str(expire_date)
|
||||
}
|
||||
details[alert_type].append(("diagnosis_domain_expires_in", args))
|
||||
|
||||
for alert_type in ["success", "error", "warning", "not_found"]:
|
||||
if details[alert_type]:
|
||||
if alert_type == "not_found":
|
||||
meta = {"test": "domain_not_found"}
|
||||
else:
|
||||
meta = {"test": "domain_expiration"}
|
||||
# Allow to ignore specifically a single domain
|
||||
if len(details[alert_type]) == 1:
|
||||
meta["domain"] = details[alert_type][0][1]["domain"]
|
||||
yield dict(meta=meta,
|
||||
data={},
|
||||
status=alert_type.upper() if alert_type != "not_found" else "WARNING",
|
||||
summary="diagnosis_domain_expiration_" + alert_type,
|
||||
details=details[alert_type])
|
||||
|
||||
def get_domain_expiration(self, domain):
|
||||
"""
|
||||
Return the expiration datetime of a domain or None
|
||||
"""
|
||||
command = "whois -H %s || echo failed" % (domain)
|
||||
out = check_output(command).strip().split("\n")
|
||||
|
||||
# Reduce output to determine if whois answer is equivalent to NOT FOUND
|
||||
filtered_out = [line for line in out
|
||||
if re.search(r'^[a-zA-Z0-9 ]{4,25}:', line, re.IGNORECASE) and
|
||||
not re.match(r'>>> Last update of whois', line, re.IGNORECASE) and
|
||||
not re.match(r'^NOTICE:', line, re.IGNORECASE) and
|
||||
not re.match(r'^%%', line, re.IGNORECASE) and
|
||||
not re.match(r'"https?:"', line, re.IGNORECASE)]
|
||||
|
||||
# If there are fewer than 7 lines, it's a NOT FOUND response
|
||||
if len(filtered_out) <= 6:
|
||||
return "not_found"
|
||||
|
||||
for line in out:
|
||||
match = re.search(r'Expir.+(\d{4}-\d{2}-\d{2})', line, re.IGNORECASE)
|
||||
if match is not None:
|
||||
return datetime.strptime(match.group(1), '%Y-%m-%d')
|
||||
|
||||
match = re.search(r'Expir.+(\d{2}-\w{3}-\d{4})', line, re.IGNORECASE)
|
||||
if match is not None:
|
||||
return datetime.strptime(match.group(1), '%d-%b-%Y')
|
||||
|
||||
return "expiration_not_found"
|
||||
|
||||
|
||||
def main(args, env, loggers):
|
||||
return DNSRecordsDiagnoser(args, env, loggers).diagnose()
|
||||
|
|
|
@ -87,7 +87,7 @@ class PortsDiagnoser(Diagnoser):
|
|||
# If any AAAA record is set, IPv6 is important...
|
||||
def ipv6_is_important():
|
||||
dnsrecords = Diagnoser.get_cached_report("dnsrecords") or {}
|
||||
return any(record["data"]["AAAA:@"] in ["OK", "WRONG"] for record in dnsrecords.get("items", []))
|
||||
return any(record["data"].get("AAAA:@") in ["OK", "WRONG"] for record in dnsrecords.get("items", []))
|
||||
|
||||
if failed == 4 or ipv6_is_important():
|
||||
yield dict(meta={"port": port},
|
||||
|
|
|
@ -2,7 +2,6 @@
|
|||
|
||||
import os
|
||||
import dns.resolver
|
||||
import socket
|
||||
import re
|
||||
|
||||
from subprocess import CalledProcessError
|
||||
|
@ -118,15 +117,27 @@ class MailDiagnoser(Diagnoser):
|
|||
details = ["diagnosis_mail_fcrdns_nok_details",
|
||||
"diagnosis_mail_fcrdns_nok_alternatives_4"]
|
||||
|
||||
try:
|
||||
rdns_domain, _, _ = socket.gethostbyaddr(ip)
|
||||
except socket.herror:
|
||||
rev = dns.reversename.from_address(ip)
|
||||
subdomain = str(rev.split(3)[0])
|
||||
query = subdomain
|
||||
if ipversion == 4:
|
||||
query += '.in-addr.arpa'
|
||||
else:
|
||||
query += '.ip6.arpa'
|
||||
|
||||
# Do the DNS Query
|
||||
status, value = dig(query, 'PTR', resolvers="force_external")
|
||||
if status == "nok":
|
||||
yield dict(meta={"test": "mail_fcrdns", "ipversion": ipversion},
|
||||
data={"ip": ip, "ehlo_domain": self.ehlo_domain},
|
||||
status="ERROR",
|
||||
summary="diagnosis_mail_fcrdns_dns_missing",
|
||||
details=details)
|
||||
continue
|
||||
|
||||
rdns_domain = ''
|
||||
if len(value) > 0:
|
||||
rdns_domain = value[0][:-1] if value[0].endswith('.') else value[0]
|
||||
if rdns_domain != self.ehlo_domain:
|
||||
details = ["diagnosis_mail_fcrdns_different_from_ehlo_domain_details"] + details
|
||||
yield dict(meta={"test": "mail_fcrdns", "ipversion": ipversion},
|
||||
|
|
|
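A minimal illustrative sketch (not part of the diff) of how the PTR query name above is assembled; the IP is hypothetical. For 1.2.3.4, dns.reversename.from_address() yields 4.3.2.1.in-addr.arpa., split(3) keeps only the reversed-octet prefix, and the code re-appends the suffix matching the IP version before handing the name to dig().

import dns.reversename

rev = dns.reversename.from_address("1.2.3.4")   # 4.3.2.1.in-addr.arpa.
subdomain = str(rev.split(3)[0])                # "4.3.2.1"
query = subdomain + '.in-addr.arpa'             # '.ip6.arpa' would be appended for an IPv6 address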
@ -21,7 +21,7 @@ class ServicesDiagnoser(Diagnoser):
|
|||
data={"status": result["status"], "configuration": result["configuration"]})
|
||||
|
||||
if result["status"] != "running":
|
||||
item["status"] = "ERROR"
|
||||
item["status"] = "ERROR" if result["status"] != "unknown" else "WARNING"
|
||||
item["summary"] = "diagnosis_services_bad_status"
|
||||
item["details"] = ["diagnosis_services_bad_status_tip"]
|
||||
|
||||
|
|
|
@ -45,14 +45,15 @@ class SystemResourcesDiagnoser(Diagnoser):
|
|||
item = dict(meta={"test": "swap"},
|
||||
data={"total": human_size(swap.total), "recommended": "512 MiB"})
|
||||
if swap.total <= 1 * MB:
|
||||
item["status"] = "ERROR"
|
||||
item["status"] = "INFO"
|
||||
item["summary"] = "diagnosis_swap_none"
|
||||
elif swap.total <= 512 * MB:
|
||||
item["status"] = "WARNING"
|
||||
elif swap.total < 450 * MB:
|
||||
item["status"] = "INFO"
|
||||
item["summary"] = "diagnosis_swap_notsomuch"
|
||||
else:
|
||||
item["status"] = "SUCCESS"
|
||||
item["summary"] = "diagnosis_swap_ok"
|
||||
item["details"] = ["diagnosis_swap_tip"]
|
||||
yield item
|
||||
|
||||
# FIXME : add a check that swappiness is low if swap is on an SD card...
|
||||
|
@ -61,40 +62,36 @@ class SystemResourcesDiagnoser(Diagnoser):
|
|||
# Disks usage
|
||||
#
|
||||
|
||||
disk_partitions = psutil.disk_partitions()
|
||||
disk_partitions = sorted(psutil.disk_partitions(), key=lambda k: k.mountpoint)
|
||||
|
||||
for disk_partition in disk_partitions:
|
||||
device = disk_partition.device
|
||||
mountpoint = disk_partition.mountpoint
|
||||
|
||||
usage = psutil.disk_usage(mountpoint)
|
||||
free_percent = round_(100 - usage.percent)
|
||||
free_percent = 100 - round_(usage.percent)
|
||||
|
||||
item = dict(meta={"test": "diskusage", "mountpoint": mountpoint},
|
||||
data={"device": device, "total": human_size(usage.total), "free": human_size(usage.free), "free_percent": free_percent})
|
||||
data={"device": device,
|
||||
# N.B.: we do not use usage.total because we want
|
||||
# to take into account the 5% security margin
|
||||
# correctly (c.f. the doc of psutil ...)
|
||||
"total": human_size(usage.used+usage.free),
|
||||
"free": human_size(usage.free),
|
||||
"free_percent": free_percent})
|
||||
|
||||
# Special checks for /boot partition because they sometimes are
|
||||
# pretty small and that's kind of okay... (for example on RPi)
|
||||
if mountpoint.startswith("/boot"):
|
||||
if usage.free < 10 * MB or free_percent < 10:
|
||||
item["status"] = "ERROR"
|
||||
item["summary"] = "diagnosis_diskusage_verylow"
|
||||
elif usage.free < 20 * MB or free_percent < 20:
|
||||
item["status"] = "WARNING"
|
||||
item["summary"] = "diagnosis_diskusage_low"
|
||||
else:
|
||||
item["status"] = "SUCCESS"
|
||||
item["summary"] = "diagnosis_diskusage_ok"
|
||||
# We have an additional absolute constrain on / and /var because
|
||||
# system partitions are critical, having them full may prevent
|
||||
# upgrades etc...
|
||||
if free_percent < 2.5 or (mountpoint in ["/", "/var"] and usage.free < 1 * GB):
|
||||
item["status"] = "ERROR"
|
||||
item["summary"] = "diagnosis_diskusage_verylow"
|
||||
elif free_percent < 5 or (mountpoint in ["/", "/var"] and usage.free < 2 * GB):
|
||||
item["status"] = "WARNING"
|
||||
item["summary"] = "diagnosis_diskusage_low"
|
||||
else:
|
||||
if usage.free < 1 * GB or free_percent < 5:
|
||||
item["status"] = "ERROR"
|
||||
item["summary"] = "diagnosis_diskusage_verylow"
|
||||
elif usage.free < 2 * GB or free_percent < 10:
|
||||
item["status"] = "WARNING"
|
||||
item["summary"] = "diagnosis_diskusage_low"
|
||||
else:
|
||||
item["status"] = "SUCCESS"
|
||||
item["summary"] = "diagnosis_diskusage_ok"
|
||||
item["status"] = "SUCCESS"
|
||||
item["summary"] = "diagnosis_diskusage_ok"
|
||||
|
||||
|
||||
yield item
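A minimal illustrative sketch (not part of the diff) of the reasoning behind "total": human_size(usage.used+usage.free) above: psutil's free/percent values are relative to the space actually available to users (e.g. ext4's reserved-blocks margin is excluded), so recomputing the total as used + free keeps the displayed numbers consistent with free_percent.

import psutil

usage = psutil.disk_usage("/")
total_for_users = usage.used + usage.free      # excludes the reserved margin counted in usage.total
free_percent = 100 - round(usage.percent)      # round() stands in here for the round_ helper used above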
|
||||
|
|
|
@ -1,98 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
|
||||
import os
|
||||
import json
|
||||
import subprocess
|
||||
|
||||
from yunohost.diagnosis import Diagnoser
|
||||
from moulinette.utils.filesystem import read_json, write_to_json
|
||||
|
||||
|
||||
class SecurityDiagnoser(Diagnoser):
|
||||
|
||||
id_ = os.path.splitext(os.path.basename(__file__))[0].split("-")[1]
|
||||
cache_duration = 3600
|
||||
dependencies = []
|
||||
|
||||
def run(self):
|
||||
|
||||
"CVE-2017-5754"
|
||||
|
||||
if self.is_vulnerable_to_meltdown():
|
||||
yield dict(meta={"test": "meltdown"},
|
||||
status="ERROR",
|
||||
summary="diagnosis_security_vulnerable_to_meltdown",
|
||||
details=["diagnosis_security_vulnerable_to_meltdown_details"]
|
||||
)
|
||||
else:
|
||||
yield dict(meta={},
|
||||
status="SUCCESS",
|
||||
summary="diagnosis_security_all_good"
|
||||
)
|
||||
|
||||
|
||||
def is_vulnerable_to_meltdown(self):
|
||||
# meltdown CVE: https://security-tracker.debian.org/tracker/CVE-2017-5754
|
||||
|
||||
# We use a cache file to avoid re-running the script so many times,
|
||||
# which can be expensive (up to around 5 seconds on ARM)
|
||||
# and make the admin appear to be slow (c.f. the calls to diagnosis
|
||||
# from the webadmin)
|
||||
#
|
||||
# The cache is in /tmp and shall disappear upon reboot
|
||||
# *or* we compare it to dpkg.log modification time
|
||||
# such that it's re-ran if there was package upgrades
|
||||
# (e.g. from yunohost)
|
||||
cache_file = "/tmp/yunohost-meltdown-diagnosis"
|
||||
dpkg_log = "/var/log/dpkg.log"
|
||||
if os.path.exists(cache_file):
|
||||
if not os.path.exists(dpkg_log) or os.path.getmtime(cache_file) > os.path.getmtime(dpkg_log):
|
||||
self.logger_debug("Using cached results for meltdown checker, from %s" % cache_file)
|
||||
return read_json(cache_file)[0]["VULNERABLE"]
|
||||
|
||||
# script taken from https://github.com/speed47/spectre-meltdown-checker
|
||||
# script commit id is stored directly in the script
|
||||
SCRIPT_PATH = "/usr/lib/moulinette/yunohost/vendor/spectre-meltdown-checker/spectre-meltdown-checker.sh"
|
||||
|
||||
# '--variant 3' corresponds to Meltdown
|
||||
# example output from the script:
|
||||
# [{"NAME":"MELTDOWN","CVE":"CVE-2017-5754","VULNERABLE":false,"INFOS":"PTI mitigates the vulnerability"}]
|
||||
try:
|
||||
self.logger_debug("Running meltdown vulnerability checker")
|
||||
call = subprocess.Popen("bash %s --batch json --variant 3" %
|
||||
SCRIPT_PATH, shell=True,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE)
|
||||
|
||||
# TODO / FIXME : here we are ignoring error messages ...
|
||||
# in particular on RPi2 and other hardware, the script complains about
|
||||
# "missing some kernel info (see -v), accuracy might be reduced"
|
||||
# Dunno what to do about that but we probably don't want to harass
|
||||
# users with this warning ...
|
||||
output, err = call.communicate()
|
||||
assert call.returncode in (0, 2, 3), "Return code: %s" % call.returncode
|
||||
|
||||
# If there are multiple lines, sounds like there was some messages
|
||||
# in stdout that are not json >.> ... Try to get the actual json
|
||||
# stuff which should be the last line
|
||||
output = output.strip()
|
||||
if "\n" in output:
|
||||
self.logger_debug("Original meltdown checker output : %s" % output)
|
||||
output = output.split("\n")[-1]
|
||||
|
||||
CVEs = json.loads(output)
|
||||
assert len(CVEs) == 1
|
||||
assert CVEs[0]["NAME"] == "MELTDOWN"
|
||||
except Exception as e:
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
self.logger_warning("Something went wrong when trying to diagnose the Meltdown vulnerability, exception: %s" % e)
|
||||
raise Exception("Command output for failed meltdown check: '%s'" % output)
|
||||
|
||||
self.logger_debug("Writing results from meltdown checker to cache file, %s" % cache_file)
|
||||
write_to_json(cache_file, CVEs)
|
||||
return CVEs[0]["VULNERABLE"]
|
||||
|
||||
|
||||
def main(args, env, loggers):
|
||||
return SecurityDiagnoser(args, env, loggers).diagnose()
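A minimal illustrative sketch (not part of the diff) of the cache-invalidation rule used above: the cached meltdown result is reused only if no package was installed or upgraded (i.e. dpkg.log untouched) since the cache file was written; a reboot clears /tmp anyway.

import os

def meltdown_cache_is_fresh(cache_file="/tmp/yunohost-meltdown-diagnosis",
                            dpkg_log="/var/log/dpkg.log"):
    # Fresh = the cache exists and is newer than the last dpkg activity (or there is no dpkg log at all)
    if not os.path.exists(cache_file):
        return False
    return (not os.path.exists(dpkg_log)
            or os.path.getmtime(cache_file) > os.path.getmtime(dpkg_log))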
|
|
@ -39,6 +39,9 @@ else
|
|||
# Restore the configuration
|
||||
mv /etc/ldap/slapd.d "$TMPDIR"
|
||||
mkdir -p /etc/ldap/slapd.d
|
||||
cp -a "${backup_dir}/ldap.conf" /etc/ldap/ldap.conf
|
||||
cp -a "${backup_dir}/slapd.ldif" /etc/ldap/slapd.ldif
|
||||
# Legacy thing, but we need it to force the regen-conf in case it exists
|
||||
cp -a "${backup_dir}/slapd.conf" /etc/ldap/slapd.conf
|
||||
slapadd -F /etc/ldap/slapd.d -b cn=config \
|
||||
-l "${backup_dir}/cn=config.master.ldif" \
|
||||
|
|
|
@ -1,42 +1,5 @@
|
|||
backup_dir="$1/conf/ynh/mysql"
|
||||
MYSQL_PKG="$(dpkg --list | sed -ne 's/^ii \(mariadb-server-[[:digit:].]\+\) .*$/\1/p')"
|
||||
# We don't backup/restore mysql password anymore
|
||||
# c.f. https://github.com/YunoHost/yunohost/pull/912
|
||||
|
||||
. /usr/share/yunohost/helpers
|
||||
|
||||
# ensure that mysql is running
|
||||
service mysql status >/dev/null 2>&1 \
|
||||
|| service mysql start
|
||||
|
||||
# retrieve current and new password
|
||||
[ -f /etc/yunohost/mysql ] \
|
||||
&& curr_pwd=$(cat /etc/yunohost/mysql)
|
||||
new_pwd=$(cat "${backup_dir}/root_pwd" || cat "${backup_dir}/mysql")
|
||||
[ -z "$curr_pwd" ] && curr_pwd="yunohost"
|
||||
[ -z "$new_pwd" ] && {
|
||||
new_pwd=$(ynh_string_random 10)
|
||||
}
|
||||
|
||||
# attempt to change it
|
||||
mysqladmin -s -u root -p"$curr_pwd" password "$new_pwd" || {
|
||||
|
||||
echo "It seems that you have already configured MySQL." \
|
||||
"YunoHost needs to have a root access to MySQL to runs its" \
|
||||
"applications, and is going to reset the MySQL root password." \
|
||||
"You can find this new password in /etc/yunohost/mysql." >&2
|
||||
|
||||
# set new password with debconf
|
||||
debconf-set-selections << EOF
|
||||
$MYSQL_PKG mysql-server/root_password password $new_pwd
|
||||
$MYSQL_PKG mysql-server/root_password_again password $new_pwd
|
||||
EOF
|
||||
|
||||
# reconfigure Debian package
|
||||
dpkg-reconfigure -freadline -u "$MYSQL_PKG" 2>&1
|
||||
}
|
||||
|
||||
# store new root password
|
||||
echo "$new_pwd" | tee /etc/yunohost/mysql
|
||||
chmod 400 /etc/yunohost/mysql
|
||||
|
||||
# reload the grant tables
|
||||
mysqladmin -s -u root -p"$new_pwd" reload
|
||||
# This is a dummy empty file as a workaround for
|
||||
# https://github.com/YunoHost/issues/issues/1553 until it is fixed
|
||||
|
|
8
data/other/ffdhe2048.pem
Normal file
|
@ -0,0 +1,8 @@
|
|||
-----BEGIN DH PARAMETERS-----
|
||||
MIIBCAKCAQEA//////////+t+FRYortKmq/cViAnPTzx2LnFg84tNpWp4TZBFGQz
|
||||
+8yTnc4kmz75fS/jY2MMddj2gbICrsRhetPfHtXV/WVhJDP1H18GbtCFY2VVPe0a
|
||||
87VXE15/V8k1mE8McODmi3fipona8+/och3xWKE2rec1MKzKT0g6eXq8CrGCsyT7
|
||||
YdEIqUuyyOP7uWrat2DX9GgdT0Kj3jlN9K5W7edjcrsZCwenyO4KbXCeAvzhzffi
|
||||
7MA0BM0oNC9hkXL+nOmFg/+OTxIy7vKBg8P+OxtMb61zO7X8vC7CIAXFjvGDfRaD
|
||||
ssbzSibBsu/6iGtCOGEoXJf//////////wIBAg==
|
||||
-----END DH PARAMETERS-----
|
|
@ -14,18 +14,19 @@ mail_plugins = $mail_plugins quota
|
|||
|
||||
###############################################################################
|
||||
|
||||
# generated 2020-04-03, Mozilla Guideline v5.4, Dovecot 2.2.27, OpenSSL 1.1.0l, intermediate configuration
|
||||
# https://ssl-config.mozilla.org/#server=dovecot&version=2.2.27&config=intermediate&openssl=1.1.0l&guideline=5.4
|
||||
# generated 2020-08-18, Mozilla Guideline v5.6, Dovecot 2.3.4, OpenSSL 1.1.1d, intermediate configuration
|
||||
# https://ssl-config.mozilla.org/#server=dovecot&version=2.3.4&config=intermediate&openssl=1.1.1d&guideline=5.6
|
||||
|
||||
ssl = required
|
||||
|
||||
ssl_cert = </etc/yunohost/certs/{{ main_domain }}/crt.pem
|
||||
ssl_key = </etc/yunohost/certs/{{ main_domain }}/key.pem
|
||||
|
||||
ssl_dh_parameters_length = 2048
|
||||
# curl https://ssl-config.mozilla.org/ffdhe2048.txt > /path/to/dhparam
|
||||
ssl_dh = /usr/share/yunohost/other/ffdhe2048.pem;
|
||||
|
||||
# intermediate configuration
|
||||
ssl_protocols = TLSv1.2
|
||||
ssl_min_protocol = TLSv1.2
|
||||
ssl_cipher_list = ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384
|
||||
ssl_prefer_server_ciphers = no
|
||||
|
||||
|
|
|
@ -47,6 +47,10 @@ Component "muc.{{ domain }}" "muc"
|
|||
|
||||
muc_event_rate = 0.5
|
||||
muc_burst_factor = 10
|
||||
room_default_config = {
|
||||
logging = true,
|
||||
persistent = true
|
||||
};
|
||||
|
||||
---Set up a PubSub server
|
||||
Component "pubsub.{{ domain }}" "pubsub"
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
lua_shared_dict cache 10m;
|
||||
init_by_lua_file /usr/share/ssowat/init.lua;
|
||||
server_names_hash_bucket_size 64;
|
||||
server_names_hash_bucket_size 128;
|
||||
|
|
|
@ -6,6 +6,9 @@ location /yunohost/admin/ {
|
|||
default_type text/html;
|
||||
index index.html;
|
||||
|
||||
more_set_headers "Content-Security-Policy: upgrade-insecure-requests; default-src 'self'; connect-src 'self' https://raw.githubusercontent.com https://paste.yunohost.org wss://$host; style-src 'self' 'unsafe-inline'; script-src 'self' 'unsafe-eval'; object-src 'none';";
|
||||
more_set_headers "Content-Security-Policy-Report-Only:";
|
||||
|
||||
# Short cache on handlebars templates
|
||||
location ~* \.(?:ms)$ {
|
||||
expires 5m;
|
||||
|
|
3
data/templates/nginx/redirect_to_admin.conf
Normal file
|
@ -0,0 +1,3 @@
|
|||
location / {
|
||||
return 302 https://$http_host/yunohost/admin;
|
||||
}
|
|
@ -2,27 +2,31 @@ ssl_session_timeout 1d;
|
|||
ssl_session_cache shared:SSL:50m; # about 200000 sessions
|
||||
ssl_session_tickets off;
|
||||
|
||||
# nginx 1.10 in stretch doesn't support TLS1.3 and Mozilla doesn't have any
|
||||
# "modern" config recommendation with it.
|
||||
# So until buster the modern conf is same as intermediate
|
||||
{% if compatibility == "modern" %} {% else %} {% endif %}
|
||||
|
||||
{% if compatibility == "modern" %}
|
||||
# generated 2020-08-14, Mozilla Guideline v5.6, nginx 1.14.2, OpenSSL 1.1.1d, modern configuration
|
||||
# https://ssl-config.mozilla.org/#server=nginx&version=1.14.2&config=modern&openssl=1.1.1d&guideline=5.6
|
||||
ssl_protocols TLSv1.3;
|
||||
ssl_prefer_server_ciphers off;
|
||||
{% else %}
|
||||
# Ciphers with intermediate compatibility
|
||||
# generated 2020-04-03, Mozilla Guideline v5.4, nginx 1.10.3, OpenSSL 1.1.0l, intermediate configuration
|
||||
# https://ssl-config.mozilla.org/#server=nginx&version=1.10.3&config=intermediate&openssl=1.1.0l&guideline=5.4
|
||||
ssl_protocols TLSv1.2;
|
||||
# generated 2020-08-14, Mozilla Guideline v5.6, nginx 1.14.2, OpenSSL 1.1.1d, intermediate configuration
|
||||
# https://ssl-config.mozilla.org/#server=nginx&version=1.14.2&config=intermediate&openssl=1.1.1d&guideline=5.6
|
||||
ssl_protocols TLSv1.2 TLSv1.3;
|
||||
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
|
||||
ssl_prefer_server_ciphers off;
|
||||
|
||||
# Uncomment the following directive after DH generation
|
||||
# > openssl dhparam -out /etc/ssl/private/dh2048.pem -outform PEM -2 2048
|
||||
#ssl_dhparam /etc/ssl/private/dh2048.pem;
|
||||
# Pre-defined FFDHE group (RFC 7919)
|
||||
# From https://ssl-config.mozilla.org/ffdhe2048.txt
|
||||
# https://security.stackexchange.com/a/149818
|
||||
ssl_dhparam /usr/share/yunohost/other/ffdhe2048.pem;
|
||||
{% endif %}
|
||||
|
||||
|
||||
# Follows the Web Security Directives from the Mozilla Dev Lab and the Mozilla Observatory + Partners
|
||||
# https://wiki.mozilla.org/Security/Guidelines/Web_Security
|
||||
# https://observatory.mozilla.org/
|
||||
more_set_headers "Content-Security-Policy : upgrade-insecure-requests";
|
||||
more_set_headers "Content-Security-Policy-Report-Only : default-src https: data: 'unsafe-inline' 'unsafe-eval'";
|
||||
more_set_headers "Content-Security-Policy-Report-Only : default-src https: data: 'unsafe-inline' 'unsafe-eval' ";
|
||||
more_set_headers "X-Content-Type-Options : nosniff";
|
||||
more_set_headers "X-XSS-Protection : 1; mode=block";
|
||||
more_set_headers "X-Download-Options : noopen";
|
||||
|
|
|
@ -2,13 +2,7 @@ server {
|
|||
listen 80 default_server;
|
||||
listen [::]:80 default_server;
|
||||
|
||||
location / {
|
||||
return 302 https://$http_host/yunohost/admin;
|
||||
}
|
||||
|
||||
location /yunohost/admin {
|
||||
return 301 https://$http_host$request_uri;
|
||||
}
|
||||
include /etc/nginx/conf.d/default.d/*.conf;
|
||||
}
|
||||
|
||||
server {
|
||||
|
@ -22,23 +16,13 @@ server {
|
|||
|
||||
more_set_headers "Strict-Transport-Security : max-age=63072000; includeSubDomains; preload";
|
||||
more_set_headers "Referrer-Policy : 'same-origin'";
|
||||
more_set_headers "Content-Security-Policy : upgrade-insecure-requests; object-src 'none'; script-src https: 'unsafe-eval'";
|
||||
|
||||
location / {
|
||||
return 302 https://$http_host/yunohost/admin;
|
||||
}
|
||||
|
||||
location /yunohost {
|
||||
# Block crawler bots
|
||||
if ($http_user_agent ~ (crawl|Googlebot|Slurp|spider|bingbot|tracker|click|parser|spider|facebookexternalhit) ) {
|
||||
return 403;
|
||||
}
|
||||
# X-Robots-Tag to precise the rules applied.
|
||||
add_header X-Robots-Tag "nofollow, noindex, noarchive, nosnippet";
|
||||
# Redirect most 404s to maindomain.tld/yunohost/sso
|
||||
access_by_lua_file /usr/share/ssowat/access.lua;
|
||||
}
|
||||
|
||||
include /etc/nginx/conf.d/yunohost_admin.conf.inc;
|
||||
include /etc/nginx/conf.d/yunohost_api.conf.inc;
|
||||
include /etc/nginx/conf.d/default.d/*.conf;
|
||||
}
|
||||
|
|
|
@ -15,6 +15,18 @@ base dc=yunohost,dc=org
|
|||
# The LDAP protocol version to use.
|
||||
#ldap_version 3
|
||||
|
||||
# The DN to bind with for normal lookups.
|
||||
#binddn cn=annonymous,dc=example,dc=net
|
||||
#bindpw secret
|
||||
|
||||
# The DN used for password modifications by root.
|
||||
#rootpwmoddn cn=admin,dc=example,dc=com
|
||||
|
||||
# SSL options
|
||||
#ssl off
|
||||
#tls_reqcert never
|
||||
tls_cacertfile /etc/ssl/certs/ca-certificates.crt
|
||||
|
||||
# The search scope.
|
||||
#scope sub
|
||||
|
||||
|
|
|
@ -1,12 +1,8 @@
|
|||
# /etc/nsswitch.conf
|
||||
#
|
||||
# Example configuration of GNU Name Service Switch functionality.
|
||||
# If you have the `glibc-doc-reference' and `info' packages installed, try:
|
||||
# `info libc "Name Service Switch"' for information about this file.
|
||||
|
||||
passwd: compat ldap
|
||||
group: compat ldap
|
||||
shadow: compat ldap
|
||||
passwd: files systemd ldap
|
||||
group: files systemd ldap
|
||||
shadow: files ldap
|
||||
gshadow: files
|
||||
|
||||
hosts: files myhostname mdns4_minimal [NOTFOUND=return] dns
|
||||
|
|
|
@ -19,34 +19,35 @@ readme_directory = no
|
|||
|
||||
# -- TLS for incoming connections
|
||||
###############################################################################
|
||||
# generated 2020-04-03, Mozilla Guideline v5.4, Postfix 3.1.14, OpenSSL 1.1.0l, intermediate configuration
|
||||
# https://ssl-config.mozilla.org/#server=postfix&version=3.1.14&config=intermediate&openssl=1.1.0l&guideline=5.4
|
||||
|
||||
# (No modern conf support until we're on buster...)
|
||||
# {% if compatibility == "intermediate" %} {% else %} {% endif %}
|
||||
|
||||
smtpd_use_tls = yes
|
||||
|
||||
smtpd_tls_security_level = may
|
||||
smtpd_tls_auth_only = yes
|
||||
smtpd_tls_cert_file = /etc/yunohost/certs/{{ main_domain }}/crt.pem
|
||||
smtpd_tls_key_file = /etc/yunohost/certs/{{ main_domain }}/key.pem
|
||||
|
||||
{% if compatibility == "intermediate" %}
|
||||
# generated 2020-08-18, Mozilla Guideline v5.6, Postfix 3.4.14, OpenSSL 1.1.1d, intermediate configuration
|
||||
# https://ssl-config.mozilla.org/#server=postfix&version=3.4.14&config=intermediate&openssl=1.1.1d&guideline=5.6
|
||||
|
||||
smtpd_tls_mandatory_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1
|
||||
smtpd_tls_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1
|
||||
# smtpd_tls_mandatory_ciphers = medium # (c.f. below)
|
||||
smtpd_tls_mandatory_ciphers = medium
|
||||
|
||||
# curl https://ssl-config.mozilla.org/ffdhe2048.txt > /path/to/dhparam.pem
|
||||
# not actually 1024 bits, this applies to all DHE >= 1024 bits
|
||||
# smtpd_tls_dh1024_param_file = /path/to/dhparam.pem
|
||||
smtpd_tls_dh1024_param_file = /usr/share/yunohost/other/ffdhe2048.pem;
|
||||
|
||||
tls_medium_cipherlist = ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384
|
||||
{% else %}
|
||||
# generated 2020-08-18, Mozilla Guideline v5.6, Postfix 3.4.14, OpenSSL 1.1.1d, modern configuration
|
||||
# https://ssl-config.mozilla.org/#server=postfix&version=3.4.14&config=modern&openssl=1.1.1d&guideline=5.6
|
||||
|
||||
smtpd_tls_mandatory_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1, !TLSv1.2
|
||||
smtpd_tls_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1, !TLSv1.2
|
||||
{% else %}
|
||||
|
||||
# This custom medium cipherlist recommendation only works if we have a DH ... which we don't, c.f. https://github.com/YunoHost/issues/issues/93
|
||||
# On the other hand, the postfix doc strongly discourage tweaking this list ... So whatever, let's keep the mandatory_ciphers to high like we did before applying the Mozilla recommendation ...
|
||||
#tls_medium_cipherlist = ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384
|
||||
tls_preempt_cipherlist = no
|
||||
|
||||
# Custom Yunohost stuff ... because we can't use the recommendation about medium cipher list ...
|
||||
smtpd_tls_mandatory_ciphers=high
|
||||
smtpd_tls_eecdh_grade = ultra
|
||||
###############################################################################
|
||||
smtpd_tls_session_cache_database = btree:${data_directory}/smtpd_scache
|
||||
smtpd_tls_loglevel=1
|
||||
|
@ -170,7 +171,7 @@ smtpd_milters = inet:localhost:11332
|
|||
milter_default_action = accept
|
||||
|
||||
# Avoid sending too many emails simultaneously
|
||||
smtp_destination_concurrency_limit = 1
|
||||
smtp_destination_concurrency_limit = 2
|
||||
default_destination_rate_delay = 5s
|
||||
|
||||
# Avoid email address scanning
|
||||
|
|
|
@ -2,58 +2,62 @@
|
|||
## Version 0.1
|
||||
## Adrien Beudin
|
||||
|
||||
dn: cn=mailserver,cn=schema,cn=config
|
||||
objectClass: olcSchemaConfig
|
||||
cn: mailserver
|
||||
#
|
||||
# Attributes
|
||||
attributetype ( 1.3.6.1.4.1.40328.1.20.2.1
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.40328.1.20.2.1
|
||||
NAME 'maildrop'
|
||||
DESC 'Mail addresses where mails are forwarded -- ie forwards'
|
||||
EQUALITY caseIgnoreMatch
|
||||
SUBSTR caseIgnoreSubstringsMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{512})
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.40328.1.20.2.2
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.40328.1.20.2.2
|
||||
NAME 'mailalias'
|
||||
DESC 'Mail addresses accepted by this account -- ie aliases'
|
||||
EQUALITY caseIgnoreMatch
|
||||
SUBSTR caseIgnoreSubstringsMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{512})
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.40328.1.20.2.3
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.40328.1.20.2.3
|
||||
NAME 'mailenable'
|
||||
DESC 'Mail Account validity'
|
||||
EQUALITY caseIgnoreMatch
|
||||
SUBSTR caseIgnoreSubstringsMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{8})
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.40328.1.20.2.4
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.40328.1.20.2.4
|
||||
NAME 'mailbox'
|
||||
DESC 'Mailbox path where mails are delivered'
|
||||
EQUALITY caseIgnoreMatch
|
||||
SUBSTR caseIgnoreSubstringsMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{512})
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.40328.1.20.2.5
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.40328.1.20.2.5
|
||||
NAME 'virtualdomain'
|
||||
DESC 'A mail domain name'
|
||||
EQUALITY caseIgnoreMatch
|
||||
SUBSTR caseIgnoreSubstringsMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{512})
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.40328.1.20.2.6
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.40328.1.20.2.6
|
||||
NAME 'virtualdomaindescription'
|
||||
DESC 'Virtual domain description'
|
||||
EQUALITY caseIgnoreMatch
|
||||
SUBSTR caseIgnoreSubstringsMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{512})
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.40328.1.20.2.7
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.40328.1.20.2.7
|
||||
NAME 'mailuserquota'
|
||||
DESC 'Mailbox quota for a user'
|
||||
EQUALITY caseIgnoreMatch
|
||||
SUBSTR caseIgnoreSubstringsMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{16} SINGLE-VALUE )
|
||||
|
||||
#
|
||||
# Mail Account Objectclass
|
||||
objectclass ( 1.3.6.1.4.1.40328.1.1.2.1
|
||||
olcObjectClasses: ( 1.3.6.1.4.1.40328.1.1.2.1
|
||||
NAME 'mailAccount'
|
||||
DESC 'Mail Account'
|
||||
SUP top
|
||||
|
@ -65,9 +69,9 @@ objectclass ( 1.3.6.1.4.1.40328.1.1.2.1
|
|||
mailalias $ maildrop $ mailenable $ mailbox $ mailuserquota
|
||||
)
|
||||
)
|
||||
|
||||
#
|
||||
# Mail Domain Objectclass
|
||||
objectclass ( 1.3.6.1.4.1.40328.1.1.2.2
|
||||
olcObjectClasses: ( 1.3.6.1.4.1.40328.1.1.2.2
|
||||
NAME 'mailDomain'
|
||||
DESC 'Domain mail entry'
|
||||
SUP top
|
||||
|
@ -79,9 +83,9 @@ objectclass ( 1.3.6.1.4.1.40328.1.1.2.2
|
|||
virtualdomaindescription $ mailuserquota
|
||||
)
|
||||
)
|
||||
|
||||
#
|
||||
# Mail Group Objectclass
|
||||
objectclass ( 1.3.6.1.4.1.40328.1.1.2.3
|
||||
olcObjectClasses: ( 1.3.6.1.4.1.40328.1.1.2.3
|
||||
NAME 'mailGroup' SUP top AUXILIARY
|
||||
DESC 'Mail Group'
|
||||
MUST ( mail )
|
|
@ -1,48 +1,50 @@
|
|||
#dn: cn=yunohost,cn=schema,cn=config
|
||||
#objectClass: olcSchemaConfig
|
||||
#cn: yunohost
|
||||
# Yunohost schema for group and permission support
|
||||
|
||||
dn: cn=yunohost,cn=schema,cn=config
|
||||
objectClass: olcSchemaConfig
|
||||
cn: yunohost
|
||||
# ATTRIBUTES
|
||||
# For Permission
|
||||
attributetype ( 1.3.6.1.4.1.17953.9.1.1 NAME 'permission'
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.17953.9.1.1 NAME 'permission'
|
||||
DESC 'Yunohost permission on user and group side'
|
||||
SUP distinguishedName )
|
||||
attributetype ( 1.3.6.1.4.1.17953.9.1.2 NAME 'groupPermission'
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.17953.9.1.2 NAME 'groupPermission'
|
||||
DESC 'Yunohost permission for a group on permission side'
|
||||
SUP distinguishedName )
|
||||
attributetype ( 1.3.6.1.4.1.17953.9.1.3 NAME 'inheritPermission'
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.17953.9.1.3 NAME 'inheritPermission'
|
||||
DESC 'Yunohost permission for user on permission side'
|
||||
SUP distinguishedName )
|
||||
attributetype ( 1.3.6.1.4.1.17953.9.1.4 NAME 'URL'
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.17953.9.1.4 NAME 'URL'
|
||||
DESC 'Yunohost permission main URL'
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} SINGLE-VALUE )
|
||||
attributetype ( 1.3.6.1.4.1.17953.9.1.5 NAME 'additionalUrls'
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.17953.9.1.5 NAME 'additionalUrls'
|
||||
DESC 'Yunohost permission additional URL'
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} )
|
||||
attributetype ( 1.3.6.1.4.1.17953.9.1.6 NAME 'authHeader'
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.17953.9.1.6 NAME 'authHeader'
|
||||
DESC 'Yunohost application, enable authentication header'
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )
|
||||
attributetype ( 1.3.6.1.4.1.17953.9.1.7 NAME 'label'
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.17953.9.1.7 NAME 'label'
|
||||
DESC 'Yunohost permission label, also used for the tile name in the SSO'
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} SINGLE-VALUE )
|
||||
attributetype ( 1.3.6.1.4.1.17953.9.1.8 NAME 'showTile'
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.17953.9.1.8 NAME 'showTile'
|
||||
DESC 'Yunohost application, show/hide the tile in the SSO for this permission'
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )
|
||||
attributetype ( 1.3.6.1.4.1.17953.9.1.9 NAME 'isProtected'
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.17953.9.1.9 NAME 'isProtected'
|
||||
DESC 'Yunohost application permission protection'
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )
|
||||
# OBJECTCLASS
|
||||
# For Applications
|
||||
objectclass ( 1.3.6.1.4.1.17953.9.2.1 NAME 'groupOfNamesYnh'
|
||||
olcObjectClasses: ( 1.3.6.1.4.1.17953.9.2.1 NAME 'groupOfNamesYnh'
|
||||
DESC 'Yunohost user group'
|
||||
SUP top AUXILIARY
|
||||
MAY ( member $ businessCategory $ seeAlso $ owner $ ou $ o $ permission ) )
|
||||
objectclass ( 1.3.6.1.4.1.17953.9.2.2 NAME 'permissionYnh'
|
||||
olcObjectClasses: ( 1.3.6.1.4.1.17953.9.2.2 NAME 'permissionYnh'
|
||||
DESC 'a Yunohost application'
|
||||
SUP top AUXILIARY
|
||||
MUST ( cn $ authHeader $ label $ showTile $ isProtected )
|
||||
MAY ( groupPermission $ inheritPermission $ URL $ additionalUrls ) )
|
||||
# For User
|
||||
objectclass ( 1.3.6.1.4.1.17953.9.2.3 NAME 'userPermissionYnh'
|
||||
olcObjectClasses: ( 1.3.6.1.4.1.17953.9.2.3 NAME 'userPermissionYnh'
|
||||
DESC 'a Yunohost application'
|
||||
SUP top AUXILIARY
|
||||
MAY ( permission ) )
|
|
@ -1,154 +0,0 @@
|
|||
# This is the main slapd configuration file. See slapd.conf(5) for more
|
||||
# info on the configuration options.
|
||||
|
||||
#######################################################################
|
||||
# Global Directives:
|
||||
|
||||
# Features to permit
|
||||
#allow bind_v2
|
||||
|
||||
# Schema and objectClass definitions
|
||||
include /etc/ldap/schema/core.schema
|
||||
include /etc/ldap/schema/cosine.schema
|
||||
include /etc/ldap/schema/nis.schema
|
||||
include /etc/ldap/schema/inetorgperson.schema
|
||||
include /etc/ldap/schema/mailserver.schema
|
||||
include /etc/ldap/schema/sudo.schema
|
||||
include /etc/ldap/schema/yunohost.schema
|
||||
|
||||
# Where the pid file is put. The init.d script
|
||||
# will not stop the server if you change this.
|
||||
pidfile /var/run/slapd/slapd.pid
|
||||
|
||||
# List of arguments that were passed to the server
|
||||
argsfile /var/run/slapd/slapd.args
|
||||
|
||||
# Read slapd.conf(5) for possible values
|
||||
loglevel none
|
||||
|
||||
# Hashes to be used in generation of user passwords
|
||||
password-hash {SSHA}
|
||||
|
||||
# Where the dynamically loaded modules are stored
|
||||
modulepath /usr/lib/ldap
|
||||
moduleload back_mdb
|
||||
moduleload memberof
|
||||
|
||||
# The maximum number of entries that is returned for a search operation
|
||||
sizelimit 500
|
||||
|
||||
# The tool-threads parameter sets the actual amount of cpu's that is used
|
||||
# for indexing.
|
||||
tool-threads 1
|
||||
|
||||
# TLS Support
|
||||
TLSCertificateFile /etc/yunohost/certs/yunohost.org/crt.pem
|
||||
TLSCertificateKeyFile /etc/yunohost/certs/yunohost.org/key.pem
|
||||
|
||||
#######################################################################
|
||||
# Specific Backend Directives for mdb:
|
||||
# Backend specific directives apply to this backend until another
|
||||
# 'backend' directive occurs
|
||||
backend mdb
|
||||
|
||||
#######################################################################
|
||||
# Specific Directives for database #1, of type mdb:
|
||||
# Database specific directives apply to this database until another
|
||||
# 'database' directive occurs
|
||||
database mdb
|
||||
|
||||
# The base of your directory in database #1
|
||||
suffix "dc=yunohost,dc=org"
|
||||
|
||||
# rootdn directive for specifying a superuser on the database. This is needed
|
||||
# for syncrepl.
|
||||
# rootdn "cn=admin,dc=yunohost,dc=org"
|
||||
|
||||
# Where the database file are physically stored for database #1
|
||||
directory "/var/lib/ldap"
|
||||
|
||||
# Indexing options for database #1
|
||||
index objectClass eq
|
||||
index uid,sudoUser eq,sub
|
||||
index entryCSN,entryUUID eq
|
||||
index cn,mail eq
|
||||
index gidNumber,uidNumber eq
|
||||
index member,memberUid,uniqueMember eq
|
||||
index virtualdomain eq
|
||||
index permission eq
|
||||
|
||||
# Save the time that the entry gets modified, for database #1
|
||||
lastmod on
|
||||
|
||||
# Checkpoint the BerkeleyDB database periodically in case of system
|
||||
# failure and to speed slapd shutdown.
|
||||
checkpoint 512 30
|
||||
|
||||
# The userPassword by default can be changed
|
||||
# by the entry owning it if they are authenticated.
|
||||
# Others should not be able to see it, except the
|
||||
# admin entry below
|
||||
# These access lines apply to database #1 only
|
||||
access to attrs=userPassword,shadowLastChange
|
||||
by dn="cn=admin,dc=yunohost,dc=org" write
|
||||
by dn.exact="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" write
|
||||
by anonymous auth
|
||||
by self write
|
||||
by * none
|
||||
|
||||
# Personal information can be changed by the entry
|
||||
# owning it if they are authenticated.
|
||||
# Others should be able to see it.
|
||||
access to attrs=cn,gecos,givenName,mail,maildrop,displayName,sn
|
||||
by dn="cn=admin,dc=yunohost,dc=org" write
|
||||
by dn.exact="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" write
|
||||
by self write
|
||||
by * read
|
||||
|
||||
# Ensure read access to the base for things like
|
||||
# supportedSASLMechanisms. Without this you may
|
||||
# have problems with SASL not knowing what
|
||||
# mechanisms are available and the like.
|
||||
# Note that this is covered by the 'access to *'
|
||||
# ACL below too but if you change that as people
|
||||
# are wont to do you'll still need this if you
|
||||
# want SASL (and possible other things) to work
|
||||
# happily.
|
||||
access to dn.base="" by * read
|
||||
|
||||
# The admin dn has full write access, everyone else
|
||||
# can read everything.
|
||||
access to *
|
||||
by dn="cn=admin,dc=yunohost,dc=org" write
|
||||
by dn.exact="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" write
|
||||
by group/groupOfNames/Member="cn=admin,ou=groups,dc=yunohost,dc=org" write
|
||||
by * read
|
||||
|
||||
# Configure Memberof Overlay (used for Yunohost permission)
|
||||
|
||||
# Link user <-> group
|
||||
#dn: olcOverlay={0}memberof,olcDatabase={1}mdb,cn=config
|
||||
overlay memberof
|
||||
memberof-group-oc groupOfNamesYnh
|
||||
memberof-member-ad member
|
||||
memberof-memberof-ad memberOf
|
||||
memberof-dangling error
|
||||
memberof-refint TRUE
|
||||
|
||||
# Link permission <-> groups
|
||||
#dn: olcOverlay={1}memberof,olcDatabase={1}mdb,cn=config
|
||||
overlay memberof
|
||||
memberof-group-oc permissionYnh
|
||||
memberof-member-ad groupPermission
|
||||
memberof-memberof-ad permission
|
||||
memberof-dangling error
|
||||
memberof-refint TRUE
|
||||
|
||||
# Link permission <-> user
|
||||
#dn: olcOverlay={2}memberof,olcDatabase={1}mdb,cn=config
|
||||
overlay memberof
|
||||
memberof-group-oc permissionYnh
|
||||
memberof-member-ad inheritPermission
|
||||
memberof-memberof-ad permission
|
||||
memberof-dangling error
|
||||
memberof-refint TRUE
|
235
data/templates/slapd/slapd.ldif
Normal file
|
@ -0,0 +1,235 @@
|
|||
# OpenLDAP server configuration for Yunohost
|
||||
# ------------------------------------------
|
||||
#
|
||||
# Because of YunoHost's regen-conf mechanism, it is NOT POSSIBLE to
|
||||
# edit the config database using an LDAP request.
|
||||
#
|
||||
# If you wish to edit the config database, you should edit THIS file
|
||||
# and update the config database based on this file.
|
||||
#
|
||||
# Config database customization:
|
||||
# 1. Edit this file as you want.
|
||||
# 2. Apply your modifications. For this just run this following command in a shell:
|
||||
# $ /usr/share/yunohost/hooks/conf_regen/06-slapd apply_config
|
||||
#
|
||||
# Note that if you customize this file, YunoHost's regen-conf will NOT
|
||||
# overwrite this file. But that also means that you should be careful about
|
||||
# upgrades, because they may ship important/necessary changes to this
|
||||
# configuration that you will have to propagate yourself.
|
||||
|
||||
#
|
||||
# Main configuration
|
||||
#
|
||||
dn: cn=config
|
||||
objectClass: olcGlobal
|
||||
cn: config
|
||||
olcConfigFile: /etc/ldap/slapd.conf
|
||||
olcConfigDir: /etc/ldap/slapd.d/
|
||||
# List of arguments that were passed to the server
|
||||
olcArgsFile: /var/run/slapd/slapd.args
|
||||
#
|
||||
olcAttributeOptions: lang-
|
||||
olcAuthzPolicy: none
|
||||
olcConcurrency: 0
|
||||
olcConnMaxPending: 100
|
||||
olcConnMaxPendingAuth: 1000
|
||||
olcIdleTimeout: 0
|
||||
olcIndexSubstrIfMaxLen: 4
|
||||
olcIndexSubstrIfMinLen: 2
|
||||
olcIndexSubstrAnyLen: 4
|
||||
olcIndexSubstrAnyStep: 2
|
||||
olcIndexIntLen: 4
|
||||
olcListenerThreads: 1
|
||||
olcLocalSSF: 71
|
||||
# Read slapd.conf(5) for possible values
|
||||
olcLogLevel: None
|
||||
# Where the pid file is put. The init.d script
|
||||
# will not stop the server if you change this.
|
||||
olcPidFile: /var/run/slapd/slapd.pid
|
||||
olcReverseLookup: FALSE
|
||||
olcThreads: 16
|
||||
# TLS Support
|
||||
olcTLSCertificateFile: /etc/yunohost/certs/yunohost.org/crt.pem
|
||||
olcTLSCertificateKeyFile: /etc/yunohost/certs/yunohost.org/key.pem
|
||||
olcTLSVerifyClient: never
|
||||
olcTLSProtocolMin: 0.0
|
||||
# The tool-threads parameter sets the actual amount of cpu's that is used
|
||||
# for indexing.
|
||||
olcToolThreads: 1
|
||||
structuralObjectClass: olcGlobal
|
||||
|
||||
#
|
||||
# Schema and objectClass definitions
|
||||
#
|
||||
dn: cn=schema,cn=config
|
||||
objectClass: olcSchemaConfig
|
||||
cn: schema
|
||||
|
||||
include: file:///etc/ldap/schema/core.ldif
|
||||
include: file:///etc/ldap/schema/cosine.ldif
|
||||
include: file:///etc/ldap/schema/nis.ldif
|
||||
include: file:///etc/ldap/schema/inetorgperson.ldif
|
||||
include: file:///etc/ldap/schema/mailserver.ldif
|
||||
include: file:///etc/ldap/schema/sudo.ldif
|
||||
include: file:///etc/ldap/schema/permission.ldif
|
||||
|
||||
#
|
||||
# Module management
|
||||
#
|
||||
dn: cn=module{0},cn=config
|
||||
objectClass: olcModuleList
|
||||
cn: module{0}
|
||||
# Where the dynamically loaded modules are stored
|
||||
olcModulePath: /usr/lib/ldap
|
||||
olcModuleLoad: {0}back_mdb
|
||||
olcModuleLoad: {1}memberof
|
||||
structuralObjectClass: olcModuleList
|
||||
|
||||
#
|
||||
# Frontend database
|
||||
#
|
||||
dn: olcDatabase={-1}frontend,cn=config
|
||||
objectClass: olcDatabaseConfig
|
||||
objectClass: olcFrontendConfig
|
||||
olcDatabase: {-1}frontend
|
||||
olcAddContentAcl: FALSE
|
||||
olcLastMod: TRUE
|
||||
olcSchemaDN: cn=Subschema
|
||||
# Hashes to be used in generation of user passwords
|
||||
olcPasswordHash: {SSHA}
|
||||
structuralObjectClass: olcDatabaseConfig
|
||||
|
||||
#
|
||||
# Config database Configuration (#0)
|
||||
#
|
||||
dn: olcDatabase={0}config,cn=config
|
||||
objectClass: olcDatabaseConfig
|
||||
olcDatabase: {0}config
|
||||
# Give access to root user.
|
||||
# This gives the admin the possibility to customize the LDAP configuration
|
||||
olcAccess: {0}to * by * none
|
||||
olcAddContentAcl: TRUE
|
||||
olcLastMod: TRUE
|
||||
olcRootDN: cn=config
|
||||
structuralObjectClass: olcDatabaseConfig
|
||||
|
||||
#
|
||||
# Main database Configuration (#1)
|
||||
#
|
||||
dn: olcDatabase={1}mdb,cn=config
|
||||
objectClass: olcDatabaseConfig
|
||||
objectClass: olcMdbConfig
|
||||
olcDatabase: {1}mdb
|
||||
# The base of your directory in database #1
|
||||
olcSuffix: dc=yunohost,dc=org
|
||||
#
|
||||
# The userPassword by default can be changed
|
||||
# by the entry owning it if they are authenticated.
|
||||
# Others should not be able to see it, except the
|
||||
# admin entry below
|
||||
# These access lines apply to database #1 only
|
||||
olcAccess: {0}to attrs=userPassword,shadowLastChange
|
||||
by dn.base="cn=admin,dc=yunohost,dc=org" write
|
||||
by dn.base="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" write
|
||||
by anonymous auth
|
||||
by self write
|
||||
by * none
|
||||
#
|
||||
# Personal information can be changed by the entry
|
||||
# owning it if they are authenticated.
|
||||
# Others should be able to see it.
|
||||
olcAccess: {1}to attrs=cn,gecos,givenName,mail,maildrop,displayName,sn
|
||||
by dn.base="cn=admin,dc=yunohost,dc=org" write
|
||||
by dn.base="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" write
|
||||
by self write
|
||||
by * read
|
||||
#
|
||||
# Ensure read access to the base for things like
|
||||
# supportedSASLMechanisms. Without this you may
|
||||
# have problems with SASL not knowing what
|
||||
# mechanisms are available and the like.
|
||||
# Note that this is covered by the 'access to *'
|
||||
# ACL below too but if you change that as people
|
||||
# are wont to do you'll still need this if you
|
||||
# want SASL (and possible other things) to work
|
||||
# happily.
|
||||
olcAccess: {2}to dn.base=""
|
||||
by * read
|
||||
#
|
||||
# The admin dn has full write access, everyone else
|
||||
# can read everything.
|
||||
olcAccess: {3}to *
|
||||
by dn.base="cn=admin,dc=yunohost,dc=org" write
|
||||
by dn.base="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" write
|
||||
by group/groupOfNames/member.exact="cn=admin,ou=groups,dc=yunohost,dc=org" write
|
||||
by * read
|
||||
#
|
||||
olcAddContentAcl: FALSE
|
||||
# Save the time that the entry gets modified, for database #1
|
||||
olcLastMod: TRUE
|
||||
# Where the database file are physically stored for database #1
|
||||
olcDbDirectory: /var/lib/ldap
|
||||
# Checkpoint the BerkeleyDB database periodically in case of system
|
||||
# failure and to speed slapd shutdown.
|
||||
olcDbCheckpoint: 512 30
|
||||
olcDbNoSync: FALSE
|
||||
# Indexing options for database #1
|
||||
olcDbIndex: objectClass eq
|
||||
olcDbIndex: entryUUID eq
|
||||
olcDbIndex: entryCSN eq
|
||||
olcDbIndex: cn eq
|
||||
olcDbIndex: uid eq,sub
|
||||
olcDbIndex: uidNumber eq
|
||||
olcDbIndex: gidNumber eq
|
||||
olcDbIndex: sudoUser eq,sub
|
||||
olcDbIndex: member eq
|
||||
olcDbIndex: mail eq
|
||||
olcDbIndex: memberUid eq
|
||||
olcDbIndex: uniqueMember eq
|
||||
olcDbIndex: virtualdomain eq
|
||||
olcDbIndex: permission eq
|
||||
olcDbMaxSize: 10485760
|
||||
structuralObjectClass: olcMdbConfig
|
||||
|
||||
#
|
||||
# Configure Memberof Overlay (used for Yunohost permission)
|
||||
#
|
||||
|
||||
# Link user <-> group
|
||||
dn: olcOverlay={0}memberof,olcDatabase={1}mdb,cn=config
|
||||
objectClass: olcOverlayConfig
|
||||
objectClass: olcMemberOf
|
||||
olcOverlay: {0}memberof
|
||||
olcMemberOfDangling: error
|
||||
olcMemberOfDanglingError: constraintViolation
|
||||
olcMemberOfRefInt: TRUE
|
||||
olcMemberOfGroupOC: groupOfNamesYnh
|
||||
olcMemberOfMemberAD: member
|
||||
olcMemberOfMemberOfAD: memberOf
|
||||
structuralObjectClass: olcMemberOf
|
||||
|
||||
# Link permission <-> groups
|
||||
dn: olcOverlay={1}memberof,olcDatabase={1}mdb,cn=config
|
||||
objectClass: olcOverlayConfig
|
||||
objectClass: olcMemberOf
|
||||
olcOverlay: {1}memberof
|
||||
olcMemberOfDangling: error
|
||||
olcMemberOfDanglingError: constraintViolation
|
||||
olcMemberOfRefInt: TRUE
|
||||
olcMemberOfGroupOC: permissionYnh
|
||||
olcMemberOfMemberAD: groupPermission
|
||||
olcMemberOfMemberOfAD: permission
|
||||
structuralObjectClass: olcMemberOf
|
||||
|
||||
# Link permission <-> user
|
||||
dn: olcOverlay={2}memberof,olcDatabase={1}mdb,cn=config
|
||||
objectClass: olcOverlayConfig
|
||||
objectClass: olcMemberOf
|
||||
olcOverlay: {2}memberof
|
||||
olcMemberOfDangling: error
|
||||
olcMemberOfDanglingError: constraintViolation
|
||||
olcMemberOfRefInt: TRUE
|
||||
olcMemberOfGroupOC: permissionYnh
|
||||
olcMemberOfMemberAD: inheritPermission
|
||||
olcMemberOfMemberOfAD: permission
|
||||
structuralObjectClass: olcMemberOf
|
|
@ -1,76 +1,78 @@
|
|||
#
|
||||
# OpenLDAP schema file for Sudo
|
||||
# Save as /etc/openldap/schema/sudo.schema
|
||||
# Save as /etc/openldap/schema/sudo.ldif
|
||||
#
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.15953.9.1.1
|
||||
dn: cn=sudo,cn=schema,cn=config
|
||||
objectClass: olcSchemaConfig
|
||||
cn: sudo
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.15953.9.1.1
|
||||
NAME 'sudoUser'
|
||||
DESC 'User(s) who may run sudo'
|
||||
EQUALITY caseExactIA5Match
|
||||
SUBSTR caseExactIA5SubstringsMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 )
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.15953.9.1.2
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.15953.9.1.2
|
||||
NAME 'sudoHost'
|
||||
DESC 'Host(s) who may run sudo'
|
||||
EQUALITY caseExactIA5Match
|
||||
SUBSTR caseExactIA5SubstringsMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 )
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.15953.9.1.3
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.15953.9.1.3
|
||||
NAME 'sudoCommand'
|
||||
DESC 'Command(s) to be executed by sudo'
|
||||
EQUALITY caseExactIA5Match
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 )
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.15953.9.1.4
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.15953.9.1.4
|
||||
NAME 'sudoRunAs'
|
||||
DESC 'User(s) impersonated by sudo (deprecated)'
|
||||
EQUALITY caseExactIA5Match
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 )
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.15953.9.1.5
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.15953.9.1.5
|
||||
NAME 'sudoOption'
|
||||
DESC 'Options(s) followed by sudo'
|
||||
EQUALITY caseExactIA5Match
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 )
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.15953.9.1.6
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.15953.9.1.6
|
||||
NAME 'sudoRunAsUser'
|
||||
DESC 'User(s) impersonated by sudo'
|
||||
EQUALITY caseExactIA5Match
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 )
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.15953.9.1.7
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.15953.9.1.7
|
||||
NAME 'sudoRunAsGroup'
|
||||
DESC 'Group(s) impersonated by sudo'
|
||||
EQUALITY caseExactIA5Match
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 )
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.15953.9.1.8
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.15953.9.1.8
|
||||
NAME 'sudoNotBefore'
|
||||
DESC 'Start of time interval for which the entry is valid'
|
||||
EQUALITY generalizedTimeMatch
|
||||
ORDERING generalizedTimeOrderingMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 )
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.15953.9.1.9
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.15953.9.1.9
|
||||
NAME 'sudoNotAfter'
|
||||
DESC 'End of time interval for which the entry is valid'
|
||||
EQUALITY generalizedTimeMatch
|
||||
ORDERING generalizedTimeOrderingMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 )
|
||||
|
||||
attributeTypes ( 1.3.6.1.4.1.15953.9.1.10
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.15953.9.1.10
|
||||
NAME 'sudoOrder'
|
||||
DESC 'an integer to order the sudoRole entries'
|
||||
EQUALITY integerMatch
|
||||
ORDERING integerOrderingMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 )
|
||||
|
||||
objectclass ( 1.3.6.1.4.1.15953.9.2.1 NAME 'sudoRole' SUP top STRUCTURAL
|
||||
#
|
||||
olcObjectClasses: ( 1.3.6.1.4.1.15953.9.2.1 NAME 'sudoRole' SUP top STRUCTURAL
|
||||
DESC 'Sudoer Entries'
|
||||
MUST ( cn )
|
||||
MAY ( sudoUser $ sudoHost $ sudoCommand $ sudoRunAs $ sudoRunAsUser $ sudoRunAsGroup $ sudoOption $ sudoOrder $ sudoNotBefore $ sudoNotAfter $
|
||||
description )
|
||||
MAY ( sudoUser $ sudoHost $ sudoCommand $ sudoRunAs $ sudoRunAsUser $ sudoRunAsGroup $ sudoOption $ sudoOrder $ sudoNotBefore $ sudoNotAfter $ description )
|
||||
)
|
|
@ -27,9 +27,6 @@ HostKey {{ key }}{% endfor %}
|
|||
MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,umac-128@openssh.com
|
||||
{% endif %}
|
||||
|
||||
# Use kernel sandbox mechanisms where possible in unprivileged processes
|
||||
UsePrivilegeSeparation sandbox
|
||||
|
||||
# LogLevel VERBOSE logs user's key fingerprint on login.
|
||||
# Needed to have a clear audit track of which key was using to log in.
|
||||
SyslogFacility AUTH
|
||||
|
|
|
@ -1,5 +0,0 @@
|
|||
# https://wiki.debian.org/UnattendedUpgrades#automatic_call_via_.2Fetc.2Fapt.2Fapt.conf.d.2F02periodic
|
||||
APT::Periodic::Enable "1";
|
||||
APT::Periodic::Update-Package-Lists "1";
|
||||
APT::Periodic::Unattended-Upgrade "1";
|
||||
APT::Periodic::Verbose "1";
|
|
@ -1,36 +0,0 @@
|
|||
// Automatically upgrade packages from these (origin, archive) pairs
|
||||
Unattended-Upgrade::Allowed-Origins {
|
||||
"${distro_id} stable";
|
||||
"${distro_id} testing";
|
||||
"Depot-Debian testing";
|
||||
"${distro_id} ${distro_codename}-security";
|
||||
"${distro_id} ${distro_codename}-updates";
|
||||
// "${distro_id} ${distro_codename}-proposed-updates";
|
||||
};
|
||||
|
||||
// List of packages to not update
|
||||
Unattended-Upgrade::Package-Blacklist {
|
||||
// "vim";
|
||||
// "libc6";
|
||||
// "libc6-dev";
|
||||
// "libc6-i686";
|
||||
};
|
||||
|
||||
// Send email to this address for problems or packages upgrades
|
||||
// If empty or unset then no email is sent, make sure that you
|
||||
// have a working mail setup on your system. The package 'mailx'
|
||||
// must be installed or anything that provides /usr/bin/mail.
|
||||
//Unattended-Upgrade::Mail "root@localhost";
|
||||
|
||||
// Do automatic removal of new unused dependencies after the upgrade
|
||||
// (equivalent to apt-get autoremove)
|
||||
Unattended-Upgrade::Remove-Unused-Dependencies "true";
|
||||
|
||||
// Automatically reboot *WITHOUT CONFIRMATION* if a
|
||||
// the file /var/run/reboot-required is found after the upgrade
|
||||
Unattended-Upgrade::Automatic-Reboot "false";
|
||||
|
||||
|
||||
// Use apt bandwidth limit feature, this example limits the download
|
||||
// speed to 70kb/sec
|
||||
//Acquire::http::Dl-Limit "70";
|
|
@ -20,10 +20,9 @@ nginx:
|
|||
test_conf: nginx -t
|
||||
needs_exposed_ports: [80, 443]
|
||||
category: web
|
||||
nslcd: {}
|
||||
php7.0-fpm:
|
||||
log: /var/log/php7.0-fpm.log
|
||||
test_conf: php-fpm7.0 --test
|
||||
php7.3-fpm:
|
||||
log: /var/log/php7.3-fpm.log
|
||||
test_conf: php-fpm7.3 --test
|
||||
category: web
|
||||
postfix:
|
||||
log: [/var/log/mail.log,/var/log/mail.err]
|
||||
|
@ -64,3 +63,5 @@ postgrey: null
|
|||
spamassassin: null
|
||||
rmilter: null
|
||||
php5-fpm: null
|
||||
php7.0-fpm: null
|
||||
nslcd: null
|
||||
|
|
316
debian/changelog
vendored
|
@ -1,3 +1,311 @@
|
|||
yunohost (4.0.4) stable; urgency=low
|
||||
|
||||
- Debugging and robustness improvements for postgresql 9.6 -> 11 and xtables->nftables migrations (accc2da4, 59bd7d66, 4cb6f7fd, 4b14402c)
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Wed, 12 Aug 2020 18:14:00 +0200
|
||||
|
||||
yunohost (4.0.3) stable; urgency=low
|
||||
|
||||
- Bump version number for stable release
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Wed, 29 Jul 2020 17:00:00 +0200
|
||||
|
||||
yunohost (4.0.2~beta) testing; urgency=low
|
||||
|
||||
- [mod] Rebase on stretch-unstable to include recent changes
|
||||
- [fix] Create admin's home during postinstall (#1021)
|
||||
|
||||
Thanks to all contributors <3 ! (Kay0u)
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Fri, 19 Jun 2020 15:16:26 +0200
|
||||
|
||||
yunohost (4.0.1~alpha) testing; urgency=low
|
||||
|
||||
- [fix] It just makes no sense to backup/restore the mysql password... (#911)
|
||||
- [fix] Fix getopts and helpers (#885, #886)
|
||||
- [fix] Explicitly create home using mkhomedir_helper instead of obscure pam rule that doesn't work anymore (b67ff314)
|
||||
- [fix] Ldap interface seems to expect lists everywhere now? (fb8c2b7b)
|
||||
- [deb] Clean control file, remove some legacy Conflicts and Replaces (ca0d4933)
|
||||
- [deb] Add conflicts with versions from backports for critical dependencies (#967)
|
||||
- [cleanup] Stale / legacy code (217aaa36, d77da6a0, af047468, 82d468a3)
|
||||
- [conf] Automatically disable/stop systemd-resolved that conflicts with dnsmasq on fresh setups ... (e7214b37)
|
||||
- [conf] Remove deprecated option in sshd conf, c.f. https://patchwork.openembedded.org/patch/139981/ (2723d245)
|
||||
- [conf] Small tweak in dovecot conf (deprecated settings) (dc0481e2)
|
||||
- [conf] Update nslcd and nsswitch stuff using new Buster's default configs + get rid of nslcd service, only keep the regen-conf part (6ef3520f)
|
||||
- [php] Migrate from php7.0 to php7.3 (3374e653, 9be10506, dd9564d3, 9679c291, 212a15e4, 25fcaa19, c4ad66f5)
|
||||
- [psql] Migrate from psql 11 to 9.6 (e88aed72, 4920d4f9, c70b0ae4)
|
||||
- [firewall] Migrate from xtable to nftable (05fb58f2, 2c4a8b73, 625d5372)
|
||||
- [slapd] Rework slapd regenconf to use new backend (#984)
|
||||
|
||||
Thanks to all contributors <3 ! (Étienne M., Josué, Kay0u)
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Fri, 05 Jun 2020 03:10:09 +0200
|
||||
|
||||
yunohost (3.8.5.5) stable; urgency=low
|
||||
|
||||
- [enh] Allow to extend the nginx default_server configuration (f1bfc521)
|
||||
- [mod] Move redirect to /yunohost/admin to a separate nginx conf file to allow customizing it more easily (ac9182d6)
|
||||
- [enh] Make sure to validate/upgrade that we don't have any active weak certificate used by nginx at the beginning of the buster migration, otherwise nginx will later miserably fail to start (d4358897)
|
||||
- [fix] get_files_diff crashing if {orig,new}_file is None (7bfe564a)
|
||||
- [enh] Remove some useless message about file that "wasn't deleted because it doesn't exist." (#1024)
|
||||
- [mod] Remove useless robot protection code (#1026)
|
||||
- [fix] Let's not redefine the value for the 'service' var ... (1a2f26dc)
|
||||
- [fix] More general stretch->buster patching for sources.list (#1028)
|
||||
- [mod] Tweak custom disclaimer about the migration still being a bit touchy in preparation for stable release (852dea07)
|
||||
- [mod] Typo/wording in en.json (#1030)
|
||||
- [i18n] Translations updated for Catalan, French, Italian, Occitan
|
||||
|
||||
Thanks to all contributors <3 ! (É. Gaspar, Kay0u, L. Noferini, ppr, Quentí, xaloc33)
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Mon, 27 Jul 2020 19:03:33 +0200
|
||||
|
||||
yunohost (3.8.5.4) testing; urgency=low
|
||||
|
||||
- [fix] Fix unscd version parsing *again*
|
||||
- [fix] Enforce permissions on rspamd log directory
|
||||
- [enh] Ignore stupid warnings about sudo-ldap that is already provided
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Sun, 21 Jun 2020 23:37:09 +0200
|
||||
|
||||
yunohost (3.8.5.3) testing; urgency=low
|
||||
|
||||
- [fix] Fix the fix about unscd downgrade :/
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Fri, 19 Jun 2020 18:50:58 +0200
|
||||
|
||||
yunohost (3.8.5.2) testing; urgency=low
|
||||
|
||||
- [fix] Small issue with unscd upgrade/downgrade ... new version ain't always 0.53.1, so find it using dirty scraping
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Thu, 18 Jun 2020 16:19:35 +0200
|
||||
|
||||
yunohost (3.8.5.1) testing; urgency=low
|
||||
|
||||
- [fix] Update Stretch->Buster migration disclaimer to make it clear that this is alpha-stage
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Sat, 06 Jun 2020 03:30:00 +0200
|
||||
|
||||
yunohost (3.8.5) testing; urgency=low
|
||||
|
||||
- [enh] Add migration procedure for Stretch->Buster (a2b83c0f, a26411db, 9f1211e9, e544bf3e, a0511cca)
|
||||
- [fix] Disable/skip ntp when inside a container (9d0c0924)
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Sat, 06 Jun 2020 02:11:51 +0200
|
||||
|
||||
yunohost (3.8.4.9) stable; urgency=low
|
||||
|
||||
- [fix] Force lowercase on domain names (804f4b3e)
|
||||
- [fix] Add dirmngr to Depends:, needed for apt-key / gpg (cd115ed8)
|
||||
- [fix] Improve debugging when diagnosis ain't happy when renewing certs (0f0194be)
|
||||
- [enh] Add yunohost version to logs metadata (d615546b)
|
||||
- [enh] Always filter irrelevant log lines when sharing it (38704cba, 51d53be5)
|
||||
- [fix] Regen-conf outputting many 'forget-about-it' messages because of files flagged as to be removed (f4525488)
|
||||
- [fix] postfix per-domain destination concurrency (#988)
|
||||
- [fix] Call regenconf for ssh before the general regenconf during the postinstall to avoid an irrelevant warning (7805837b)
|
||||
- [i18n] Translations updated for Catalan, French, German
|
||||
|
||||
Thanks to all contributors <3 ! (taziden, ljf, ppr, xaloc33, Yasss Gurl)
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Thu, 18 Jun 2020 15:13:01 +0200
|
||||
|
||||
yunohost (3.8.4.8) stable; urgency=low
|
||||
|
||||
- [fix] Don't add unprotected_urls if it's already in skipped_urls (#1005)
|
||||
- [enh] Add pre-defined DHE group and set up Nginx to use it (#1007)
|
||||
- [fix] Make sure to propagate change in slapd systemd conf during initial install (2d42480f)
|
||||
- [fix] More accurate grep to avoid mistakenly grepping commented lines... (2408a620)
|
||||
- [enh] Update n to 6.5.1 (#1012)
|
||||
- [fix] Set sury default pinning to 600 (653c5fde)
|
||||
- [enh] Clean stale file/hashes in regen-conf (#1009)
|
||||
- [fix] Weirdness in regen-conf mechanism for SSH conf (#1014)
|
||||
|
||||
Thanks to all contributors <3 ! (É. Gaspar, Josué, SohKa)
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Sat, 06 Jun 2020 01:59:08 +0200
|
||||
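Note on the DHE group added in this entry: the pre-defined group is the ffdhe2048.pem file installed under /usr/share/yunohost/other/ (see the debian/install diff further down). As a rough illustration only — the exact nginx template is not part of this excerpt — such a file is normally wired in through nginx's ssl_dhparam directive, and its parameters can be inspected with openssl:

    # Hypothetical check, assuming the file is installed at the path shown in debian/install
    openssl dhparam -in /usr/share/yunohost/other/ffdhe2048.pem -text -noout | head -n 3
    # In an nginx vhost, the group would typically be referenced as:
    #   ssl_dhparam /usr/share/yunohost/other/ffdhe2048.pem;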
|
||||
yunohost (3.8.4.7) stable; urgency=low
|
||||
|
||||
- [fix] Remove some remains of glances (17eec25e)
|
||||
- [fix] Force external resolution for reverse DNS dig (852cd14c)
|
||||
- [fix] Make sure mysql is an alias to mariadb (e24191ce, ca89607d)
|
||||
- [fix] Path for ynh_add_fpm_config template in restore (#1001)
|
||||
- [fix] Add -o Acquire::Retries=3 to fix some stupid network issues happening sometimes with apt (03432349)
|
||||
- [fix] ynh_setup_source: Retry wget on non-critical failures to try to avoid tmp dns issues (3d66eaec)
|
||||
- [fix] ynh_setup_source: Calling ynh_print_err in case of error didn't work, and we probably want a ynh_die here (55036fad)
|
||||
- [i18n] Translations updated for Catalan, French, Italian, Occitan
|
||||
|
||||
Thanks to all contributors <3 ! (JimboJoe, Leandro N., ppr, Quentí, xaloc33, yalh76)
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Thu, 04 Jun 2020 02:28:33 +0200
|
||||
|
||||
yunohost (3.8.4.6) stable; urgency=low
|
||||
|
||||
- [fix] Bump server_names_hash_bucket_size to 128 to avoid nginx exploding for stupid reasons (b3db4d92)
|
||||
- [fix] More sensible strategy for sury pinning (#1006)
|
||||
- [fix] Stop trying to fetch log categories that are not implemented yet T.T (77bd9ae3)
|
||||
- [enh] Add logging and persistent as default config for new muc room (#1008)
|
||||
- [tests] Moar tests for app args parsing (#1004)
|
||||
|
||||
Thanks to all contributors <3 ! (Gabriel, Kay0u, Bram)
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Thu, 28 May 2020 00:22:10 +0200
|
||||
|
||||
yunohost (3.8.4.5) stable; urgency=low
|
||||
|
||||
- [enh] Tell systemctl to stfu about creating symlinks when enabling/disabling services (6637c8a8)
|
||||
- [enh] Add maindomain in diagnosis email subject (e30e25fa)
|
||||
- [fix] Webpath should also be normalized for args_list, so that we can get rid of the 'malformed path' check of the CI... (58ce6e5e)
|
||||
- [fix] Increase time window for auto diagnosis cron to avoid remote diagnosis server overload (dc221495)
|
||||
- [fix] encoding bullshit (4c600125, 64596bc1)
|
||||
- [fix] Typo in diagnosis message + fix FR translation report format of bad DNS conf (#1002, b8f8ea14)
|
||||
- [fix] Flag old etckeeper.conf as 'should not exist' in regenconf (5a3b382f)
|
||||
- [enh] Detect dyndns-domains managed by yunohost and advise to use yunohost dyndns update --force (8b169f13)
|
||||
- [enh] Complain if apps savagely edit system configurations during install and upgrade (a23f02db)
|
||||
- [i18n] Translations updated for Arabic, Catalan, French, German, Italian
|
||||
- [tests] CI V2 : Rework CI workflow (#991)
|
||||
|
||||
Thanks to all contributors <3 ! (ButterflyOfFire, Kay0u, L. Noferini, rynas, V. Rubiolo, xaloc33, Yasss Gurl)
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Tue, 26 May 2020 03:20:39 +0200
|
||||
|
||||
yunohost (3.8.4.4) stable; urgency=low
|
||||
|
||||
- [fix] Crash when the services file is empty (85f1802)
|
||||
- [fix] IPv6 detection when using wg-quick (#997)
|
||||
- [fix] Use a .get() to avoid crash if key doesn't exist (1f1b2338)
|
||||
- [enh] Don't display the hostname when calling journalctl, this takes horizontal space for nothing (2bcfb5a1)
|
||||
- [fix] Add --quiet, otherwise getopts is confused by "-- Logs" at the beginning (bdbf1822)
|
||||
- [mod] We don't need those color codes... and warnings are already warnings... (2a631fa2)
|
||||
- [fix] psql_setup_db: Do not create a new password if the user already exists (#998)
|
||||
- [enh] Add an exception if packaging format is not recognized (f0cc6798)
|
||||
|
||||
Thanks to all contributors <3 ! (Aleks, Julien Rabier, Kayou)
|
||||
|
||||
-- Kay0u <pierre@kayou.io> Fri, 22 May 2020 19:26:05 +0000
|
||||
|
||||
yunohost (3.8.4.3) stable; urgency=low
|
||||
|
||||
- [fix] Workaround for the sury pinning issues when installing dependencies
|
||||
- [i18n] Translations updated for Catalan, French, Occitan
|
||||
|
||||
Thanks to all contributors <3 ! (Aleks, clecle226, Kay0u, ppr, Quenti)
|
||||
|
||||
-- Kay0u <pierre@kayou.io> Wed, 20 May 2020 18:41:49 +0000
|
||||
|
||||
yunohost (3.8.4.2) testing; urgency=low
|
||||
|
||||
- [enh] During failed upgrades: Only mention packages that couldn't be upgraded (26fcfed7)
|
||||
- [enh] Also run dpkg --audit to check if dpkg is in a broken state (09d8500f, 97199d19)
|
||||
- [enh] Improve logs readability (c6f18496, 9cbd368d, 5850bf61, 413778d2, 5c8c07b8, f73c34bf, 94ea8265)
|
||||
- [enh] Crash early about apps already installed when attempting to restore (f9e4c96c)
|
||||
- [fix] Add the damn short hostname to /etc/hosts automagically (c.f. rabbitmq-server) (e67dc791)
|
||||
- [fix] Don't miserably crash if doveadm fails to run (c9b22138)
|
||||
- [fix] Diagnosis: Try to not have weird warnings if no diagnosis ran yet... (65c87d55)
|
||||
- [fix] Diagnosis: Change logic of --email to avoid sending empty mail if some issues are found but ignored (4cd4938e)
|
||||
- [enh] Diagnosis/services: Report the service status as warning/unknown if service type is oneshot and status exited (dd09758f, 1cd7ffea)
|
||||
- [fix] Rework ynh_psql_test_if_first_run (#993)
|
||||
- [tests] Tests for args parsing (#989, 108a3ca4)
|
||||
|
||||
Thanks to all contributors <3 ! (Bram, Kayou)
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Tue, 19 May 2020 20:08:47 +0200
|
||||
|
||||
yunohost (3.8.4.1) testing; urgency=low
|
||||
|
||||
- [mod] Tweak diagnosis threshold for swap warning (429df8c4)
|
||||
- [fix] Make sure we have a list for log_list + make sure item is in list before using .remove()... (afbeb145, 43facfd5)
|
||||
- [fix] Sometimes tree-model has a weird \x00 which breaks yunopaste (c346f5f1)
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Mon, 11 May 2020 00:50:34 +0200
|
||||
|
||||
yunohost (3.8.4) testing; urgency=low
|
||||
|
||||
- [fix] Restoration of custom hooks / missing restore hooks (#927)
|
||||
- [enh] Real CSP headers for the webadmin (#961)
|
||||
- [enh] Simplify / optimize reading version of yunohost packages... (#968)
|
||||
- [fix] handle new auto restart of ldap in moulinette (#975)
|
||||
- [enh] service.py cleanup + add tests for services (#979, #986)
|
||||
- [fix] Enforce permissions for stuff in /etc/yunohost/ (#963)
|
||||
- [mod] Remove security diagnosis category for now, Move meltdown check to base system (a799740a)
|
||||
- [mod] Report warnings/errors about swap as info instead ... add a tip about the fact that having swap on an SD card or SSD is dangerous (23147161)
|
||||
- [enh] Improve auto diagnosis cron UX, add a --human-readable option to diagnosis_show() (aecbb14a)
|
||||
- [enh] Rely on new diagnosis for letsencrypt eligibility (#985)
|
||||
- [i18n] Translations updated for Catalan, Esperanto, French, Spanish
|
||||
|
||||
Thanks to all contributors <3 ! (amirale qt, autra, Bram, clecle226, I. Hernández, Kay0u, xaloc33)
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Sat, 09 May 2020 21:20:00 +0200
|
||||
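A minimal sketch of how the 3.8.4 diagnosis changes above surface on the command line, assuming the --issues and --human-readable flags can be combined as written (the changelog itself does not spell this out):

    # Run the diagnosis, then print only the issues in a human-friendly format
    yunohost diagnosis run
    yunohost diagnosis show --issues --human-readable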
|
||||
yunohost (3.8.3) testing; urgency=low
|
||||
|
||||
- [fix] Remove dot in reverse DNS check
|
||||
- [fix] Upgrade of multi-instance apps was broken (#976)
|
||||
- [fix] Check was broken if an app with no domain setting was installed (#978)
|
||||
- [enh] Add a timeout to wget (#972)
|
||||
- [fix] ynh_get_ram: Enforce choosing --free or --total (#972)
|
||||
- [fix] Simplify / improve robustness of backup list
|
||||
- [enh] Make nodejs helpers easier to use (#939)
|
||||
- [fix] Misc tweak for disk usage diagnosis, some values were inconsistent / bad UX / ...
|
||||
- [enh] Assert slapd is running to avoid miserably crashing with weird ldap errors
|
||||
- [enh] Try to show smarter / more useful logs by filtering irrelevant lines like set +x etc
|
||||
- Technical tweaks for metronome 3.14.0 support
|
||||
- Misc improvements for tests and linters
|
||||
|
||||
Thanks to all contributors <3 ! (Bram, Kay0u, Maniack C., ljf, Maranda)
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Thu, 07 Apr 2020 04:00:00 +0000
|
||||
|
||||
yunohost (3.8.2.2) testing; urgency=low
|
||||
|
||||
Aleks broke everything /again/ *.*
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Thu, 30 Apr 2020 18:05:00 +0000
|
||||
|
||||
yunohost (3.8.2.1) testing; urgency=low
|
||||
|
||||
- [fix] Make sure DNS queries are done using absolute names to avoid stupid issues
|
||||
- [fix] More reliable way to fetch PTR record / reverse DNS
|
||||
- [fix] Propagate IPv6 default route check to ip diagnoser code as well
|
||||
|
||||
Thanks to ljf for the tests and fixes !
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Thu, 30 Apr 2020 17:30:00 +0000
|
||||
|
||||
yunohost (3.8.2) testing; urgency=low
|
||||
|
||||
### Diagnosis
|
||||
|
||||
- [fix] Some DNS queries triggered false negatives about CNAME/A record and email blacklisting (#943)
|
||||
- [enh] Add a check about domain expiration (#944)
|
||||
- [enh] Dirty hack to automatically find custom SSH port and diagnose it instead of 22 (b78d722)
|
||||
- [enh] Add a tip / explanation when IPv6 ain't working / available (426d938)
|
||||
- [fix] Small false-negative about not having IPv6 when it's actually working (822c731)
|
||||
|
||||
### Helpers
|
||||
|
||||
- [fix] When setting up a new db, corresponding user should be declared as owner (#813)
|
||||
- [enh] Add dynamic variables to systemd helper (#937)
|
||||
- [enh] Clean helpers (#947)
|
||||
- [fix] getopts behaved in a weird way when fed empty parameters (#948)
|
||||
- [fix] Use ynh_port_available in ynh_find_port (#957)
|
||||
|
||||
### Others
|
||||
|
||||
- [enh] Set up all XMPP components for each "parent" domain (#916)
|
||||
- [fix] Previous change in Postfix ciphers broke TLS (#949)
|
||||
- [fix] Update ACME snippet detection following previous change (#950)
|
||||
- [fix] Trying to install apps with unpatchable legacy helpers was breaking stuff (#954)
|
||||
- [fix] Patch usage of old 'yunohost tools diagnosis' (#954)
|
||||
- [enh] Misc optimizations to speed up regen-conf and other things (#958)
|
||||
- [enh] When sharing logs, also anonymize folder name containing %2e instead of dot (b392efd)
|
||||
- [enh] Keep track of yunohost version a backup was made from (54cc684)
|
||||
- [fix] Re-add 'app fetchlist', 'app list -i', 'app list' filter for backward compatibility... (69938c3)
|
||||
- [i18n] Improve translations for Catalan, German, French, Esperanto, Spanish, Greek, Nepali, Occitan
|
||||
|
||||
Thanks to all contributors <3 ! (Bram, C. Wehrli, Kay0u, Maniack C., Quentí, Zeik0s, amirale qt, ljf, pitchum, tituspijean, xaloc33, Éric G.)
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Wed, 29 Apr 2020 23:15:00 +0000
|
||||
|
||||
yunohost (3.8.1.1) testing; urgency=low
|
||||
|
||||
- [fix] Stupid issue about path in debian/install ...
|
||||
|
@ -74,6 +382,12 @@ yunohost (3.8.0) testing; urgency=low
|
|||
|
||||
-- Kay0u <pierre@kayou.io> Thu, 09 Apr 2020 19:59:18 +0000
|
||||
|
||||
yunohost (3.7.1.3) stable; urgency=low
|
||||
|
||||
- [fix] Fix the hotfix about trailing slashes, it was breaking access to apps installed on the domain root.
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Thu, 28 Apr 2020 19:00:00 +0000
|
||||
|
||||
yunohost (3.7.1.2) stable; urgency=low
|
||||
|
||||
- [fix] Be more robust against situations where some archives are corrupted
|
||||
|
@ -1569,7 +1883,7 @@ yunohost (2.5.2) testing; urgency=low
|
|||
|
||||
Other fixes and improvements:
|
||||
* [enh] remove timeout from cli interface
|
||||
* [fix] [#662](https://dev.yunohost.org/issues/662): missing 'python-openssl' dependency for Let's Encrypt integration.
|
||||
* [fix] #662: missing 'python-openssl' dependency for Let's Encrypt integration.
|
||||
* [fix] --no-remove-on-failure for app install should behave as a flag.
|
||||
* [fix] don't remove trailing char if it's not a slash
|
||||
|
||||
|
|
6
debian/conf/pam/mkhomedir
vendored
|
@ -1,6 +0,0 @@
|
|||
Name: Create home directory during login
|
||||
Default: yes
|
||||
Priority: 900
|
||||
Session-Type: Additional
|
||||
Session:
|
||||
required pam_mkhomedir.so umask=0022 skel=/etc/skel
|
39
debian/control
vendored
|
@ -11,12 +11,11 @@ Package: yunohost
|
|||
Essential: yes
|
||||
Architecture: all
|
||||
Depends: ${python:Depends}, ${misc:Depends}
|
||||
, moulinette (>= 3.7), ssowat (>= 3.7)
|
||||
, moulinette (>= 4.0.0~alpha), ssowat (>= 4.0.0~alpha)
|
||||
, python-psutil, python-requests, python-dnspython, python-openssl
|
||||
, python-apt, python-miniupnpc, python-dbus, python-jinja2
|
||||
, python-toml
|
||||
, apt, apt-transport-https
|
||||
, nginx, nginx-extras (>=1.6.2)
|
||||
, python-miniupnpc, python-dbus, python-jinja2
|
||||
, python-toml, python-packaging, python-publicsuffix
|
||||
, apt, apt-transport-https, dirmngr
|
||||
, php-fpm, php-ldap, php-intl
|
||||
, mariadb-server, php-mysql | php-mysqlnd
|
||||
, openssh-server, iptables, fail2ban, dnsutils, bind9utils
|
||||
|
@ -25,31 +24,29 @@ Depends: ${python:Depends}, ${misc:Depends}
|
|||
, dnsmasq, avahi-daemon, libnss-mdns, resolvconf, libnss-myhostname
|
||||
, postfix, postfix-ldap, postfix-policyd-spf-perl, postfix-pcre
|
||||
, dovecot-core, dovecot-ldap, dovecot-lmtpd, dovecot-managesieved, dovecot-antispam
|
||||
, rspamd (>= 1.6.0), opendkim-tools, postsrsd, procmail, mailutils
|
||||
, rspamd, opendkim-tools, postsrsd, procmail, mailutils
|
||||
, redis-server
|
||||
, metronome
|
||||
, git, curl, wget, cron, unzip, jq
|
||||
, lsb-release, haveged, fake-hwclock, equivs, lsof
|
||||
, metronome (>=3.14.0)
|
||||
, git, curl, wget, cron, unzip, jq, bc
|
||||
, lsb-release, haveged, fake-hwclock, equivs, lsof, whois, python-publicsuffix
|
||||
Recommends: yunohost-admin
|
||||
, ntp, inetutils-ping | iputils-ping
|
||||
, bash-completion, rsyslog
|
||||
, php-gd, php-curl, php-gettext, php-mcrypt
|
||||
, php-gd, php-curl, php-gettext
|
||||
, python-pip
|
||||
, unattended-upgrades
|
||||
, libdbd-ldap-perl, libnet-dns-perl
|
||||
Suggests: htop, vim, rsync, acpi-support-base, udisks2
|
||||
Conflicts: iptables-persistent
|
||||
, moulinette-yunohost, yunohost-config
|
||||
, yunohost-config-others, yunohost-config-postfix
|
||||
, yunohost-config-dovecot, yunohost-config-slapd
|
||||
, yunohost-config-nginx, yunohost-config-amavis
|
||||
, yunohost-config-mysql, yunohost-predepends
|
||||
, apache2, bind9
|
||||
Replaces: moulinette-yunohost, yunohost-config
|
||||
, yunohost-config-others, yunohost-config-postfix
|
||||
, yunohost-config-dovecot, yunohost-config-slapd
|
||||
, yunohost-config-nginx, yunohost-config-amavis
|
||||
, yunohost-config-mysql, yunohost-predepends
|
||||
, apache2
|
||||
, bind9
|
||||
, nginx-extras (>= 1.16)
|
||||
, openssl (>= 1.1.1g)
|
||||
, slapd (>= 2.4.49)
|
||||
, dovecot-core (>= 1:2.3.7)
|
||||
, redis-server (>= 5:5.0.7)
|
||||
, fail2ban (>= 0.11)
|
||||
, iptables (>= 1.8.3)
|
||||
Description: manageable and configured self-hosting server
|
||||
YunoHost aims to make self-hosting accessible to everyone. It configures
|
||||
an email, Web and IM server alongside a LDAP base. It also provides
|
||||
|
|
2
debian/install
vendored
|
@ -8,11 +8,11 @@ data/other/yunoprompt.service /etc/systemd/system/
|
|||
data/other/password/* /usr/share/yunohost/other/password/
|
||||
data/other/dpkg-origins/yunohost /etc/dpkg/origins
|
||||
data/other/dnsbl_list.yml /usr/share/yunohost/other/
|
||||
data/other/ffdhe2048.pem /usr/share/yunohost/other/
|
||||
data/other/* /usr/share/yunohost/yunohost-config/moulinette/
|
||||
data/templates/* /usr/share/yunohost/templates/
|
||||
data/helpers /usr/share/yunohost/
|
||||
data/helpers.d/* /usr/share/yunohost/helpers.d/
|
||||
debian/conf/pam/* /usr/share/pam-configs/
|
||||
lib/metronome/modules/* /usr/lib/metronome/modules/
|
||||
locales/* /usr/lib/moulinette/yunohost/locales/
|
||||
src/yunohost /usr/lib/moulinette
|
||||
|
|
5
debian/postinst
vendored
|
@ -29,11 +29,6 @@ do_configure() {
|
|||
|
||||
# Yunoprompt
|
||||
systemctl enable yunoprompt.service
|
||||
|
||||
# remove old PAM config and update it
|
||||
[[ ! -f /usr/share/pam-configs/my_mkhomedir ]] \
|
||||
|| rm /usr/share/pam-configs/my_mkhomedir
|
||||
pam-auth-update --package
|
||||
}
|
||||
|
||||
# summary of how this script can be called:
|
||||
|
|
|
@ -2,6 +2,8 @@
|
|||
|
||||
<h1>App helpers</h1>
|
||||
|
||||
<p>Doc auto-generated by <a href="https://github.com/YunoHost/yunohost/blob/stretch-unstable/doc/generate_helper_doc.py">this script</a> on {{data.date}} (Yunohost version {{data.version}})</p>
|
||||
|
||||
{% for category, helpers in data.helpers %}
|
||||
|
||||
<h3 style="text-transform: uppercase; font-weight: bold">{{ category }}</h3>
|
||||
|
@ -70,7 +72,7 @@
|
|||
</p>
|
||||
{% endif %}
|
||||
<p>
|
||||
<a href="https://github.com/YunoHost/yunohost/blob/stretch-unstable/data/helpers.d/{{ category }}#L{{ h.line + 1 }}">Dude, show me the code !</a>
|
||||
<a href="https://github.com/YunoHost/yunohost/blob/stretch-stable/data/helpers.d/{{ category }}#L{{ h.line + 1 }}">Dude, show me the code !</a>
|
||||
</p>
|
||||
|
||||
</div>
|
||||
|
@ -81,9 +83,6 @@
|
|||
{% endfor %}
|
||||
{% endfor %}
|
||||
|
||||
<p>Generated by <a href="https://github.com/YunoHost/yunohost/blob/stretch-unstable/doc/generate_helper_doc.py">this script</a> on {{data.date}} (Yunohost version {{data.version}})</p>
|
||||
|
||||
|
||||
<style>
|
||||
/*=================================================
|
||||
Helper card
|
||||
|
|
|
@ -228,7 +228,7 @@ function driver:stores(username, type, pattern)
|
|||
return nil, "not implemented";
|
||||
end
|
||||
|
||||
function driver:store_exists(username, datastore, type)
|
||||
function driver:store_exists(username, type)
|
||||
return nil, "not implemented";
|
||||
end
|
||||
|
||||
|
@ -236,7 +236,7 @@ function driver:purge(username)
|
|||
return nil, "not implemented";
|
||||
end
|
||||
|
||||
function driver:users()
|
||||
function driver:nodes(type)
|
||||
return nil, "not implemented";
|
||||
end
|
||||
|
||||
|
|
|
@ -164,7 +164,7 @@
|
|||
"diagnosis_basesystem_kernel": "هذا الخادم يُشغّل نواة لينكس {kernel_version}",
|
||||
"diagnosis_basesystem_ynh_single_version": "{package} الإصدار: {version} ({repo})",
|
||||
"diagnosis_basesystem_ynh_main_version": "هذا الخادم يُشغّل YunoHost {main_version} ({repo})",
|
||||
"diagnosis_everything_ok": "كل شيء على ما يرام في {category}!",
|
||||
"diagnosis_everything_ok": "كل شيء يبدو على ما يرام في {category}!",
|
||||
"diagnosis_ip_connected_ipv4": "الخادم مُتّصل بالإنترنت عبر IPv4!",
|
||||
"diagnosis_ip_connected_ipv6": "الخادم مُتّصل بالإنترنت عبر IPv6!",
|
||||
"diagnosis_ip_not_connected_at_all": "يبدو أنّ الخادم غير مُتّصل بتاتا بالإنترنت!؟",
|
||||
|
@ -172,5 +172,16 @@
|
|||
"apps_already_up_to_date": "كافة التطبيقات مُحدّثة",
|
||||
"app_remove_after_failed_install": "جارٍ حذف التطبيق بعدما فشل تنصيبها…",
|
||||
"apps_catalog_updating": "جارٍ تحديث فهرس التطبيقات…",
|
||||
"apps_catalog_update_success": "تم تحديث فهرس التطبيقات!"
|
||||
"apps_catalog_update_success": "تم تحديث فهرس التطبيقات!",
|
||||
"diagnosis_domain_expiration_error": "ستنتهي مدة صلاحية بعض النطاقات في القريب العاجل!",
|
||||
"diagnosis_domain_expiration_warning": "ستنتهي مدة صلاحية بعض النطاقات قريبًا!",
|
||||
"diagnosis_ports_could_not_diagnose_details": "خطأ: {error}",
|
||||
"diagnosis_description_regenconf": "إعدادات النظام",
|
||||
"diagnosis_description_mail": "البريد الإلكتروني",
|
||||
"diagnosis_description_web": "الويب",
|
||||
"diagnosis_description_systemresources": "موارد النظام",
|
||||
"diagnosis_description_services": "حالة الخدمات",
|
||||
"diagnosis_description_dnsrecords": "تسجيلات خدمة DNS",
|
||||
"diagnosis_description_ip": "الإتصال بالإنترنت",
|
||||
"diagnosis_description_basesystem": "النظام الأساسي"
|
||||
}
|
||||
|
|
125
locales/ca.json
|
@ -4,7 +4,7 @@
|
|||
"admin_password_change_failed": "No es pot canviar la contrasenya",
|
||||
"admin_password_changed": "S'ha canviat la contrasenya d'administració",
|
||||
"app_already_installed": "{app:s} ja està instal·lada",
|
||||
"app_already_installed_cant_change_url": "Aquesta aplicació ja està instal·lada. La URL no és pot canviar únicament amb aquesta funció. Mireu a \"app changeurl\" si està disponible.",
|
||||
"app_already_installed_cant_change_url": "Aquesta aplicació ja està instal·lada. La URL no és pot canviar únicament amb aquesta funció. Mireu a `app changeurl` si està disponible.",
|
||||
"app_already_up_to_date": "{app:s} ja està actualitzada",
|
||||
"app_argument_choice_invalid": "Utilitzeu una de les opcions «{choices:s}» per l'argument «{name:s}»",
|
||||
"app_argument_invalid": "Escolliu un valor vàlid per l'argument «{name:s}»: {error:s}",
|
||||
|
@ -59,7 +59,7 @@
|
|||
"backup_couldnt_bind": "No es pot lligar {src:s} amb {dest:s}.",
|
||||
"backup_created": "S'ha creat la còpia de seguretat",
|
||||
"aborting": "Avortant.",
|
||||
"app_not_upgraded": "L'aplicació «{failed_app}» no s'ha pogut actualitzar, i com a conseqüència l'actualització de les següents aplicacions ha estat cancel·lada: {apps}",
|
||||
"app_not_upgraded": "L'aplicació «{failed_app}» no s'ha pogut actualitzar, i com a conseqüència s'ha cancel·lat l'actualització de les següents aplicacions: {apps}",
|
||||
"app_start_install": "instal·lant l'aplicació «{app}»…",
|
||||
"app_start_remove": "Eliminant l'aplicació «{app}»…",
|
||||
"app_start_backup": "Recuperant els fitxers pels que s'ha de fer una còpia de seguretat per «{app}»…",
|
||||
|
@ -100,7 +100,7 @@
|
|||
"backup_unable_to_organize_files": "No s'ha pogut utilitzar el mètode ràpid per organitzar els fitxers dins de l'arxiu",
|
||||
"backup_with_no_backup_script_for_app": "L'aplicació «{app:s}» no té un script de còpia de seguretat. Serà ignorat.",
|
||||
"backup_with_no_restore_script_for_app": "L'aplicació «{app:s}» no té un script de restauració, no podreu restaurar automàticament la còpia de seguretat d'aquesta aplicació.",
|
||||
"certmanager_acme_not_configured_for_domain": "El certificat pel domini «{domain:s}» sembla que no està instal·lat correctament. Si us plau executeu primer «cert-install» per aquest domini.",
|
||||
"certmanager_acme_not_configured_for_domain": "No s'ha pogut executar el ACME challenge pel domini {domain} en aquests moments ja que a la seva configuració de nginx li manca el codi corresponent… Assegureu-vos que la configuració nginx està actualitzada utilitzant «yunohost tools regen-conf nginx --dry-run --with-diff».",
|
||||
"certmanager_attempt_to_renew_nonLE_cert": "El certificat pel domini «{domain:s}» no ha estat emès per Let's Encrypt. No es pot renovar automàticament!",
|
||||
"certmanager_attempt_to_renew_valid_cert": "El certificat pel domini «{domain:s}» està a punt de caducar! (Utilitzeu --force si sabeu el que esteu fent)",
|
||||
"certmanager_attempt_to_replace_valid_cert": "Esteu intentant sobreescriure un certificat correcte i vàlid pel domini {domain:s}! (Utilitzeu --force per ometre)",
|
||||
|
@ -113,8 +113,8 @@
|
|||
"certmanager_conflicting_nginx_file": "No s'ha pogut preparar el domini per al desafiament ACME: l'arxiu de configuració NGINX {filepath:s} entra en conflicte i s'ha d'eliminar primer",
|
||||
"certmanager_couldnt_fetch_intermediate_cert": "S'ha exhaurit el temps d'esperar al intentar recollir el certificat intermedi des de Let's Encrypt. La instal·lació/renovació del certificat s'ha cancel·lat - torneu a intentar-ho més tard.",
|
||||
"certmanager_domain_cert_not_selfsigned": "El certificat pel domini {domain:s} no és auto-signat Esteu segur de voler canviar-lo? (Utilitzeu «--force» per fer-ho)",
|
||||
"certmanager_domain_dns_ip_differs_from_public_ip": "El registre DNS \"A\" pel domini «{domain:s}» és diferent a l'adreça IP d'aquest servidor. Si heu modificat recentment el registre A, si us plau espereu a que es propagui (hi ha eines per verificar la propagació disponibles a internet). (Si sabeu el que esteu fent, podeu utilitzar «--no-checks» per desactivar aquestes comprovacions.)",
|
||||
"certmanager_domain_http_not_working": "Sembla que el domini {domain:s} no és accessible via HTTP. Verifiqueu que les configuracions DNS i NGINX siguin correctes",
|
||||
"certmanager_domain_dns_ip_differs_from_public_ip": "Els registres DNS pel domini «{domain:s}» són diferents a l'adreça IP d'aquest servidor. Mireu la categoria «registres DNS» (bàsic) al diagnòstic per a més informació. Si heu modificat recentment el registre A, si us plau espereu a que es propagui (hi ha eines per verificar la propagació disponibles a internet). (Si sabeu el que esteu fent, podeu utilitzar «--no-checks» per desactivar aquestes comprovacions.)",
|
||||
"certmanager_domain_http_not_working": "El domini {domain:s} sembla que no és accessible via HTTP. Verifiqueu la categoria «Web» en el diagnòstic per a més informació. (Si sabeu el que esteu fent, utilitzeu «--no-checks» per deshabilitar les comprovacions.)",
|
||||
"certmanager_domain_unknown": "Domini desconegut «{domain:s}»",
|
||||
"certmanager_error_no_A_record": "No s'ha trobat cap registre DNS «A» per «{domain:s}». Heu de fer que el vostre nom de domini apunti cap a la vostra màquina per tal de poder instal·lar un certificat Let's Encrypt. (Si sabeu el que esteu fent, podeu utilitzar «--no-checks» per desactivar aquestes comprovacions.)",
|
||||
"certmanager_hit_rate_limit": "S'han emès massa certificats recentment per aquest mateix conjunt de dominis {domain:s}. Si us plau torneu-ho a intentar més tard. Consulteu https://letsencrypt.org/docs/rate-limits/ per obtenir més detalls",
|
||||
|
@ -127,7 +127,7 @@
|
|||
"confirm_app_install_thirdparty": "PERILL! Aquesta aplicació no es part del catàleg d'aplicacions de YunoHost. La instal·lació d'aplicacions de terceres parts pot comprometre la integritat i seguretat del seu sistema. No hauríeu d'instal·lar-ne a no ser que sapigueu el que feu. No obtindreu CAP AJUDA si l'aplicació no funciona o trenca el sistema… Si accepteu el risc, escriviu «{answers:s}»",
|
||||
"custom_app_url_required": "Heu de especificar una URL per actualitzar la vostra aplicació personalitzada {app:s}",
|
||||
"admin_password_too_long": "Trieu una contrasenya de menys de 127 caràcters",
|
||||
"dpkg_is_broken": "No es pot fer això en aquest instant perquè dpkg/APT (els gestors de paquets del sistema) sembla estar mal configurat… Podeu intentar solucionar-ho connectant-vos per SSH i executant «sudo dpkg --configure -a».",
|
||||
"dpkg_is_broken": "No es pot fer això en aquest instant perquè dpkg/APT (els gestors de paquets del sistema) sembla estar mal configurat… Podeu intentar solucionar-ho connectant-vos per SSH i executant «sudo apt install --fix-broken» i/o «sudo dpkg --configure -a».",
|
||||
"domain_cannot_remove_main": "No es pot eliminar «{domain:s}» ja que és el domini principal, primer s'ha d'establir un nou domini principal utilitzant «yunohost domain main-domain -n <un-altre-domini>»; aquí hi ha una llista dels possibles dominis: {other_domains:s}",
|
||||
"domain_cert_gen_failed": "No s'ha pogut generar el certificat",
|
||||
"domain_created": "S'ha creat el domini",
|
||||
|
@ -140,7 +140,7 @@
|
|||
"domain_dyndns_already_subscribed": "Ja us heu subscrit a un domini DynDNS",
|
||||
"domain_dyndns_root_unknown": "Domini DynDNS principal desconegut",
|
||||
"domain_hostname_failed": "No s'ha pogut establir un nou nom d'amfitrió. Això podria causar problemes més tard (podria no passar res).",
|
||||
"domain_uninstall_app_first": "Hi ha una o més aplicacions instal·lades en aquest domini. Desinstal·leu les abans d'eliminar el domini",
|
||||
"domain_uninstall_app_first": "Aquestes aplicacions encara estan instal·lades en el vostre domini: {apps}. Desinstal·leu les abans d'eliminar el domini",
|
||||
"domain_unknown": "Domini desconegut",
|
||||
"domains_available": "Dominis disponibles:",
|
||||
"done": "Fet",
|
||||
|
@ -167,7 +167,7 @@
|
|||
"file_does_not_exist": "El camí {path:s} no existeix.",
|
||||
"firewall_reload_failed": "No s'ha pogut tornar a carregar el tallafocs",
|
||||
"firewall_reloaded": "S'ha tornat a carregar el tallafocs",
|
||||
"firewall_rules_cmd_failed": "No s'han pogut aplicar algunes regles del tallafocs. Més informació en el registre.",
|
||||
"firewall_rules_cmd_failed": "Han fallat algunes comandes per aplicar regles del tallafocs. Més informació en el registre.",
|
||||
"global_settings_bad_choice_for_enum": "Opció pel paràmetre {setting:s} incorrecta, s'ha rebut «{choice:s}», però les opcions disponibles són: {available_choices:s}",
|
||||
"global_settings_bad_type_for_setting": "El tipus del paràmetre {setting:s} és incorrecte. S'ha rebut {received_type:s}, però s'esperava {expected_type:s}",
|
||||
"global_settings_cant_open_settings": "No s'ha pogut obrir el fitxer de configuració, raó: {reason:s}",
|
||||
|
@ -284,7 +284,7 @@
|
|||
"migration_0008_root": "• No es podrà connectar com a root a través de SSH. S'haurà d'utilitzar l'usuari admin per fer-ho;",
|
||||
"migration_0008_dsa": "• Es desactivarà la clau DSA. Per tant, es podria haver d'invalidar un missatge esgarrifós del client SSH, i tornar a verificar l'empremta digital del servidor;",
|
||||
"migration_0008_warning": "Si heu entès els avisos i voleu que YunoHost sobreescrigui la configuració actual, comenceu la migració. Sinó, podeu saltar-vos la migració, tot i que no està recomanat.",
|
||||
"migration_0008_no_warning": "Hauria de ser segur sobreescriure la configuració SSH, però no es pot estar del tot segur! Executetu la migració per sobreescriure-la. Sinó, podeu saltar-vos la migració, tot i que no està recomanat.",
|
||||
"migration_0008_no_warning": "Hauria de ser segur sobreescriure la configuració SSH, però no es pot estar del tot segur! Executeu la migració per sobreescriure-la. Sinó, podeu saltar-vos la migració, tot i que no està recomanat.",
|
||||
"migration_0009_not_needed": "Sembla que ja s'ha fet aquesta migració… (?) Ometent.",
|
||||
"migrations_cant_reach_migration_file": "No s'ha pogut accedir als fitxers de migració al camí «%s»",
|
||||
"migrations_list_conflict_pending_done": "No es pot utilitzar «--previous» i «--done» al mateix temps.",
|
||||
|
@ -390,7 +390,7 @@
|
|||
"ssowat_conf_updated": "S'ha actualitzat la configuració SSOwat",
|
||||
"system_upgraded": "S'ha actualitzat el sistema",
|
||||
"system_username_exists": "El nom d'usuari ja existeix en la llista d'usuaris de sistema",
|
||||
"this_action_broke_dpkg": "Aquesta acció a trencat dpkg/APT (els gestors de paquets del sistema)… Podeu intentar resoldre el problema connectant-vos amb SSH i executant «sudo dpkg --configure -a».",
|
||||
"this_action_broke_dpkg": "Aquesta acció a trencat dpkg/APT (els gestors de paquets del sistema)… Podeu intentar resoldre el problema connectant-vos amb SSH i executant «sudo apt install --fix-broken» i/o «sudo dpkg --configure -a».",
|
||||
"tools_upgrade_at_least_one": "Especifiqueu «--apps», o «--system»",
|
||||
"tools_upgrade_cant_both": "No es poden actualitzar tant el sistema com les aplicacions al mateix temps",
|
||||
"tools_upgrade_cant_hold_critical_packages": "No es poden mantenir els paquets crítics…",
|
||||
|
@ -504,7 +504,7 @@
|
|||
"diagnosis_basesystem_ynh_main_version": "El servidor funciona amb YunoHost {main_version} ({repo})",
|
||||
"diagnosis_ram_low": "El sistema només té {available} ({available_percent}%) de memòria RAM disponibles d'un total de {total}. Aneu amb compte.",
|
||||
"diagnosis_swap_none": "El sistema no té swap. Hauríeu de considerar afegir un mínim de {recommended} de swap per evitar situacions en les que el sistema es queda sense memòria.",
|
||||
"diagnosis_regenconf_manually_modified": "El fitxer de configuració {file} ha estat modificat manualment.",
|
||||
"diagnosis_regenconf_manually_modified": "El fitxer de configuració <code>{file}</code> sembla haver estat modificat manualment.",
|
||||
"diagnosis_security_vulnerable_to_meltdown_details": "Per arreglar-ho, hauríeu d'actualitzar i reiniciar el sistema per tal de carregar el nou nucli de linux (o contactar amb el proveïdor del servidor si no funciona). Vegeu https://meltdownattack.com/ per a més informació.",
|
||||
"diagnosis_http_could_not_diagnose": "No s'ha pogut diagnosticar si el domini és accessible des de l'exterior.",
|
||||
"diagnosis_http_could_not_diagnose_details": "Error: {error}",
|
||||
|
@ -531,23 +531,23 @@
|
|||
"diagnosis_ip_not_connected_at_all": "Sembla que el servidor no està connectat a internet!?",
|
||||
"diagnosis_ip_dnsresolution_working": "La resolució de nom de domini està funcionant!",
|
||||
"diagnosis_ip_broken_dnsresolution": "La resolució de nom de domini falla per algun motiu… Està el tallafocs bloquejant les peticions DNS?",
|
||||
"diagnosis_ip_broken_resolvconf": "La resolució de nom de domini sembla caiguda en el servidor, podria estar relacionat amb el fet que /etc/resolv.conf no apunta cap a 127.0.0.1.",
|
||||
"diagnosis_ip_weird_resolvconf": "La resolució DNS sembla estar funcionant, però aneu amb compte ja que esteu utilitzant un versió personalitzada de /etc/resolv.conf.",
|
||||
"diagnosis_ip_weird_resolvconf_details": "En canvi, aquest fitxer hauria de ser un enllaç simbòlic cap a /etc/resolvconf/run/resolv.conf i que aquest apunti cap a 127.0.0.1 (dnsmasq). La configuració del «resolver» real s'hauria de fer a /etc/resolv.dnsmaq.conf.",
|
||||
"diagnosis_dns_good_conf": "Bona configuració DNS pel domini {domain} (categoria {category})",
|
||||
"diagnosis_dns_bad_conf": "Configuració DNS incorrecta o inexistent pel domini {domain} (categoria {category})",
|
||||
"diagnosis_dns_missing_record": "Segons la configuració DNS recomanada, hauríeu d'afegir un registre DNS\ntipus: {type}\nnom: {name}\nvalor: {value}.",
|
||||
"diagnosis_dns_discrepancy": "El registre DNS de tipus {type} i nom {name} no concorda amb la configuració recomanada.\nValor actual: {current}\nValor esperat: {value}",
|
||||
"diagnosis_ip_broken_resolvconf": "La resolució de nom de domini sembla caiguda en el servidor, podria estar relacionat amb el fet que <code>/etc/resolv.conf</code> no apunta cap a <code>127.0.0.1</code>.",
|
||||
"diagnosis_ip_weird_resolvconf": "La resolució DNS sembla estar funcionant, però sembla que esteu utilitzant un versió personalitzada de <code>/etc/resolv.conf</code>.",
|
||||
"diagnosis_ip_weird_resolvconf_details": "El fitxer <code>etc/resolv.conf</code> hauria de ser un enllaç simbòlic cap a <code>/etc/resolvconf/run/resolv.conf</code> i que aquest apunti cap a <code>127.0.0.1</code> (dnsmasq). La configuració del «resolver» real s'hauria de fer a <code>/etc/resolv.dnsmaq.conf</code>.",
|
||||
"diagnosis_dns_good_conf": "Els registres DNS han estat correctament configurats pel domini {domain} (categoria {category})",
|
||||
"diagnosis_dns_bad_conf": "Alguns registres DNS són incorrectes o no existeixen pel domini {domain} (categoria {category})",
|
||||
"diagnosis_dns_missing_record": "Segons la configuració DNS recomanada, hauríeu d'afegir un registre DNS amb la següent informació.<br>Tipus: <code>{type}</code><br>Nom: <code>{name}</code><br>Valor: <code>{value}</code>",
|
||||
"diagnosis_dns_discrepancy": "La configuració DNS següent sembla que no segueix la configuració recomanada: <br>Tipus: <code>{type}</code><br>Nom: <code>{name}</code><br>Valor actual: <code>{current}</code><br>Valor esperat: <code>{value}</code>",
|
||||
"diagnosis_services_bad_status": "El servei {service} està {status} :(",
|
||||
"diagnosis_diskusage_verylow": "El lloc d'emmagatzematge {mountpoint} (en l'aparell {device}) només té disponibles {free} ({free_percent}%). Hauríeu de considerar alliberar una mica d'espai.",
|
||||
"diagnosis_diskusage_low": "El lloc d'emmagatzematge {mountpoint} (en l'aparell {device}) només té disponibles {free} ({free_percent}%). Aneu amb compte.",
|
||||
"diagnosis_diskusage_ok": "El lloc d'emmagatzematge {mountpoint} (en l'aparell {device}) encara té {free} ({free_percent}%) lliures!",
|
||||
"diagnosis_diskusage_verylow": "El lloc d'emmagatzematge <code>{mountpoint}</code> (en l'aparell <code>{device}</code>) només té disponibles {free} ({free_percent}%). Hauríeu de considerar alliberar una mica d'espai!",
|
||||
"diagnosis_diskusage_low": "El lloc d'emmagatzematge <code>{mountpoint}</code> (en l'aparell <code>{device}</code>) només té disponibles {free} ({free_percent}%). Aneu amb compte.",
|
||||
"diagnosis_diskusage_ok": "El lloc d'emmagatzematge <code>{mountpoint}</code> (en l'aparell <code>{device}</code>) encara té {free} ({free_percent}%) lliures!",
|
||||
"diagnosis_ram_verylow": "El sistema només té {available} ({available_percent}%) de memòria RAM disponibles! (d'un total de {total})",
|
||||
"diagnosis_ram_ok": "El sistema encara té {available} ({available_percent}%) de memòria RAM disponibles d'un total de {total}.",
|
||||
"diagnosis_swap_notsomuch": "El sistema només té {total} de swap. Hauríeu de considerar tenir un mínim de {recommended} per evitar situacions en les que el sistema es queda sense memòria.",
|
||||
"diagnosis_swap_ok": "El sistema té {total} de swap!",
|
||||
"diagnosis_regenconf_allgood": "Tots els fitxers de configuració estan en acord amb la configuració recomanada!",
|
||||
"diagnosis_regenconf_manually_modified_details": "No hauria de ser cap problema sempre i quan sapigueu el que esteu fent ;) !",
|
||||
"diagnosis_regenconf_manually_modified_details": "No hauria de ser cap problema sempre i quan sapigueu el que esteu fent! YunoHost deixarà d'actualitzar aquest fitxer de manera automàtica… Però tingueu en compte que les actualitzacions de YunoHost podrien tenir canvis recomanats importants. Si voleu podeu mirar les diferències amb <cmd>yunohost tools regen-conf {category} --dry-run --with-diff</cmd> i forçar el restabliment de la configuració recomanada amb <cmd>yunohost tools regen-conf {category} --force</cmd>",
|
||||
"diagnosis_regenconf_manually_modified_debian": "El fitxer de configuració {file} ha estat modificat manualment respecte al fitxer per defecte de Debian.",
|
||||
"diagnosis_regenconf_manually_modified_debian_details": "No hauria de ser cap problema, però ho haureu de vigilar...",
|
||||
"diagnosis_security_all_good": "No s'ha trobat cap vulnerabilitat de seguretat crítica.",
|
||||
|
@ -577,11 +577,11 @@
|
|||
"diagnosis_description_mail": "Correu electrònic",
|
||||
"migration_description_0013_futureproof_apps_catalog_system": "Migrar al nou sistema de catàleg d'aplicacions resistent al pas del temps",
|
||||
"app_upgrade_script_failed": "Hi ha hagut un error en el script d'actualització de l'aplicació",
|
||||
"diagnosis_services_bad_status_tip": "Podeu intentar reiniciar el servei, i si no funciona, podeu mirar els registres del servei utilitzant «yunohost service log {service}» o a través de «Serveis» a la secció de la pàgina web d'administració.",
|
||||
"diagnosis_ports_forwarding_tip": "Per arreglar aquest problema, segurament s'ha de configurar el reenviament de ports en el router tal i com s'explica a https://yunohost.org/isp_box_config",
|
||||
"diagnosis_http_bad_status_code": "El sistema de diagnòstic no ha pogut connectar amb el servidor. Podria ser que una altra màquina hagi contestat en lloc del servidor. S'hauria de comprovar que el reenviament del port 80 sigui correcte, que la configuració NGINX està actualitzada i que el reverse-proxy no està interferint.",
|
||||
"diagnosis_services_bad_status_tip": "Podeu intentar <a href='#/services/{service}'>reiniciar el servei</a>, i si no funciona, podeu mirar <a href='#/services/{service}'>els registres a la pàgina web d'administració</a> (des de la línia de comandes, ho podeu fer utilitzant <cmd>yunohost service restart {service}</cmd> i <cmd>yunohost service log {service}</cmd>).",
|
||||
"diagnosis_ports_forwarding_tip": "Per arreglar aquest problema, segurament s'ha de configurar el reenviament de ports en el router tal i com s'explica a <a href='https://yunohost.org/isp_box_config'>https://yunohost.org/isp_box_config</a>",
|
||||
"diagnosis_http_bad_status_code": "Sembla que una altra màquina (potser el router) a respost en lloc del vostre servidor.<br>1. La causa més probable per a aquest problema és que el port 80 (i 443) <a href='https://yunohost.org/isp_box_config'>no reenvien correctament cap al vostre servidor</a>.<br>2. En configuracions més complexes: assegureu-vos que no hi ha cap tallafoc o reverse-proxy interferint.",
|
||||
"diagnosis_no_cache": "Encara no hi ha memòria cau pel diagnòstic de la categoria «{category}»",
|
||||
"diagnosis_http_timeout": "S'ha exhaurit el temps d'esperar intentant connectar amb el servidor des de l'exterior. Sembla que no s'hi pot accedir. S'hauria de comprovar que el reenviament del port 80 és correcte, que NGINX funciona, i que el tallafocs no està interferint.",
|
||||
"diagnosis_http_timeout": "S'ha exhaurit el temps d'esperar intentant connectar amb el servidor des de l'exterior.<br>1. La causa més probable per a aquest problema és que el port 80 (i 443) <a href='https://yunohost.org/isp_box_config'>no reenvien correctament cap al vostre servidor</a>.<br>2. També us hauríeu d'assegurar que el servei nginx estigui funcionant<br>3. En configuracions més complexes: assegureu-vos que no hi ha cap tallafoc o reverse-proxy interferint.",
|
||||
"diagnosis_http_connection_error": "Error de connexió: no s'ha pogut connectar amb el domini demanat, segurament és inaccessible.",
|
||||
"yunohost_postinstall_end_tip": "S'ha completat la post-instal·lació. Per acabar la configuració, considereu:\n - afegir un primer usuari a través de la secció «Usuaris» a la pàgina web d'administració (o emprant «yunohost user create <username>» a la línia d'ordres);\n - diagnosticar possibles problemes a través de la secció «Diagnòstics» a la pàgina web d'administració (o emprant «yunohost diagnosis run» a la línia d'ordres);\n - llegir les seccions «Finalizing your setup» i «Getting to know Yunohost» a la documentació per administradors: https://yunohost.org/admindoc.",
|
||||
"migration_description_0014_remove_app_status_json": "Eliminar els fitxers d'aplicació status.json heretats",
|
||||
|
@ -596,5 +596,76 @@
|
|||
"diagnosis_description_web": "Web",
|
||||
"diagnosis_basesystem_hardware_board": "El model de la targeta del servidor és {model}",
|
||||
"diagnosis_basesystem_hardware": "L'arquitectura del maquinari del servidor és {virt} {arch}",
|
||||
"group_already_exist_on_system_but_removing_it": "El grup {group} ja existeix en els grups del sistema, però YunoHost l'eliminarà…"
|
||||
"group_already_exist_on_system_but_removing_it": "El grup {group} ja existeix en els grups del sistema, però YunoHost l'eliminarà…",
|
||||
"certmanager_warning_subdomain_dns_record": "El subdomini «{subdomain:s}» no resol a la mateixa adreça IP que «{domain:s}». Algunes funcions no estaran disponibles fins que no s'hagi arreglat i s'hagi regenerat el certificat.",
|
||||
"domain_cannot_add_xmpp_upload": "No podeu afegir dominis començant per «xmpp-upload.». Aquest tipus de nom està reservat per a la funció de pujada de XMPP integrada a YunoHost.",
|
||||
"diagnosis_display_tip": "Per veure els problemes que s'han trobat, podeu anar a la secció de Diagnòstic a la pàgina web d'administració, o utilitzar « yunohost diagnostic show --issues » a la línia de comandes.",
|
||||
"diagnosis_mail_outgoing_port_25_blocked_relay_vpn": "Alguns proveïdors no permeten desbloquejar el port de sortida 25 perquè no els hi importa la Neutralitat de la Xarxa.<br> - Alguns d'ells ofereixen l'alternativa d'<a href='https://yunohost.org/#/smtp_relay'>utilitzar un relay de servidor de correu electrònic</a> tot i que implica que el relay serà capaç d'espiar el tràfic de correus electrònics.<br>- Una alternativa respectuosa amb la privacitat és utilitzar una VPN *amb una IP pública dedicada* per sortejar aquest tipus de limitació. Vegeu <a href='https://yunohost.org/#/vpn_advantage'>https://yunohost.org/#/vpn_advantage</a><br>- També podeu considerar canviar-vos a <a href='https://yunohost.org/#/isp'>un proveïdor més respectuós de la neutralitat de la xarxa</a>",
|
||||
"diagnosis_ip_global": "IP global: <code>{global}</code>",
|
||||
"diagnosis_ip_local": "IP local: <code>{local}</code>",
|
||||
"diagnosis_dns_point_to_doc": "Consulteu la documentació a <a href='https://yunohost.org/dns_config'>https://yunohost.org/dns_config</a> si necessiteu ajuda per configurar els registres DNS.",
|
||||
"diagnosis_mail_outgoing_port_25_ok": "El servidor de correu electrònic SMTP pot enviar correus electrònics (el port de sortida 25 no està bloquejat).",
|
||||
"diagnosis_mail_outgoing_port_25_blocked_details": "Primer heu d'intentar desbloquejar el port 25 en la interfície del vostre router o en la interfície del vostre allotjador. (Alguns proveïdors d'allotjament demanen enviar un tiquet de suport en aquests casos).",
|
||||
"diagnosis_mail_ehlo_ok": "El servidor de correu electrònic SMTP no és accessible des de l'exterior i per tant no pot rebre correus electrònics!",
|
||||
"diagnosis_mail_ehlo_unreachable": "El servidor de correu electrònic SMTP no és accessible des de l'exterior amb IPv{ipversion}. No podrà rebre correus electrònics.",
|
||||
"diagnosis_mail_ehlo_bad_answer": "Un servei no SMTP a respost en el port 25 amb IPv{ipversion}",
|
||||
"diagnosis_mail_ehlo_bad_answer_details": "Podria ser que sigui per culpa d'una altra màquina responent en lloc del servidor.",
|
||||
"diagnosis_mail_ehlo_wrong": "Un servidor de correu electrònic SMTP diferent respon amb IPv{ipversion}. És probable que el vostre servidor no pugui rebre correus electrònics.",
|
||||
"diagnosis_mail_ehlo_could_not_diagnose": "No s'ha pogut diagnosticar si el servidor de correu electrònic postfix és accessible des de l'exterior amb IPv{ipversion}.",
|
||||
"diagnosis_mail_ehlo_could_not_diagnose_details": "Error: {error}",
|
||||
"diagnosis_mail_fcrdns_ok": "S'ha configurat correctament el servidor DNS invers!",
|
||||
"diagnosis_mail_blacklist_ok": "Sembla que les IPs i el dominis d'aquest servidor no són en una llista negra",
|
||||
"diagnosis_mail_blacklist_listed_by": "La vostra IP o domini <code>{item}</code> està en una llista negra a {blacklist_name}",
|
||||
"diagnosis_mail_blacklist_reason": "El motiu de ser a la llista negra és: {reason}",
|
||||
"diagnosis_mail_fcrdns_different_from_ehlo_domain": "El DNS invers no està correctament configurat amb IPv{ipversion}. Alguns correus electrònics poden no arribar al destinatari o ser marcats com correu brossa.",
|
||||
"diagnosis_mail_fcrdns_different_from_ehlo_domain_details": "DNS invers actual: <code>{rdns_domain}</code><br>Valor esperat: <code>{ehlo_domain}</code>",
|
||||
"diagnosis_mail_queue_ok": "{nb_pending} correus electrònics pendents en les cues de correu electrònic",
|
||||
"diagnosis_mail_queue_unavailable": "No s'ha pogut consultar el nombre de correus electrònics pendents en la cua",
|
||||
"diagnosis_mail_queue_unavailable_details": "Error: {error}",
|
||||
"diagnosis_mail_queue_too_big": "Hi ha massa correus electrònics pendents en la cua ({nb_pending} correus electrònics)",
|
||||
"diagnosis_http_hairpinning_issue": "Sembla que la vostra xarxa no té el hairpinning activat.",
|
||||
"diagnosis_http_nginx_conf_not_up_to_date": "La configuració NGINX d'aquest domini sembla que ha estat modificada manualment, i no deixa que YunoHost diagnostiqui si és accessible amb HTTP.",
|
||||
"diagnosis_http_nginx_conf_not_up_to_date_details": "Per arreglar el problema, mireu les diferències amb la línia d'ordres utilitzant <cmd>yunohost tools regen-conf nginx --dry-run --with-diff</cmd> i si els canvis us semblen bé els podeu fer efectius utilitzant <cmd>yunohost tools regen-conf nginx --force</cmd>.",
|
||||
"global_settings_setting_smtp_allow_ipv6": "Permet l'ús de IPv6 per rebre i enviar correus electrònics",
|
||||
"diagnosis_mail_ehlo_unreachable_details": "No s'ha pogut establir una connexió amb el vostre servidor en el port 25 amb IPv{ipversion}. Sembla que el servidor no és accessible.<br>1. La causa més comú per aquest problema és que el port 25 <a href='https://yunohost.org/isp_box_config'>no està correctament redireccionat cap al vostre servidor</a>.<br>2. També us hauríeu d'assegurar que el servei postfix estigui funcionant.<br>3. En configuracions més complexes: assegureu-vos que que no hi hagi cap tallafoc ni reverse-proxy interferint.",
|
||||
"diagnosis_mail_ehlo_wrong_details": "El EHLO rebut pel servidor de diagnòstic remot amb IPv{ipversion} és diferent al domini del vostre servidor.<br>EHLO rebut: <code>{wrong_ehlo}</code><br>Esperat: <code>{right_ehlo}</code><br>La causa més habitual d'aquest problema és que el port 25 <a href='https://yunohost.org/isp_box_config'>no està correctament reenviat cap al vostre servidor</a>. També podeu comprovar que no hi hagi un tallafocs o un reverse-proxy interferint.",
|
||||
"diagnosis_mail_fcrdns_dns_missing": "No hi ha cap DNS invers definit per IPv{ipversion}. Alguns correus electrònics poden no entregar-se o poden ser marcats com a correu brossa.",
|
||||
"diagnosis_mail_blacklist_website": "Després d'haver identificat perquè estàveu llistats i haver arreglat el problema, no dubteu en demanar que la vostra IP o domini sigui eliminat de {blacklist_website}",
|
||||
"diagnosis_ports_partially_unreachable": "El port {port} no és accessible des de l'exterior amb IPv{failed}.",
|
||||
"diagnosis_http_partially_unreachable": "El domini {domain} sembla que no és accessible utilitzant HTTP des de l'exterior de la xarxa local amb IPv{failed}, tot i que funciona amb IPv{passed}.",
|
||||
"diagnosis_mail_fcrdns_nok_details": "Hauríeu d'intentar configurar primer el DNS invers amb <code>{ehlo_domain}</code> en la interfície del router o en la interfície del vostre allotjador. (Alguns allotjadors requereixen que obris un informe de suport per això).",
|
||||
"diagnosis_mail_fcrdns_nok_alternatives_4": "Alguns proveïdors no permeten configurar el DNS invers (o aquesta funció pot no funcionar…). Si teniu problemes a causa d'això, considereu les solucions següents:<br> - Alguns proveïdors d'accés a internet (ISP) donen l'alternativa de <a href='https://yunohost.org/#/smtp_relay'> utilitzar un relay de servidor de correu electrònic</a> tot i que implica que el relay podrà espiar el trànsit de correus electrònics.<br>- Una alternativa respectuosa amb la privacitat és utilitzar una VPN *amb una IP pública dedicada* per sobrepassar aquest tipus de limitacions. Mireu <a href='https://yunohost.org/#/vpn_advantage'>https://yunohost.org/#/vpn_advantage</a><br>- O es pot <a href='https://yunohost.org/#/isp'>canviar a un proveïdor diferent</a>",
|
||||
"diagnosis_mail_fcrdns_nok_alternatives_6": "Alguns proveïdors no permeten configurar el vostre DNS invers (o la funció no els hi funciona…). Si el vostre DNS invers està correctament configurat per IPv4, podeu intentar deshabilitar l'ús de IPv6 per a enviar correus electrònics utilitzant <cmd>yunohost settings set smtp.allow_ipv6 -v off</cmd>. Nota: aquesta última solució implica que no podreu enviar o rebre correus electrònics cap a els pocs servidors que hi ha que només tenen IPv-6.",
|
||||
"diagnosis_http_hairpinning_issue_details": "Això és probablement a causa del router del vostre proveïdor d'accés a internet. El que fa, que gent de fora de la xarxa local pugui accedir al servidor sense problemes, però no la gent de dins la xarxa local (com vostè probablement) quan s'utilitza el nom de domini o la IP global. Podreu segurament millorar la situació fent una ullada a <a href='https://yunohost.org/dns_local_network'>https://yunohost.org/dns_local_network</a>",
|
||||
"backup_archive_cant_retrieve_info_json": "No s'ha pogut carregar la informació de l'arxiu «{archive}»… No s'ha pogut obtenir el fitxer info.json (o no és un fitxer json vàlid).",
|
||||
"backup_archive_corrupted": "Sembla que l'arxiu de la còpia de seguretat «{archive}» està corromput : {error}",
|
||||
"certmanager_domain_not_diagnosed_yet": "Encara no hi ha cap resultat de diagnòstic per al domini %s. Torneu a executar el diagnòstic per a les categories «Registres DNS» i «Web» en la secció de diagnòstic per comprovar que el domini està preparat per a Let's Encrypt. (O si sabeu el que esteu fent, utilitzant «--no-checks» per deshabilitar les comprovacions.)",
|
||||
"diagnosis_ip_no_ipv6_tip": "Utilitzar una IPv6 no és obligatori per a que funcioni el servidor, però és millor per la salut d'Internet en conjunt. La IPv6 hauria d'estar configurada automàticament pel sistema o pel proveïdor si està disponible. Si no és el cas, pot ser necessari configurar alguns paràmetres més de forma manual tal i com s'explica en la documentació disponible aquí: <a href='https://yunohost.org/#/ipv6'>https://yunohost.org/#/ipv6</a>. Si no podeu habilitar IPv6 o us sembla massa tècnic, podeu ignorar aquest avís sense problemes.",
|
||||
"diagnosis_domain_expiration_not_found": "No s'ha pogut comprovar la data d'expiració d'alguns dominis",
|
||||
"diagnosis_domain_not_found_details": "El domini {domain} no existeix en la base de dades WHOIS o ha expirat!",
|
||||
"diagnosis_domain_expiration_not_found_details": "La informació WHOIS pel domini {domain} sembla que no conté informació sobre la data d'expiració?",
|
||||
"diagnosis_domain_expiration_success": "Els vostres dominis estan registrats i no expiraran properament.",
|
||||
"diagnosis_domain_expiration_warning": "Alguns dominis expiraran properament!",
|
||||
"diagnosis_domain_expiration_error": "Alguns dominis expiraran EN BREUS!",
|
||||
"diagnosis_domain_expires_in": "{domain} expirarà en {days} dies.",
|
||||
"diagnosis_swap_tip": "Vigileu i tingueu en compte que els servidor està allotjant memòria d'intercanvi en una targeta SD o en l'emmagatzematge SSD, això pot reduir dràsticament l'esperança de vida del dispositiu.",
|
||||
"restore_already_installed_apps": "No s'han pogut restaurar les següents aplicacions perquè ja estan instal·lades: {apps}",
|
||||
"app_packaging_format_not_supported": "No es pot instal·lar aquesta aplicació ja que el format del paquet no és compatible amb la versió de YunoHost del sistema. Hauríeu de considerar actualitzar el sistema.",
|
||||
"diagnosis_dns_try_dyndns_update_force": "La configuració DNS d'aquest domini hauria de ser gestionada automàticament per YunoHost. Si aquest no és el cas, podeu intentar forçar-ne l'actualització utilitzant <cmd>yunohost dyndns update --force</cmd>.",
|
||||
"migration_0015_cleaning_up": "Netejant la memòria cau i els paquets que ja no són necessaris…",
|
||||
"migration_0015_specific_upgrade": "Començant l'actualització dels paquets del sistema que s'han d'actualitzar de forma independent…",
|
||||
"migration_0015_modified_files": "Tingueu en compte que s'han trobat els següents fitxers que es van modificar manualment i podria ser que es sobreescriguin durant l'actualització: {manually_modified_files}",
|
||||
"migration_0015_problematic_apps_warning": "Tingueu en compte que s'han trobat les següents aplicacions que podrien ser problemàtiques. Sembla que aquestes aplicacions no s'han instal·lat des del catàleg d'aplicacions de YunoHost, o no estan marcades com «funcionant». En conseqüència, no es pot garantir que segueixin funcionant després de l'actualització: {problematic_apps}",
"migration_0015_general_warning": "Tingueu en compte que aquesta migració és una operació delicada. L'equip de YunoHost ha fet tots els possibles per revisar i testejar, però tot i això podria ser que la migració trenqui alguna part del sistema o algunes aplicacions.\n\nPer tant, està recomana:\n - Fer una còpia de seguretat de totes les dades o aplicacions crítiques. Més informació a https://yunohost.org/backup;\n - Ser pacient un cop comenci la migració: en funció de la connexió Internet i del maquinari, podria estar unes hores per actualitzar-ho tot.",
"migration_0015_system_not_fully_up_to_date": "El sistema no està completament al dia. Heu de fer una actualització normal abans de fer la migració a Buster.",
"migration_0015_not_enough_free_space": "Hi ha poc espai lliure a /var/! HI hauria d'haver un mínim de 1GB lliure per poder fer aquesta migració.",
"migration_0015_not_stretch": "La distribució actual de Debian no és Stretch!",
"migration_0015_yunohost_upgrade": "Començant l'actualització del nucli de YunoHost…",
"migration_0015_still_on_stretch_after_main_upgrade": "Alguna cosa ha anat malament durant la actualització principal, sembla que el sistema encara està en Debian Stretch",
"migration_0015_main_upgrade": "Començant l'actualització principal…",
"migration_0015_patching_sources_list": "Apedaçament de source.lists…",
"migration_0015_start": "Començant la migració a Buster",
"migration_description_0015_migrate_to_buster": "Actualitza els sistema a Debian Buster i YunoHost 4.x",
"regenconf_need_to_explicitly_specify_ssh": "La configuració ssh ha estat modificada manualment, però heu d'especificar explícitament la categoria «ssh» amb --force per fer realment els canvis.",
"migration_0015_weak_certs": "S'han trobat els següents certificats que encara utilitzen algoritmes de signatura febles i s'han d'actualitzar per a ser compatibles amb la propera versió de nginx: {certs}"
}
@ -15,7 +15,7 @@
"app_removed": "{app:s} wurde entfernt",
"app_sources_fetch_failed": "Quelldateien konnten nicht abgerufen werden, ist die URL korrekt?",
"app_unknown": "Unbekannte App",
"app_upgrade_failed": "{app:s} konnte nicht aktualisiert werden",
"app_upgrade_failed": "{app:s} konnte nicht aktualisiert werden: {error}",
"app_upgraded": "{app:s} aktualisiert",
"ask_email": "E-Mail-Adresse",
"ask_firstname": "Vorname",
@ -35,7 +35,7 @@
"backup_hook_unknown": "Der Datensicherungshook '{hook:s}' unbekannt",
"backup_invalid_archive": "Dies ist kein Backup-Archiv",
"backup_nothings_done": "Keine Änderungen zur Speicherung",
"backup_output_directory_forbidden": "Wähle ein anderes Ausgabeverzeichnis. Datensicherung können nicht in /bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var oder in Unterordnern von /home/yunohost.backup/archives erstellt werden",
"backup_output_directory_forbidden": "Wähle ein anderes Ausgabeverzeichnis. Datensicherungen können nicht in /bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var oder in Unterordnern von /home/yunohost.backup/archives erstellt werden",
"backup_output_directory_not_empty": "Der gewählte Ausgabeordner sollte leer sein",
"backup_output_directory_required": "Für die Datensicherung muss ein Zielverzeichnis angegeben werden",
"backup_running_hooks": "Datensicherunghook wird ausgeführt…",
@ -165,28 +165,28 @@
"mailbox_used_space_dovecot_down": "Der Dovecot Mailbox Dienst muss gestartet sein, wenn du den von der Mailbox belegten Speicher angezeigen lassen willst",
"package_unknown": "Unbekanntes Paket '{pkgname}'",
"certmanager_attempt_to_replace_valid_cert": "Du versuchst gerade eine richtiges und gültiges Zertifikat der Domain {domain:s} zu überschreiben! (Benutze --force , um diese Nachricht zu umgehen)",
"certmanager_domain_unknown": "Unbekannte Domain {domain:s}",
"certmanager_domain_cert_not_selfsigned": "Das Zertifikat der Domain {domain:s} is kein selbstsigniertes Zertifikat. Bist du dir sicher, dass du es ersetzen willst? (Benutze --force)",
"certmanager_certificate_fetching_or_enabling_failed": "Es scheint so als wäre die Aktivierung des Zertifikats für die Domain {domain:s} fehlgeschlagen...",
"certmanager_attempt_to_renew_nonLE_cert": "Das Zertifikat der Domain {domain:s} wurde nicht von Let's Encrypt ausgestellt. Es kann nicht automatisch erneuert werden!",
"certmanager_domain_unknown": "Unbekannte Domain '{domain:s}'",
"certmanager_domain_cert_not_selfsigned": "Das Zertifikat der Domain {domain:s} ist kein selbstsigniertes Zertifikat. Bist du dir sicher, dass du es ersetzen willst? (Benutze dafür '--force')",
"certmanager_certificate_fetching_or_enabling_failed": "Die Aktivierung des neuen Zertifikats für die Domain {domain:s} ist fehlgeschlagen…",
"certmanager_attempt_to_renew_nonLE_cert": "Das Zertifikat der Domain '{domain:s}' wurde nicht von Let's Encrypt ausgestellt. Es kann nicht automatisch erneuert werden!",
"certmanager_attempt_to_renew_valid_cert": "Das Zertifikat der Domain {domain:s} läuft nicht in Kürze ab! (Benutze --force um diese Nachricht zu umgehen)",
"certmanager_domain_http_not_working": "Es scheint so, dass die Domain {domain:s} nicht über HTTP erreicht werden kann. Bitte überprüfe, ob deine DNS und nginx Konfiguration in Ordnung ist",
"certmanager_error_no_A_record": "Kein DNS 'A' Eintrag für die Domain {domain:s} gefunden. Dein Domainname muss auf diese Maschine weitergeleitet werden, um ein Let's Encrypt Zertifikat installieren zu können! (Wenn du weißt was du tust, kannst du --no-checks benutzen, um diese Überprüfung zu überspringen. )",
"certmanager_domain_dns_ip_differs_from_public_ip": "Der DNS 'A' Eintrag der Domain {domain:s} unterscheidet sich von dieser Server-IP. Wenn du gerade deinen A Eintrag verändert hast, warte bitte etwas, damit die Änderungen wirksam werden (du kannst die DNS Propagation mittels Website überprüfen) (Wenn du weißt was du tust, kannst du --no-checks benutzen, um diese Überprüfung zu überspringen. )",
"certmanager_cannot_read_cert": "Es ist ein Fehler aufgetreten, als es versucht wurde das aktuelle Zertifikat für die Domain {domain:s} zu öffnen (Datei: {file:s}), Grund: {reason:s}",
"certmanager_cert_install_success_selfsigned": "Ein selbstsigniertes Zertifikat für die Domain {domain:s} wurde erfolgreich installiert!",
"certmanager_cert_install_success": "Für die Domain {domain:s} wurde erfolgreich ein Let's Encrypt installiert!",
"certmanager_cert_renew_success": "Das Let's Encrypt Zertifikat für die Domain {domain:s} wurde erfolgreich erneuert!",
"certmanager_cert_install_success_selfsigned": "Ein selbstsigniertes Zertifikat für die Domain {domain:s} wurde erfolgreich installiert",
"certmanager_cert_install_success": "Für die Domain {domain:s} wurde erfolgreich ein Let's Encrypt Zertifikat installiert.",
"certmanager_cert_renew_success": "Das Let's Encrypt Zertifikat für die Domain {domain:s} wurde erfolgreich erneuert.",
"certmanager_hit_rate_limit": "Es wurden innerhalb kurzer Zeit schon zu viele Zertifikate für die exakt gleiche Domain {domain:s} ausgestellt. Bitte versuche es später nochmal. Besuche https://letsencrypt.org/docs/rate-limits/ für mehr Informationen",
"certmanager_cert_signing_failed": "Signieren des neuen Zertifikats ist fehlgeschlagen",
"certmanager_cert_signing_failed": "Das neue Zertifikat konnte nicht signiert werden",
"certmanager_no_cert_file": "Die Zertifikatsdatei für die Domain {domain:s} (Datei: {file:s}) konnte nicht gelesen werden",
"certmanager_conflicting_nginx_file": "Die Domain konnte nicht für die ACME challenge vorbereitet werden: Die nginx Konfigurationsdatei {filepath:s} verursacht Probleme und sollte vorher entfernt werden",
"domain_cannot_remove_main": "Die primäre Domain konnten nicht entfernt werden. Lege zuerst einen neue primäre Domain fest",
"certmanager_self_ca_conf_file_not_found": "Die Konfigurationsdatei der Zertifizierungsstelle für selbstsignierte Zertifikate wurde nicht gefunden (Datei {file:s})",
"certmanager_acme_not_configured_for_domain": "Das Zertifikat für die Domain {domain:s} scheint nicht richtig installiert zu sein. Bitte führe den Befehl cert-install für diese Domain nochmals aus.",
"certmanager_acme_not_configured_for_domain": "Die ACME Challenge kann im Moment nicht für {domain} ausgeführt werden, weil in ihrer nginx conf das entsprechende Code-Snippet fehlt... Bitte stellen Sie sicher, dass Ihre nginx-Konfiguration mit 'yunohost tools regen-conf nginx --dry-run --with-diff' auf dem neuesten Stand ist.",
"certmanager_unable_to_parse_self_CA_name": "Der Name der Zertifizierungsstelle für selbstsignierte Zertifikate konnte nicht analysiert werden (Datei: {file:s})",
"certmanager_http_check_timeout": "Eine Zeitüberschreitung ist aufgetreten als der Server versuchte sich selbst über HTTP mit der öffentlichen IP (Domain {domain:s} mit der IP {ip:s}) zu erreichen. Möglicherweise ist dafür hairpinning oder eine falsch konfigurierte Firewall/Router deines Servers dafür verantwortlich.",
"certmanager_couldnt_fetch_intermediate_cert": "Eine Zeitüberschreitung ist aufgetreten als der Server versuchte die Teilzertifikate von Let's Encrypt zusammenzusetzen. Die Installation/Erneuerung des Zertifikats wurde abgebrochen - bitte versuche es später erneut.",
"certmanager_http_check_timeout": "Eine Zeitüberschreitung ist aufgetreten, als der Server versuchte sich selbst über HTTP mit der öffentlichen IP (Domain '{domain:s}' mit der IP '{ip:s}') zu erreichen. Möglicherweise ist dafür hairpinning oder eine falsch konfigurierte Firewall/Router deines Servers dafür verantwortlich.",
"certmanager_couldnt_fetch_intermediate_cert": "Eine Zeitüberschreitung ist aufgetreten als der Server versuchte die Teilzertifikate von Let's Encrypt zusammenzusetzen. Die Installation/Erneuerung des Zertifikats wurde abgebrochen — bitte versuche es später erneut.",
"domain_hostname_failed": "Erstellen des neuen Hostnamens fehlgeschlagen",
"yunohost_ca_creation_success": "Die lokale Zertifizierungs-Authorität wurde angelegt.",
"app_already_installed_cant_change_url": "Diese Application ist bereits installiert. Die URL kann durch diese Funktion nicht modifiziert werden. Überprüfe ob `app changeurl` verfügbar ist.",
@ -213,7 +213,7 @@
"domain_dns_conf_is_just_a_recommendation": "Dieser Befehl zeigt Ihnen, was die * empfohlene * Konfiguration ist. Die DNS-Konfiguration wird NICHT für Sie eingerichtet. Es liegt in Ihrer Verantwortung, Ihre DNS-Zone in Ihrem Registrar gemäß dieser Empfehlung zu konfigurieren.",
"dpkg_lock_not_available": "Dieser Befehl kann momentan nicht ausgeführt werden, da anscheinend ein anderes Programm die Sperre von dpkg (dem Systempaket-Manager) verwendet",
"confirm_app_install_thirdparty": "WARNUNG! Das Installieren von Anwendungen von Drittanbietern kann die Integrität und Sicherheit Deines Systems beeinträchtigen. Du solltest es wahrscheinlich NICHT installieren, es sei denn, Du weisst, was Du tust. Bist du bereit, dieses Risiko einzugehen? [{answers:s}]",
"confirm_app_install_danger": "WARNUNG! Diese Anwendung ist noch experimentell (wenn nicht ausdrücklich \"not working\"/\"funktioniert nicht\") und es ist wahrscheinlich, dass Dein System Schaden nimmt! Du solltest es wahrscheinlich NICHT installieren, es sei denn, Du weisst, was Du tust. Bist du bereit, dieses Risiko einzugehen? [{answers:s}]",
"confirm_app_install_danger": "WARNUNG! Diese Anwendung ist noch experimentell (wenn nicht ausdrücklich \"not working\"/\"nicht funktionsfähig\")! Du solltest es wahrscheinlich NICHT installieren, es sei denn, du weißt, was du tust. Es wird keine Unterstützung geleistet, falls diese Anwendung nicht funktioniert oder dein System zerstört... Falls du bereit bist, dieses Risiko einzugehen, tippe '{answers:s}'",
"confirm_app_install_warning": "Warnung: Diese Anwendung funktioniert möglicherweise, ist jedoch nicht gut in YunoHost integriert. Einige Funktionen wie Single Sign-On und Backup / Restore sind möglicherweise nicht verfügbar. Trotzdem installieren? [{answers:s}] ",
"backup_with_no_restore_script_for_app": "Die App {app:s} hat kein Wiederherstellungsskript. Das Backup dieser App kann nicht automatisch wiederhergestellt werden.",
"backup_with_no_backup_script_for_app": "Die App {app:s} hat kein Sicherungsskript. Ignoriere es.",
@ -231,7 +231,7 @@
"backup_csv_creation_failed": "Die zur Wiederherstellung erforderliche CSV-Datei kann nicht erstellt werden",
"backup_couldnt_bind": "{src:s} konnte nicht an {dest:s} angebunden werden.",
"backup_borg_not_implemented": "Die Borg-Sicherungsmethode ist noch nicht implementiert",
"backup_ask_for_copying_if_needed": "Möchten Sie die Sicherung mit {size:s} MB temporär durchführen? (Dieser Weg wird verwendet, da einige Dateien nicht mit einer effizienteren Methode vorbereitet werden konnten).",
"backup_ask_for_copying_if_needed": "Möchten Sie die Sicherung mit {size:s} MB temporär durchführen? (Dieser Weg wird verwendet, da einige Dateien nicht mit einer effizienteren Methode vorbereitet werden konnten.)",
"backup_actually_backuping": "Erstellt ein Backup-Archiv aus den gesammelten Dateien …",
"ask_new_path": "Neuer Pfad",
"ask_new_domain": "Neue Domain",
@ -302,17 +302,44 @@
"app_install_script_failed": "Im Installationsscript ist ein Fehler aufgetreten",
"app_remove_after_failed_install": "Entfernen der App nach fehlgeschlagener Installation…",
"app_upgrade_script_failed": "Es ist ein Fehler im App-Upgrade-Skript aufgetreten",
"diagnosis_basesystem_host": "Server läuft unter Debian {debian_version}.",
"diagnosis_basesystem_host": "Server läuft unter Debian {debian_version}",
"diagnosis_basesystem_kernel": "Server läuft unter Linux-Kernel {kernel_version}",
"diagnosis_basesystem_ynh_single_version": "{package} Version: {version} ({repo})",
"diagnosis_basesystem_ynh_main_version": "Server läuft YunoHost {main_version} ({repo})",
"diagnosis_basesystem_ynh_inconsistent_versions": "Sie verwenden inkonsistente Versionen der YunoHost-Pakete... wahrscheinlich wegen eines fehlgeschlagenen oder teilweisen Upgrades.",
"diagnosis_display_tip_web": "Sie können den Abschnitt Diagnose (im Startbildschirm) aufrufen, um die gefundenen Probleme anzuzeigen.",
"apps_catalog_init_success": "Apps-Katalogsystem initialisiert!",
"apps_catalog_updating": "Aktualisierung des Applikationskatalogs...",
"apps_catalog_failed_to_download": "Der {apps_catalog} Apps-Katalog kann nicht heruntergeladen werden: {error}",
"apps_catalog_obsolete_cache": "Der Cache des Apps-Katalogs ist leer oder veraltet.",
"apps_catalog_init_success": "App-Katalogsystem initialisiert!",
"apps_catalog_updating": "Aktualisierung des Applikationskatalogs…",
"apps_catalog_failed_to_download": "Der {apps_catalog} App-Katalog kann nicht heruntergeladen werden: {error}",
"apps_catalog_obsolete_cache": "Der Cache des App-Katalogs ist leer oder veraltet.",
"apps_catalog_update_success": "Der Apps-Katalog wurde aktualisiert!",
"password_too_simple_1": "Das Passwort muss mindestens 8 Zeichen lang sein",
"diagnosis_display_tip_cli": "Sie können 'yunohost diagnosis show --issues' ausführen, um die gefundenen Probleme anzuzeigen."
"diagnosis_display_tip_cli": "Sie können 'yunohost diagnosis show --issues' ausführen, um die gefundenen Probleme anzuzeigen.",
"diagnosis_everything_ok": "Alles schaut gut aus für {category}!",
"diagnosis_failed": "Kann Diagnose-Ergebnis für die Kategorie '{category}' nicht abrufen: {error}",
"diagnosis_ip_connected_ipv4": "Der Server ist mit dem Internet über IPv4 verbunden!",
"diagnosis_no_cache": "Kein Diagnose Cache aktuell für die Kategorie '{category}'",
"diagnosis_ip_no_ipv4": "Der Server hat kein funktionierendes IPv4.",
"diagnosis_ip_connected_ipv6": "Der Server ist mit dem Internet über IPv6 verbunden!",
"diagnosis_ip_no_ipv6": "Der Server hat kein funktionierendes IPv6.",
"diagnosis_ip_not_connected_at_all": "Der Server scheint überhaupt nicht mit dem Internet verbunden zu sein!?",
"diagnosis_failed_for_category": "Diagnose fehlgeschlagen für die Kategorie '{category}': {error}",
"diagnosis_cache_still_valid": "(Cache noch gültig für {category} Diagnose. Werde keine neue Diagnose durchführen!)",
"diagnosis_cant_run_because_of_dep": "Kann Diagnose für {category} nicht ausführen während wichtige Probleme zu {dep} noch nicht behoben sind.",
"diagnosis_found_errors_and_warnings": "Habe {errors} erhebliche(s) Problem(e) (und {warnings} Warnung(en)) in Verbindung mit {category} gefunden!",
"diagnosis_ip_broken_dnsresolution": "Domänen-Namens-Auflösung scheint aus einem bestimmten Grund nicht zu funktionieren... Blockiert eine Firewall die DNS Anfragen?",
"diagnosis_ip_broken_resolvconf": "Domänen-Namens-Auflösung scheint nicht zu funktionieren, was daran liegen könnte, dass in <code>/etc/resolv.conf</> kein Eintrag auf <code>127.0.0.1</code> zeigt.",
"diagnosis_ip_weird_resolvconf_details": "Stattdessen sollte diese Datei ein Softlink auf /etc/resolvconf/run/resolv.conf sein, die auf sich selbst zu 127.0.0.1 zeigt (dnsmasq). Der eigentlich Auflösende sollte in /etc/resolv.dnsmasq.conf konfiguriert werden.",
"diagnosis_dns_good_conf": "Gute DNS Konfiguration für Domäne {domain} (Kategorie {category})",
"diagnosis_ignored_issues": "(+ {nb_ignored} ignorierte(s) Problem(e))",
"diagnosis_basesystem_hardware": "Server Hardware Architektur ist {virt} {arch}",
"diagnosis_basesystem_hardware_board": "Server Platinen Modell ist {model}",
"diagnosis_found_errors": "Habe {errors} erhebliche(s) Problem(e) in Verbindung mit {category} gefunden!",
"diagnosis_found_warnings": "Habe {warnings} Ding(e) gefunden, die verbessert werden könnten für {category}.",
"diagnosis_ip_dnsresolution_working": "Domänen-Namens-Auflösung funktioniert!",
"diagnosis_ip_weird_resolvconf": "DNS Auflösung scheint zu funktionieren, aber sei vorsichtig wenn du eine eigene <code>/etc/resolv.conf</code> verwendest.",
"diagnosis_display_tip": "Um die gefundenen Probleme zu sehen, kannst Du zum Diagnose-Bereich des webadmin gehen, oder 'yunohost diagnosis show --issues' in der Kommandozeile ausführen.",
"backup_archive_corrupted": "Das Backup-Archiv '{archive}' scheint beschädigt: {error}",
"backup_archive_cant_retrieve_info_json": "Die Informationen für das Archiv '{archive}' konnten nicht geladen werden... Die Datei info.json wurde nicht gefunden (oder ist kein gültiges json).",
"app_packaging_format_not_supported": "Diese App kann nicht installiert werden da das Paketformat nicht von der Yunohost-Version unterstützt wird. Denken Sie darüber nach das System zu aktualisieren."
}
@ -1,3 +1,3 @@
{
"password_too_simple_1": "Ο κωδικός πρόσβασης πρέπει να έχει μήκος τουλάχιστον 8 χαρακτήρων"
"password_too_simple_1": "Ο κωδικός πρόσβασης πρέπει να έχει τουλάχιστον 8 χαρακτήρες"
}
217
locales/en.json
@ -32,25 +32,26 @@
"app_manifest_invalid": "Something is wrong with the app manifest: {error}",
"app_not_upgraded": "The app '{failed_app}' failed to upgrade, and as a consequence the following apps' upgrades have been cancelled: {apps}",
"app_not_correctly_installed": "{app:s} seems to be incorrectly installed",
"app_not_installed": "Could not find the app '{app:s}' in the list of installed apps: {all_apps}",
"app_not_installed": "Could not find {app:s} in the list of installed apps: {all_apps}",
"app_not_properly_removed": "{app:s} has not been properly removed",
"app_removed": "{app:s} removed",
"app_requirements_checking": "Checking required packages for {app}…",
"app_requirements_checking": "Checking required packages for {app}...",
"app_requirements_unmeet": "Requirements are not met for {app}, the package {pkgname} ({version}) must be {spec}",
"app_remove_after_failed_install": "Removing the app following the installation failure…",
"app_remove_after_failed_install": "Removing the app following the installation failure...",
"app_sources_fetch_failed": "Could not fetch sources files, is the URL correct?",
"app_start_install": "Installing the app '{app}'…",
"app_start_remove": "Removing the app '{app}'…",
"app_start_backup": "Collecting files to be backed up for the app '{app}'…",
"app_start_restore": "Restoring the app '{app}'…",
"app_start_install": "Installing {app}...",
"app_start_remove": "Removing {app}...",
"app_start_backup": "Collecting files to be backed up for {app}...",
"app_start_restore": "Restoring {app}...",
"app_unknown": "Unknown app",
"app_unsupported_remote_type": "Unsupported remote type used for the app",
"app_upgrade_several_apps": "The following apps will be upgraded: {apps}",
"app_upgrade_app_name": "Now upgrading {app}…",
"app_upgrade_app_name": "Now upgrading {app}...",
"app_upgrade_failed": "Could not upgrade {app:s}: {error}",
"app_upgrade_script_failed": "An error occurred inside the app upgrade script",
"app_upgrade_some_app_failed": "Some apps could not be upgraded",
"app_upgraded": "{app:s} upgraded",
"app_packaging_format_not_supported": "This app cannot be installed because its packaging format is not supported by your YunoHost version. You should probably consider upgrading your system.",
"apps_already_up_to_date": "All apps are already up-to-date",
"apps_catalog_init_success": "App catalog system initialized!",
"apps_catalog_updating": "Updating application catalog…",
@ -66,22 +67,22 @@
"ask_new_path": "New path",
"ask_password": "Password",
"backup_abstract_method": "This backup method has yet to be implemented",
"backup_actually_backuping": "Creating a backup archive from the collected files…",
"backup_app_failed": "Could not back up the app '{app:s}'",
"backup_applying_method_borg": "Sending all files to backup into borg-backup repository…",
"backup_applying_method_copy": "Copying all files to backup…",
"backup_applying_method_custom": "Calling the custom backup method '{method:s}'…",
"backup_applying_method_tar": "Creating the backup TAR archive…",
"backup_archive_app_not_found": "Could not find the app '{app:s}' in the backup archive",
"backup_actually_backuping": "Creating a backup archive from the collected files...",
"backup_app_failed": "Could not back up {app:s}",
"backup_applying_method_borg": "Sending all files to backup into borg-backup repository...",
"backup_applying_method_copy": "Copying all files to backup...",
"backup_applying_method_custom": "Calling the custom backup method '{method:s}'...",
"backup_applying_method_tar": "Creating the backup TAR archive...",
"backup_archive_app_not_found": "Could not find {app:s} in the backup archive",
"backup_archive_broken_link": "Could not access the backup archive (broken link to {path:s})",
"backup_archive_name_exists": "A backup archive with this name already exists.",
"backup_archive_name_unknown": "Unknown local backup archive named '{name:s}'",
"backup_archive_open_failed": "Could not open the backup archive",
"backup_archive_cant_retrieve_info_json": "Could not load infos for archive '{archive}' ... The info.json cannot be retrieved (or is not a valid json).",
"backup_archive_cant_retrieve_info_json": "Could not load infos for archive '{archive}'... The info.json cannot be retrieved (or is not a valid json).",
"backup_archive_corrupted": "It looks like the backup archive '{archive}' is corrupted : {error}",
"backup_archive_system_part_not_available": "System part '{part:s}' unavailable in this backup",
"backup_archive_writing_error": "Could not add the files '{source:s}' (named in the archive '{dest:s}') to be backed up into the compressed archive '{archive:s}'",
"backup_ask_for_copying_if_needed": "Do you want to perform the backup using {size:s} MB temporarily? (This way is used since some files could not be prepared using a more efficient method.)",
"backup_ask_for_copying_if_needed": "Do you want to perform the backup using {size:s}MB temporarily? (This way is used since some files could not be prepared using a more efficient method.)",
"backup_borg_not_implemented": "The Borg backup method is not yet implemented",
"backup_cant_mount_uncompress_archive": "Could not mount the uncompressed archive as write protected",
"backup_cleaning_failed": "Could not clean up the temporary backup folder",
@ -100,20 +101,19 @@
"backup_method_copy_finished": "Backup copy finalized",
"backup_method_custom_finished": "Custom backup method '{method:s}' finished",
"backup_method_tar_finished": "TAR backup archive created",
"backup_mount_archive_for_restore": "Preparing archive for restoration…",
"backup_mount_archive_for_restore": "Preparing archive for restoration...",
"backup_no_uncompress_archive_dir": "There is no such uncompressed archive directory",
"backup_nothings_done": "Nothing to save",
"backup_output_directory_forbidden": "Pick a different output directory. Backups cannot be created in /bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var or /home/yunohost.backup/archives sub-folders",
"backup_output_directory_not_empty": "You should pick an empty output directory",
"backup_output_directory_required": "You must provide an output directory for the backup",
"backup_output_symlink_dir_broken": "Your archive directory '{path:s}' is a broken symlink. Maybe you forgot to re/mount or plug in the storage medium it points to.",
"backup_permission": "Backup permission for app {app:s}",
"backup_php5_to_php7_migration_may_fail": "Could not convert your archive to support PHP 7, you may be unable to restore your PHP apps (reason: {error:s})",
"backup_running_hooks": "Running backup hooks…",
"backup_permission": "Backup permission for {app:s}",
"backup_running_hooks": "Running backup hooks...",
"backup_system_part_failed": "Could not backup the '{part:s}' system part",
"backup_unable_to_organize_files": "Could not use the quick method to organize files in the archive",
"backup_with_no_backup_script_for_app": "The app '{app:s}' has no backup script. Ignoring.",
"backup_with_no_restore_script_for_app": "The '{app:s}' has no restoration script, you will not be able to automatically restore the backup of this app.",
"backup_with_no_restore_script_for_app": "{app:s} has no restoration script, you will not be able to automatically restore the backup of this app.",
"certmanager_acme_not_configured_for_domain": "The ACME challenge cannot be ran for {domain} right now because its nginx conf lacks the corresponding code snippet... Please make sure that your nginx configuration is up to date using `yunohost tools regen-conf nginx --dry-run --with-diff`.",
"certmanager_attempt_to_renew_nonLE_cert": "The certificate for the domain '{domain:s}' is not issued by Let's Encrypt. Cannot renew it automatically!",
"certmanager_attempt_to_renew_valid_cert": "The certificate for the domain '{domain:s}' is not about to expire! (You may use --force if you know what you're doing)",
@ -123,16 +123,15 @@
"certmanager_cert_install_success_selfsigned": "Self-signed certificate now installed for the domain '{domain:s}'",
"certmanager_cert_renew_success": "Let's Encrypt certificate renewed for the domain '{domain:s}'",
"certmanager_cert_signing_failed": "Could not sign the new certificate",
"certmanager_certificate_fetching_or_enabling_failed": "Trying to use the new certificate for {domain:s} did not work…",
"certmanager_certificate_fetching_or_enabling_failed": "Trying to use the new certificate for {domain:s} did not work...",
"certmanager_couldnt_fetch_intermediate_cert": "Timed out when trying to fetch intermediate certificate from Let's Encrypt. Certificate installation/renewal aborted—please try again later.",
"certmanager_domain_not_diagnosed_yet": "There is no diagnosis result for domain {domain} yet. Please re-run a diagnosis for categories 'DNS records' and 'Web' in the diagnosis section to check if the domain is ready for Let's Encrypt. (Or if you know what you are doing, use '--no-checks' to turn off those checks.)",
"certmanager_domain_cert_not_selfsigned": "The certificate for domain {domain:s} is not self-signed. Are you sure you want to replace it? (Use '--force' to do so.)",
"certmanager_domain_dns_ip_differs_from_public_ip": "The DNS 'A' record for the domain '{domain:s}' is different from this server's IP. If you recently modified your A record, please wait for it to propagate (some DNS propagation checkers are available online). (If you know what you are doing, use '--no-checks' to turn off those checks.)",
"certmanager_domain_http_not_working": "It seems the domain {domain:s} cannot be accessed through HTTP. Check that your DNS and NGINX configuration is correct",
"certmanager_domain_dns_ip_differs_from_public_ip": "The DNS records for domain '{domain:s}' is different from this server's IP. Please check the 'DNS records' (basic) category in the diagnosis for more info. If you recently modified your A record, please wait for it to propagate (some DNS propagation checkers are available online). (If you know what you are doing, use '--no-checks' to turn off those checks.)",
"certmanager_domain_http_not_working": "Domain {domain:s} does not seem to be accessible through HTTP. Please check the 'Web' category in the diagnosis for more info. (If you know what you are doing, use '--no-checks' to turn off those checks.)",
"certmanager_domain_unknown": "Unknown domain '{domain:s}'",
"certmanager_error_no_A_record": "No DNS 'A' record found for '{domain:s}'. You need to make your domain name point to your machine to be able to install a Let's Encrypt certificate. (If you know what you are doing, use '--no-checks' to turn off those checks.)",
"certmanager_warning_subdomain_dns_record": "Subdomain '{subdomain:s}' does not resolve to the same IP address as '{domain:s}'. Some features will not be available until you fix this and regenerate the certificate.",
"certmanager_hit_rate_limit": "Too many certificates already issued for this exact set of domains {domain:s} recently. Please try again later. See https://letsencrypt.org/docs/rate-limits/ for more details",
"certmanager_http_check_timeout": "Timed out when server tried to contact itself through HTTP using a public IP address (domain '{domain:s}' with IP '{ip:s}'). You may be experiencing a hairpinning issue, or the firewall/router ahead of your server is misconfigured.",
"certmanager_no_cert_file": "Could not read the certificate file for the domain {domain:s} (file: {file:s})",
"certmanager_self_ca_conf_file_not_found": "Could not find configuration file for self-signing authority (file: {file:s})",
"certmanager_unable_to_parse_self_CA_name": "Could not parse name of self-signing authority (file: {file:s})",
@ -158,10 +157,11 @@
"diagnosis_everything_ok": "Everything looks good for {category}!",
"diagnosis_failed": "Failed to fetch diagnosis result for category '{category}': {error}",
"diagnosis_no_cache": "No diagnosis cache yet for category '{category}'",
"diagnosis_ip_connected_ipv4": "The server is connected to the Internet through IPv4 !",
"diagnosis_ip_connected_ipv4": "The server is connected to the Internet through IPv4!",
"diagnosis_ip_no_ipv4": "The server does not have working IPv4.",
"diagnosis_ip_connected_ipv6": "The server is connected to the Internet through IPv6 !",
"diagnosis_ip_connected_ipv6": "The server is connected to the Internet through IPv6!",
"diagnosis_ip_no_ipv6": "The server does not have working IPv6.",
"diagnosis_ip_no_ipv6_tip": "Having a working IPv6 is not mandatory for your server to work, but it is better for the health of the Internet as a whole. IPv6 should usually be automatically configured by the system or your provider if it's available. Otherwise, you might need to configure a few things manually as explained in the documentation here: <a href='https://yunohost.org/#/ipv6'>https://yunohost.org/#/ipv6</a>. If you cannot enable IPv6 or if it seems too technical for you, you can also safely ignore this warning.",
"diagnosis_ip_global": "Global IP: <code>{global}</code>",
"diagnosis_ip_local": "Local IP: <code>{local}</code>",
"diagnosis_ip_not_connected_at_all": "The server does not seem to be connected to the Internet at all!?",
@ -173,8 +173,16 @@
"diagnosis_dns_good_conf": "DNS records are correctly configured for domain {domain} (category {category})",
"diagnosis_dns_bad_conf": "Some DNS records are missing or incorrect for domain {domain} (category {category})",
"diagnosis_dns_missing_record": "According to the recommended DNS configuration, you should add a DNS record with the following info.<br>Type: <code>{type}</code><br>Name: <code>{name}</code><br>Value: <code>{value}</code>",
"diagnosis_dns_discrepancy": "The following DNS record does not seem to follow the recommended configuration:<br>Type: <code>{type}</code><br>Name: <code>{name}</code><br>Current value: <code>{current}</code><br>Excepted value: <code>{value}</code>",
"diagnosis_dns_discrepancy": "The following DNS record does not seem to follow the recommended configuration:<br>Type: <code>{type}</code><br>Name: <code>{name}</code><br>Current value: <code>{current}</code><br>Expected value: <code>{value}</code>",
"diagnosis_dns_point_to_doc": "Please check the documentation at <a href='https://yunohost.org/dns_config'>https://yunohost.org/dns_config</a> if you need help about configuring DNS records.",
"diagnosis_dns_try_dyndns_update_force": "This domain's DNS configuration should automatically be managed by Yunohost. If that's not the case, you can try to force an update using <cmd>yunohost dyndns update --force</cmd>.",
"diagnosis_domain_expiration_not_found": "Unable to check the expiration date for some domains",
"diagnosis_domain_not_found_details": "The domain {domain} doesn't exist in WHOIS database or is expired!",
"diagnosis_domain_expiration_not_found_details": "The WHOIS information for domain {domain} doesn't seem to contain the information about the expiration date?",
"diagnosis_domain_expiration_success": "Your domains are registered and not going to expire anytime soon.",
"diagnosis_domain_expiration_warning": "Some domains will expire soon!",
"diagnosis_domain_expiration_error": "Some domains will expire VERY SOON!",
"diagnosis_domain_expires_in": "{domain} expires in {days} days.",
"diagnosis_services_running": "Service {service} is running!",
"diagnosis_services_conf_broken": "Configuration is broken for service {service}!",
"diagnosis_services_bad_status": "Service {service} is {status} :(",
@ -188,6 +196,7 @@
"diagnosis_swap_none": "The system has no swap at all. You should consider adding at least {recommended} of swap to avoid situations where the system runs out of memory.",
"diagnosis_swap_notsomuch": "The system has only {total} swap. You should consider having at least {recommended} to avoid situations where the system runs out of memory.",
"diagnosis_swap_ok": "The system has {total} of swap!",
"diagnosis_swap_tip": "Please be careful and aware that if the server is hosting swap on an SD card or SSD storage, it may drastically reduce the life expectancy of the device`.",
"diagnosis_mail_outgoing_port_25_ok": "The SMTP mail server is able to send emails (outgoing port 25 is not blocked).",
"diagnosis_mail_outgoing_port_25_blocked": "The SMTP mail server cannot send emails to other servers because outgoing port 25 is blocked in IPv{ipversion}.",
"diagnosis_mail_outgoing_port_25_blocked_details": "You should first try to unblock outgoing port 25 in your internet router interface or your hosting provider interface. (Some hosting provider may require you to send them a support ticket for this).",
@ -197,21 +206,21 @@
"diagnosis_mail_ehlo_unreachable_details": "Could not open a connection on port 25 to your server in IPv{ipversion}. It appears to be unreachable.<br>1. The most common cause for this issue is that port 25 <a href='https://yunohost.org/isp_box_config'>is not correctly forwarded to your server</a>.<br>2. You should also make sure that service postfix is running.<br>3. On more complex setups: make sure that no firewall or reverse-proxy is interfering.",
"diagnosis_mail_ehlo_bad_answer": "A non-SMTP service answered on port 25 on IPv{ipversion}",
"diagnosis_mail_ehlo_bad_answer_details": "It could be due to an other machine answering instead of your server.",
"diagnosis_mail_ehlo_wrong": "A different SMTP mail server answers on IPv{ipversion}. It will probably not be able to receive emails.",
"diagnosis_mail_ehlo_wrong_details": "The EHLO received by the remote diagnoser in IPv{ipversion} is different from your server's domain.<br>Received EHLO: <code>{wrong_ehlo}</code><br>Expected: {right_ehlo}<br>The most common cause for this issue is that port 25 <a href='https://yunohost.org/isp_box_config'>is not correctly forwarded to your server</a>. Alternatively, make sure that no firewall or reverse-proxy is interfering.",
"diagnosis_mail_ehlo_wrong": "A different SMTP mail server answers on IPv{ipversion}. Your server will probably not be able to receive emails.",
"diagnosis_mail_ehlo_wrong_details": "The EHLO received by the remote diagnoser in IPv{ipversion} is different from your server's domain.<br>Received EHLO: <code>{wrong_ehlo}</code><br>Expected: <code>{right_ehlo}</code><br>The most common cause for this issue is that port 25 <a href='https://yunohost.org/isp_box_config'>is not correctly forwarded to your server</a>. Alternatively, make sure that no firewall or reverse-proxy is interfering.",
"diagnosis_mail_ehlo_could_not_diagnose": "Could not diagnose if postfix mail server is reachable from outside in IPv{ipversion}.",
"diagnosis_mail_ehlo_could_not_diagnose_details": "Error: {error}",
"diagnosis_mail_fcrdns_ok": "Your reverse DNS is correctly configured!",
"diagnosis_mail_fcrdns_dns_missing": "No reverse DNS is defined in IPv{ipversion}. Some emails may fail to get delivered or may get flagged as spam.",
"diagnosis_mail_fcrdns_nok_details": "You should first try to configure the reverse DNS with <code>{ehlo_domain}</code> in your internet router interface or your hosting provider interface. (Some hosting provider may require you to send them a support ticket for this).",
"diagnosis_mail_fcrdns_nok_alternatives_4": "Some providers won't let you configure your reverse DNS (or their feature might be broken...). If you are experiencing issues because of this, consider the following solutions:<br> - Some ISP provide the alternative of <a href='https://yunohost.org/#/smtp_relay'>using a mail server relay</a> though it implies that the relay will be able to spy on your email traffic.<br>- A privacy-friendly alternative is to use a VPN *with a dedicated public IP* to bypass this kind of limits. See <a href='https://yunohost.org/#/vpn_advantage'>https://yunohost.org/#/vpn_advantage</a><br>- Finally, it's also possible to <a href='https://yunohost.org/#/isp'>change of provider</a>",
"diagnosis_mail_fcrdns_nok_alternatives_4": "Some providers won't let you configure your reverse DNS (or their feature might be broken...). If you are experiencing issues because of this, consider the following solutions:<br> - Some ISP provide the alternative of <a href='https://yunohost.org/#/smtp_relay'>using a mail server relay</a> though it implies that the relay will be able to spy on your email traffic.<br>- A privacy-friendly alternative is to use a VPN *with a dedicated public IP* to bypass this kind of limits. See <a href='https://yunohost.org/#/vpn_advantage'>https://yunohost.org/#/vpn_advantage</a><br>- Or it's possible to <a href='https://yunohost.org/#/isp'>switch to a different provider</a>",
"diagnosis_mail_fcrdns_nok_alternatives_6": "Some providers won't let you configure your reverse DNS (or their feature might be broken...). If your reverse DNS is correctly configured for IPv4, you can try disabling the use of IPv6 when sending emails by running <cmd>yunohost settings set smtp.allow_ipv6 -v off</cmd>. Note: this last solution means that you won't be able to send or receive emails from the few IPv6-only servers out there.",
"diagnosis_mail_fcrdns_different_from_ehlo_domain": "The reverse DNS is not correctly configured in IPv{ipversion}. Some emails may fail to get delivered or may get flagged as spam.",
"diagnosis_mail_fcrdns_different_from_ehlo_domain_details": "Current reverse DNS: <code>{rdns_domain}</code><br>Expected value: <code>{ehlo_domain}</code>",
"diagnosis_mail_blacklist_ok": "The IPs and domains used by this server do not appear to be blacklisted",
"diagnosis_mail_blacklist_listed_by": "Your IP or domain <code>{item}</code> is blacklisted on {blacklist_name}",
"diagnosis_mail_blacklist_reason": "The blacklist reason is: {reason}",
"diagnosis_mail_blacklist_website": "After identifying why you are listed and fixed it, feel free to ask for delisting on {blacklist_website}",
"diagnosis_mail_blacklist_website": "After identifying why you are listed and fixed it, feel free to ask for your IP or domaine to be removed on {blacklist_website}",
"diagnosis_mail_queue_ok": "{nb_pending} pending emails in the mail queues",
"diagnosis_mail_queue_unavailable": "Can not consult number of pending emails in queue",
"diagnosis_mail_queue_unavailable_details": "Error: {error}",
@ -219,7 +228,6 @@
"diagnosis_regenconf_allgood": "All configurations files are in line with the recommended configuration!",
"diagnosis_regenconf_manually_modified": "Configuration file <code>{file}</code> appears to have been manually modified.",
"diagnosis_regenconf_manually_modified_details": "This is probably OK if you know what you're doing! YunoHost will stop updating this file automatically... But beware that YunoHost upgrades could contain important recommended changes. If you want to, you can inspect the differences with <cmd>yunohost tools regen-conf {category} --dry-run --with-diff</cmd> and force the reset to the recommended configuration with <cmd>yunohost tools regen-conf {category} --force</cmd>",
"diagnosis_security_all_good": "No critical security vulnerability was found.",
"diagnosis_security_vulnerable_to_meltdown": "You appear vulnerable to the Meltdown criticial security vulnerability",
"diagnosis_security_vulnerable_to_meltdown_details": "To fix this, you should upgrade your system and reboot to load the new linux kernel (or contact your server provider if this doesn't work). See https://meltdownattack.com/ for more infos.",
"diagnosis_description_basesystem": "Base system",
@ -231,7 +239,6 @@
"diagnosis_description_web": "Web",
"diagnosis_description_mail": "Email",
"diagnosis_description_regenconf": "System configurations",
"diagnosis_description_security": "Security checks",
"diagnosis_ports_could_not_diagnose": "Could not diagnose if ports are reachable from outside in IPv{ipversion}.",
"diagnosis_ports_could_not_diagnose_details": "Error: {error}",
"diagnosis_ports_unreachable": "Port {port} is not reachable from outside.",
@ -240,7 +247,7 @@
"diagnosis_ports_needed_by": "Exposing this port is needed for {category} features (service {service})",
"diagnosis_ports_forwarding_tip": "To fix this issue, you most probably need to configure port forwarding on your internet router as described in <a href='https://yunohost.org/isp_box_config'>https://yunohost.org/isp_box_config</a>",
"diagnosis_http_hairpinning_issue": "Your local network does not seem to have hairpinning enabled.",
"diagnosis_http_hairpinning_issue_details": "This is probably because of your ISP box / router. As a result, people from outside your local network will be able to access your server as expected, but not people from inside the local network (like you, probably?). You may be able to improve the situation by having a look at <a href='https://yunohost.org/dns_local_network'>https://yunohost.org/dns_local_network</a>",
"diagnosis_http_hairpinning_issue_details": "This is probably because of your ISP box / router. As a result, people from outside your local network will be able to access your server as expected, but not people from inside the local network (like you, probably?) when using the domain name or global IP. You may be able to improve the situation by having a look at <a href='https://yunohost.org/dns_local_network'>https://yunohost.org/dns_local_network</a>",
"diagnosis_http_could_not_diagnose": "Could not diagnose if domains are reachable from outside in IPv{ipversion}.",
"diagnosis_http_could_not_diagnose_details": "Error: {error}",
"diagnosis_http_ok": "Domain {domain} is reachable through HTTP from outside the local network.",
@ -266,13 +273,13 @@
"domain_dyndns_root_unknown": "Unknown DynDNS root domain",
"domain_exists": "The domain already exists",
"domain_hostname_failed": "Could not set new hostname. This might cause an issue later (it might be fine).",
"domain_uninstall_app_first": "One or more apps are installed on this domain. Please uninstall them before proceeding to domain removal",
"domain_uninstall_app_first": "Those applications are still installed on your domain: {apps}. Please uninstall them before proceeding to domain removal",
"domain_named_unknown": "Domain '{domain}' unknown",
"domain_unknown": "Unknown domain",
"domains_available": "Available domains:",
"done": "Done",
"downloading": "Downloading…",
"dpkg_is_broken": "You cannot do this right now because dpkg/APT (the system package managers) seems to be in a broken state… You can try to solve this issue by connecting through SSH and running `sudo dpkg --configure -a`.",
"dpkg_is_broken": "You cannot do this right now because dpkg/APT (the system package managers) seems to be in a broken state… You can try to solve this issue by connecting through SSH and running `sudo apt install --fix-broken` and/or `sudo dpkg --configure -a`.",
"dpkg_lock_not_available": "This command can't be run right now because another program seems to be using the lock of dpkg (the system package manager)",
"dyndns_could_not_check_provide": "Could not check if {provider:s} can provide {domain:s}.",
"dyndns_could_not_check_available": "Could not check if {domain:s} is available on {provider:s}.",
@ -281,7 +288,7 @@
"dyndns_cron_removed": "DynDNS cron job removed",
"dyndns_ip_update_failed": "Could not update IP address to DynDNS",
"dyndns_ip_updated": "Updated your IP on DynDNS",
"dyndns_key_generating": "Generating DNS key… It may take a while.",
"dyndns_key_generating": "Generating DNS key... It may take a while.",
"dyndns_key_not_found": "DNS key not found for the domain",
"dyndns_no_domain_registered": "No domain registered with DynDNS",
"dyndns_provider_unreachable": "Unable to reach DynDNS provider {provider}: either your YunoHost is not correctly connected to the internet or the dynette server is down.",
@ -289,9 +296,9 @@
"dyndns_registration_failed": "Could not register DynDNS domain: {error:s}",
"dyndns_domain_not_provided": "DynDNS provider {provider:s} cannot provide domain {domain:s}.",
"dyndns_unavailable": "The domain '{domain:s}' is unavailable.",
"executing_command": "Executing command '{command:s}'…",
"executing_script": "Executing script '{script:s}'…",
"extracting": "Extracting…",
"executing_command": "Executing command '{command:s}'...",
"executing_script": "Executing script '{script:s}'...",
"extracting": "Extracting...",
"experimental_feature": "Warning: This feature is experimental and not considered stable, you should not use it unless you know what you are doing.",
"field_invalid": "Invalid field '{:s}'",
"file_does_not_exist": "The file {path:s} does not exist.",
@ -323,7 +330,7 @@
"good_practices_about_user_password": "You are now about to define a new user password. The password should be at least 8 characters long—though it is good practice to use a longer password (i.e. a passphrase) and/or to a variation of characters (uppercase, lowercase, digits and special characters).",
"group_already_exist": "Group {group} already exists",
"group_already_exist_on_system": "Group {group} already exists in the system groups",
"group_already_exist_on_system_but_removing_it": "Group {group} already exists in the system groups, but YunoHost will remove it…",
"group_already_exist_on_system_but_removing_it": "Group {group} already exists in the system groups, but YunoHost will remove it...",
"group_created": "Group '{group}' created",
"group_creation_failed": "Could not create the group '{group}': {error}",
"group_cannot_edit_all_users": "The group 'all_users' cannot be edited manually. It is a special group meant to contain all users registered in YunoHost",
@ -403,75 +410,50 @@
"mail_unavailable": "This e-mail address is reserved and shall be automatically allocated to the very first user",
"main_domain_change_failed": "Unable to change the main domain",
"main_domain_changed": "The main domain has been changed",
"migrate_tsig_end": "Migration to HMAC-SHA-512 finished",
"migrate_tsig_failed": "Could not migrate the DynDNS domain '{domain}' to HMAC-SHA-512, rolling back. Error: {error_code}, {error}",
"migrate_tsig_start": "Insufficiently secure key algorithm detected for TSIG signature of the domain '{domain}', initiating migration to the more secure HMAC-SHA-512",
"migrate_tsig_wait": "Waiting three minutes for the DynDNS server to take the new key into account…",
"migrate_tsig_wait_2": "2min…",
"migrate_tsig_wait_3": "1min…",
"migrate_tsig_wait_4": "30 seconds…",
"migrate_tsig_not_needed": "You do not appear to use a DynDNS domain, so no migration is needed.",
"migration_description_0001_change_cert_group_to_sslcert": "Change certificates group permissions from 'metronome' to 'ssl-cert'",
"migration_description_0002_migrate_to_tsig_sha256": "Improve security of DynDNS TSIG updates by using SHA-512 instead of MD5",
"migration_description_0003_migrate_to_stretch": "Upgrade the system to Debian Stretch and YunoHost 3.0",
"migration_description_0004_php5_to_php7_pools": "Reconfigure the PHP pools to use PHP 7 instead of 5",
"migration_description_0005_postgresql_9p4_to_9p6": "Migrate databases from PostgreSQL 9.4 to 9.6",
"migration_description_0006_sync_admin_and_root_passwords": "Synchronize admin and root passwords",
"migration_description_0007_ssh_conf_managed_by_yunohost_step1": "Let the SSH configuration be managed by YunoHost (step 1, automatic)",
"migration_description_0008_ssh_conf_managed_by_yunohost_step2": "Let the SSH configuration be managed by YunoHost (step 2, manual)",
"migration_description_0009_decouple_regenconf_from_services": "Decouple the regen-conf mechanism from services",
"migration_description_0010_migrate_to_apps_json": "Remove deprecated apps catalogs and use the new unified 'apps.json' list instead (outdated, replaced by migration 13)",
"migration_description_0011_setup_group_permission": "Set up user groups and permissions for apps and services",
"migration_description_0012_postgresql_password_to_md5_authentication": "Force PostgreSQL authentication to use MD5 for local connections",
"migration_description_0013_futureproof_apps_catalog_system": "Migrate to the new future-proof apps catalog system",
"migration_description_0014_remove_app_status_json": "Remove legacy status.json app files",
"migration_description_0015_extends_permissions_features_1": "Extends permissions features, step 1",
"migration_0003_start": "Starting migration to Stretch. The logs will be available in {logfile}.",
"migration_0003_patching_sources_list": "Patching the sources.lists…",
"migration_0003_main_upgrade": "Starting main upgrade…",
"migration_0003_fail2ban_upgrade": "Starting the Fail2Ban upgrade…",
"migration_0003_restoring_origin_nginx_conf": "Your file /etc/nginx/nginx.conf was edited somehow. The migration is going to reset it to its original state first… The previous file will be available as {backup_dest}.",
"migration_0003_yunohost_upgrade": "Starting the YunoHost package upgrade… The migration will end, but the actual upgrade will happen immediately afterwards. After the operation is complete, you might have to log in to the webadmin page again.",
"migration_0003_not_jessie": "The current Debian distribution is not Jessie!",
"migration_0003_system_not_fully_up_to_date": "Your system is not fully up-to-date. Please perform a regular upgrade before running the migration to Stretch.",
"migration_0003_still_on_jessie_after_main_upgrade": "Something went wrong during the main upgrade: Is the system still on Jessie‽ To investigate the issue, please look at {log}:s…",
"migration_0003_general_warning": "Please note that this migration is a delicate operation. The YunoHost team did its best to review and test it, but the migration might still break parts of the system or its apps.\n\nTherefore, it is recommended to:\n - Perform a backup of any critical data or app. More info on https://yunohost.org/backup;\n - Be patient after launching the migration: Depending on your Internet connection and hardware, it might take up to a few hours for everything to upgrade.\n\nAdditionally, the port for SMTP, used by external e-mail clients (like Thunderbird or K9-Mail) was changed from 465 (SSL/TLS) to 587 (STARTTLS). The old port (465) will automatically be closed, and the new port (587) will be opened in the firewall. You and your users *will* have to adapt the configuration of your e-mail clients accordingly.",
"migration_0003_problematic_apps_warning": "Please note that the following possibly problematic installed apps were detected. It looks like those were not installed from an app catalog, or are not flagged as 'working'. Consequently, it cannot be guaranteed that they will still work after the upgrade: {problematic_apps}",
"migration_0003_modified_files": "Please note that the following files were found to be manually modified and might be overwritten following the upgrade: {manually_modified_files}",
"migration_0005_postgresql_94_not_installed": "PostgreSQL was not installed on your system. Nothing to do.",
"migration_0005_postgresql_96_not_installed": "PostgreSQL 9.4 is installed, but not postgresql 9.6‽ Something weird might have happened on your system :(…",
"migration_0005_not_enough_space": "Make sufficient space available in {path} to run the migration.",
"migration_0006_disclaimer": "YunoHost now expects the admin and root passwords to be synchronized. This migration replaces your root password with the admin password.",
|
||||
"migration_0007_cancelled": "Could not improve the way your SSH configuration is managed.",
|
||||
"migration_0007_cannot_restart": "SSH can't be restarted after trying to cancel migration number 6.",
|
||||
"migration_0008_general_disclaimer": "To improve the security of your server, it is recommended to let YunoHost manage the SSH configuration. Your current SSH setup differs from the recommendation. If you let YunoHost reconfigure it, the way you connect to your server through SSH will change thusly:",
|
||||
"migration_0008_port": "• You will have to connect using port 22 instead of your current custom SSH port. Feel free to reconfigure it;",
|
||||
"migration_0008_root": "• You will not be able to connect as root through SSH. Instead you should use the admin user;",
|
||||
"migration_0008_dsa": "• The DSA key will be turned off. Hence, you might need to invalidate a spooky warning from your SSH client, and recheck the fingerprint of your server;",
|
||||
"migration_0008_warning": "If you understand those warnings and want YunoHost to override your current configuration, run the migration. Otherwise, you can also skip the migration, though it is not recommended.",
|
||||
"migration_0008_no_warning": "Overriding your SSH configuration should be safe, though this cannot be promised! Run the migration to override it. Otherwise, you can also skip the migration, though it is not recommended.",
|
||||
"migration_0009_not_needed": "This migration already happened somehow… (?) Skipping.",
|
||||
"migration_0011_backup_before_migration": "Creating a backup of LDAP database and apps settings prior to the actual migration.",
|
||||
"migration_0011_can_not_backup_before_migration": "The backup of the system could not be completed before the migration failed. Error: {error:s}",
|
||||
"migration_description_0015_migrate_to_buster": "Upgrade the system to Debian Buster and YunoHost 4.x",
|
||||
"migration_description_0016_php70_to_php73_pools": "Migrate php7.0-fpm 'pool' conf files to php7.3",
|
||||
"migration_description_0017_postgresql_9p6_to_11": "Migrate databases from PostgreSQL 9.6 to 11",
|
||||
"migration_description_0018_xtable_to_nftable": "Migrate old network traffic rules to the new nftable system",
|
||||
"migration_0011_create_group": "Creating a group for each user…",
|
||||
"migration_0011_done": "Migration completed. You are now able to manage usergroups.",
|
||||
"migration_0011_slapd_config_will_be_overwritten": "It looks like you manually edited the slapd configuration. For this critical migration, YunoHost needs to force the update of the slapd configuration. The original files will be backuped in {conf_backup_folder}.",
|
||||
"migration_0011_LDAP_update_failed": "Could not update LDAP. Error: {error:s}",
|
||||
"migration_0011_migrate_permission": "Migrating permissions from apps settings to LDAP…",
|
||||
"migration_0011_migration_failed_trying_to_rollback": "Could not migrate… trying to roll back the system.",
|
||||
"migration_0011_rollback_success": "System rolled back.",
|
||||
"migration_0011_update_LDAP_database": "Updating LDAP database…",
|
||||
"migration_0011_update_LDAP_schema": "Updating LDAP schema…",
|
||||
"migration_0011_migrate_permission": "Migrating permissions from apps settings to LDAP...",
|
||||
"migration_0011_update_LDAP_database": "Updating LDAP database...",
|
||||
"migration_0011_update_LDAP_schema": "Updating LDAP schema...",
|
||||
"migration_0011_failed_to_remove_stale_object": "Could not remove stale object {dn}: {error}",
|
||||
"migration_0015_add_new_attributes_in_ldap": "Add new attributes for permissions in LDAP database",
|
||||
"migration_0015_migrate_old_app_settings": "Migrate old apps settings 'skipped_uris', 'unprotected_uris', 'protected_uris' in permissions system.",
|
||||
"migration_0015_start" : "Starting migration to Buster",
|
||||
"migration_0015_patching_sources_list": "Patching the sources.lists...",
|
||||
"migration_0015_main_upgrade": "Starting main upgrade...",
|
||||
"migration_0015_still_on_stretch_after_main_upgrade": "Something went wrong during the main upgrade, the system appears to still be on Debian Stretch",
|
||||
"migration_0015_yunohost_upgrade" : "Starting YunoHost core upgrade...",
|
||||
"migration_0015_not_stretch" : "The current Debian distribution is not Stretch!",
|
||||
"migration_0015_not_enough_free_space" : "Free space is pretty low in /var/! You should have at least 1GB free to run this migration.",
|
||||
"migration_0015_system_not_fully_up_to_date": "Your system is not fully up-to-date. Please perform a regular upgrade before running the migration to Buster.",
|
||||
"migration_0015_general_warning": "Please note that this migration is a delicate operation. The YunoHost team did its best to review and test it, but the migration might still break parts of the system or its apps.\n\nTherefore, it is recommended to:\n - Perform a backup of any critical data or app. More info on https://yunohost.org/backup;\n - Be patient after launching the migration: Depending on your Internet connection and hardware, it might take up to a few hours for everything to upgrade.",
|
||||
"migration_0015_problematic_apps_warning": "Please note that the following possibly problematic installed apps were detected. It looks like those were not installed from the YunoHost app catalog, or are not flagged as 'working'. Consequently, it cannot be guaranteed that they will still work after the upgrade: {problematic_apps}",
|
||||
"migration_0015_modified_files": "Please note that the following files were found to be manually modified and might be overwritten following the upgrade: {manually_modified_files}",
|
||||
"migration_0015_specific_upgrade": "Starting upgrade of system packages that needs to be upgrade independently...",
|
||||
"migration_0015_cleaning_up": "Cleaning up cache and packages not useful anymore...",
|
||||
"migration_0015_weak_certs": "The following certificates were found to still use weak signature algorithms and have to be upgraded to be compatible with the next version of nginx: {certs}",
|
||||
"migration_0017_postgresql_96_not_installed": "PostgreSQL was not installed on your system. Nothing to do.",
|
||||
"migration_0017_postgresql_11_not_installed": "PostgreSQL 9.6 is installed, but not postgresql 11‽ Something weird might have happened on your system :(...",
|
||||
"migration_0017_not_enough_space": "Make sufficient space available in {path} to run the migration.",
|
||||
"migration_0018_failed_to_migrate_iptables_rules": "Failed to migrate legacy iptables rules to nftables: {error}",
|
||||
"migration_0018_failed_to_reset_legacy_rules": "Failed to reset legacy iptables rules: {error}",
|
||||
"migration_0019_add_new_attributes_in_ldap": "Add new attributes for permissions in LDAP database",
|
||||
"migration_0019_migrate_old_app_settings": "Migrate old apps settings 'skipped_uris', 'unprotected_uris', 'protected_uris' in permissions system.",
|
||||
"migration_0019_backup_before_migration": "Creating a backup of LDAP database and apps settings prior to the actual migration.",
|
||||
"migration_0019_can_not_backup_before_migration": "The backup of the system could not be completed before the migration failed. Error: {error:s}",
|
||||
"migration_0019_migration_failed_trying_to_rollback": "Could not migrate… trying to roll back the system.",
|
||||
"migration_0019_rollback_success": "System rolled back.",
|
||||
"migration_0019_slapd_config_will_be_overwritten": "It looks like you manually edited the slapd configuration. For this critical migration, YunoHost needs to force the update of the slapd configuration. The original files will be backuped in {conf_backup_folder}.",
|
||||
"migration_0011_update_LDAP_schema": "Updating LDAP schema…",
|
||||
"migrations_already_ran": "Those migrations are already done: {ids}",
|
||||
"migrations_cant_reach_migration_file": "Could not access migrations files at the path '%s'",
|
||||
"migrations_dependencies_not_satisfied": "Run these migrations: '{dependencies_id}', before migration {id}.",
|
||||
"migrations_failed_to_load_migration": "Could not load migration {id}: {error}",
|
||||
"migrations_exclusive_options": "'--auto', '--skip', and '--force-rerun' are mutually exclusive options.",
|
||||
"migrations_list_conflict_pending_done": "You cannot use both '--previous' and '--done' at the same time.",
|
||||
"migrations_loading_migration": "Loading migration {id}…",
|
||||
"migrations_loading_migration": "Loading migration {id}...",
|
||||
"migrations_migration_has_failed": "Migration {id} did not complete, aborting. Error: {exception}",
|
||||
"migrations_must_provide_explicit_targets": "You must provide explicit targets when using '--skip' or '--force-rerun'",
|
||||
"migrations_need_to_accept_disclaimer": "To run the migration {id}, your must accept the following disclaimer:\n---\n{disclaimer}\n---\nIf you accept to run the migration, please re-run the command with the option '--accept-disclaimer'.",
|
||||
|
@ -479,14 +461,12 @@
|
|||
"migrations_no_such_migration": "There is no migration called '{id}'",
|
||||
"migrations_not_pending_cant_skip": "Those migrations are not pending, so cannot be skipped: {ids}",
|
||||
"migrations_pending_cant_rerun": "Those migrations are still pending, so cannot be run again: {ids}",
|
||||
"migrations_running_forward": "Running migration {id}…",
|
||||
"migrations_skip_migration": "Skipping migration {id}…",
|
||||
"migrations_running_forward": "Running migration {id}...",
|
||||
"migrations_skip_migration": "Skipping migration {id}...",
|
||||
"migrations_success_forward": "Migration {id} completed",
|
||||
"migrations_to_be_ran_manually": "Migration {id} has to be run manually. Please go to Tools → Migrations on the webadmin page, or run `yunohost tools migrations migrate`.",
|
||||
"no_internet_connection": "The server is not connected to the Internet",
|
||||
"not_enough_disk_space": "Not enough free space on '{path:s}'",
|
||||
"operation_interrupted": "The operation was manually interrupted?",
|
||||
"package_unknown": "Unknown package '{pkgname}'",
|
||||
"packages_upgrade_failed": "Could not upgrade all the packages",
|
||||
"password_listed": "This password is among the most used passwords in the world. Please choose something more unique.",
|
||||
"password_too_simple_1": "The password needs to be at least 8 characters long",
|
||||
|
@ -535,11 +515,13 @@
|
|||
"regenconf_would_be_updated": "The configuration would have been updated for category '{category}'",
|
||||
"regenconf_dry_pending_applying": "Checking pending configuration which would have been applied for category '{category}'…",
|
||||
"regenconf_failed": "Could not regenerate the configuration for category(s): {categories}",
|
||||
"regenconf_pending_applying": "Applying pending configuration for category '{category}'…",
|
||||
"regenconf_pending_applying": "Applying pending configuration for category '{category}'...",
|
||||
"regenconf_need_to_explicitly_specify_ssh": "The ssh configuration has been manually modified, but you need to explicitly specify category 'ssh' with --force to actually apply the changes.",
|
||||
"regex_incompatible_with_tile": "/!\\ Packagers! For the permission '{permission}' can't set the regex {regex} as main url and set 'show_tile' to 'true'",
|
||||
"regex_with_only_domain": "You can't use a regex for domain, only for path",
|
||||
"restore_already_installed_app": "An app with the ID '{app:s}' is already installed",
|
||||
"restore_app_failed": "Could not restore the app '{app:s}'",
|
||||
"restore_already_installed_apps": "The following apps can't be restored because they are already installed: {apps}",
|
||||
"restore_app_failed": "Could not restore {app:s}",
|
||||
"restore_cleaning_failed": "Could not clean up the temporary restoration directory",
|
||||
"restore_complete": "Restored",
|
||||
"restore_confirm_yunohost_installed": "Do you really want to restore an already installed system? [{answers:s}]",
|
||||
|
@ -571,8 +553,7 @@
|
|||
"service_description_metronome": "Manage XMPP instant messaging accounts",
|
||||
"service_description_mysql": "Stores app data (SQL database)",
|
||||
"service_description_nginx": "Serves or provides access to all the websites hosted on your server",
|
||||
"service_description_nslcd": "Handles YunoHost user shell connection",
|
||||
"service_description_php7.0-fpm": "Runs apps written in PHP with NGINX",
|
||||
"service_description_php7.3-fpm": "Runs apps written in PHP with NGINX",
|
||||
"service_description_postfix": "Used to send and receive e-mails",
|
||||
"service_description_redis-server": "A specialized database used for rapid data access, task queue, and communication between programs",
|
||||
"service_description_rspamd": "Filters spam, and other e-mail related features",
|
||||
|
@ -604,7 +585,7 @@
|
|||
"ssowat_conf_updated": "SSOwat configuration updated",
|
||||
"system_upgraded": "System upgraded",
|
||||
"system_username_exists": "Username already exists in the list of system users",
|
||||
"this_action_broke_dpkg": "This action broke dpkg/APT (the system package managers)… You can try to solve this issue by connecting through SSH and running `sudo dpkg --configure -a`.",
|
||||
"this_action_broke_dpkg": "This action broke dpkg/APT (the system package managers)... You can try to solve this issue by connecting through SSH and running `sudo apt install --fix-broken` and/or `sudo dpkg --configure -a`.",
|
||||
"tools_upgrade_at_least_one": "Please specify '--apps', or '--system'",
|
||||
"tools_upgrade_cant_both": "Cannot upgrade both system and apps at the same time",
|
||||
"tools_upgrade_cant_hold_critical_packages": "Could not hold critical packages…",
|
||||
|
@ -614,16 +595,16 @@
|
|||
"tools_upgrade_special_packages": "Now upgrading 'special' (yunohost-related) packages…",
|
||||
"tools_upgrade_special_packages_explanation": "The special upgrade will continue in the background. Please don't start any other actions on your server for the next ~10 minutes (depending on hardware speed). After this, you may have to re-log in to the webadmin. The upgrade log will be available in Tools → Log (in the webadmin) or using 'yunohost log list' (from the command-line).",
|
||||
"tools_upgrade_special_packages_completed": "YunoHost package upgrade completed.\nPress [Enter] to get the command line back",
|
||||
"unbackup_app": "App '{app:s}' will not be saved",
|
||||
"unbackup_app": "{app:s} will not be saved",
|
||||
"unexpected_error": "Something unexpected went wrong: {error}",
|
||||
"unknown_main_domain_path": "Unknown domain or path for app '{app}'. You need to specify a domain and a path to be able to specify a url for permission.",
|
||||
"unlimit": "No quota",
|
||||
"unrestore_app": "App '{app:s}' will not be restored",
|
||||
"unrestore_app": "{app:s} will not be restored",
|
||||
"update_apt_cache_failed": "Could not to update the cache of APT (Debian's package manager). Here is a dump of the sources.list lines, which might help identify problematic lines: \n{sourceslist}",
|
||||
"update_apt_cache_warning": "Something went wrong while updating the cache of APT (Debian's package manager). Here is a dump of the sources.list lines, which might help identify problematic lines: \n{sourceslist}",
|
||||
"updating_apt_cache": "Fetching available upgrades for system packages…",
|
||||
"updating_apt_cache": "Fetching available upgrades for system packages...",
|
||||
"upgrade_complete": "Upgrade complete",
|
||||
"upgrading_packages": "Upgrading packages…",
|
||||
"upgrading_packages": "Upgrading packages...",
|
||||
"upnp_dev_not_found": "No UPnP device found",
|
||||
"upnp_disabled": "UPnP turned off",
|
||||
"upnp_enabled": "UPnP turned on",
|
||||
|
@ -642,7 +623,7 @@
|
|||
"yunohost_ca_creation_failed": "Could not create certificate authority",
|
||||
"yunohost_ca_creation_success": "Local certification authority created.",
|
||||
"yunohost_configured": "YunoHost is now configured",
|
||||
"yunohost_installing": "Installing YunoHost…",
|
||||
"yunohost_installing": "Installing YunoHost...",
|
||||
"yunohost_not_installed": "YunoHost is not correctly installed. Please run 'yunohost tools postinstall'",
|
||||
"yunohost_postinstall_end_tip": "The post-install completed! To finalize your setup, please consider:\n - adding a first user through the 'Users' section of the webadmin (or 'yunohost user create <username>' in command-line);\n - diagnose potential issues through the 'Diagnosis' section of the webadmin (or 'yunohost diagnosis run' in command-line);\n - reading the 'Finalizing your setup' and 'Getting to know Yunohost' parts in the admin documentation: https://yunohost.org/admindoc."
|
||||
}
|
||||
|
|
143
locales/eo.json
143
locales/eo.json
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"admin_password_change_failed": "Ne eblas ŝanĝi pasvorton",
|
||||
"admin_password_changed": "La pasvorto de administrado ŝanĝiĝis",
|
||||
"admin_password_changed": "La pasvorto de administrado estis ŝanĝita",
|
||||
"app_already_installed": "{app:s} estas jam instalita",
|
||||
"app_already_up_to_date": "{app:s} estas jam ĝisdata",
|
||||
"app_argument_required": "Parametro {name:s} estas bezonata",
|
||||
|
@ -70,7 +70,7 @@
|
|||
"backup_cant_mount_uncompress_archive": "Ne povis munti la nekompresitan ar archiveivon kiel protektita kontraŭ skribo",
|
||||
"app_action_cannot_be_ran_because_required_services_down": "Ĉi tiuj postulataj servoj devas funkcii por funkciigi ĉi tiun agon: {services}. Provu rekomenci ilin por daŭrigi (kaj eble esploru, kial ili malsupreniras).",
|
||||
"backup_copying_to_organize_the_archive": "Kopiante {size:s} MB por organizi la ar archiveivon",
|
||||
"backup_output_directory_forbidden": "Elektu malsaman elirejan dosierujon. Sekurkopioj ne povas esti kreitaj en sub-dosierujoj / bin, / boot, / dev, / ktp, / lib, / root, / run, / sbin, / sys, / usr, / var aŭ /home/yunohost.backup/archives",
|
||||
"backup_output_directory_forbidden": "Elektu malsaman elirejan dosierujon. Sekurkopioj ne povas esti kreitaj en sub-dosierujoj /bin, /boot, /dev, /ktp, /lib, /root, /run, /sbin, /sys, /usr, /var aŭ /home/yunohost.backup/archives",
|
||||
"backup_no_uncompress_archive_dir": "Ne ekzistas tia nekompremita arkiva dosierujo",
|
||||
"password_too_simple_1": "Pasvorto devas esti almenaŭ 8 signojn longa",
|
||||
"app_upgrade_failed": "Ne povis ĝisdatigi {app:s}: {error}",
|
||||
|
@ -81,7 +81,7 @@
|
|||
"backup_archive_name_exists": "Rezerva arkivo kun ĉi tiu nomo jam ekzistas.",
|
||||
"backup_applying_method_tar": "Krei la rezervan TAR-ar archiveivon …",
|
||||
"backup_method_custom_finished": "Propra rezerva metodo '{method:s}' finiĝis",
|
||||
"app_already_installed_cant_change_url": "Ĉi tiu app estas jam instalita. La URL ne povas esti ŝanĝita nur per ĉi tiu funkcio. Rigardu \"app changeurl\" se ĝi haveblas.",
|
||||
"app_already_installed_cant_change_url": "Ĉi tiu app estas jam instalita. La URL ne povas esti ŝanĝita nur per ĉi tiu funkcio. Kontrolu en `app changeurl` se ĝi haveblas.",
|
||||
"app_not_correctly_installed": "{app:s} ŝajnas esti malĝuste instalita",
|
||||
"app_removed": "{app:s} forigita",
|
||||
"backup_delete_error": "Ne povis forigi '{path:s}'",
|
||||
|
@ -107,13 +107,13 @@
|
|||
"app_sources_fetch_failed": "Ne povis akiri fontajn dosierojn, ĉu la URL estas ĝusta?",
|
||||
"ask_new_domain": "Nova domajno",
|
||||
"app_unknown": "Nekonata apliko",
|
||||
"app_not_upgraded": "La aplikaĵo '{failed_app}' ne ĝisdatigis, kaj pro tio la sekvaj ĝisdatigoj de aplikoj estis nuligitaj: {apps}",
|
||||
"app_not_upgraded": "La '{failed_app}' de la programo ne sukcesis ĝisdatigi, kaj sekve la nuntempaj plibonigoj de la sekvaj programoj estis nuligitaj: {apps}",
|
||||
"aborting": "Aborti.",
|
||||
"app_upgraded": "{app:s} altgradigita",
|
||||
"backup_deleted": "Rezerva forigita",
|
||||
"backup_csv_addition_failed": "Ne povis aldoni dosierojn al sekurkopio en la CSV-dosiero",
|
||||
"dpkg_lock_not_available": "Ĉi tiu komando ne povas funkcii nun ĉar alia programo uzas la seruron de dpkg (la administrilo de paka sistemo)",
|
||||
"migration_0003_yunohost_upgrade": "Komenci la ĝisdatigon de YunoHost-pako ... La migrado finiĝos, sed la efektiva ĝisdatigo okazos tuj poste. Post kiam la operacio finiĝos, vi eble devos ensaluti denove sur la retpaĝo.",
|
||||
"migration_0003_yunohost_upgrade": "Komencante la ĝisdatigon de la pakaĵo YunoHost ... La migrado finiĝos, sed la efektiva ĝisdatigo okazos tuj poste. Post kiam la operacio finiĝos, vi eble devos ensaluti al la retpaĝo.",
|
||||
"domain_dyndns_root_unknown": "Nekonata radika domajno DynDNS",
|
||||
"field_invalid": "Nevalida kampo '{:s}'",
|
||||
"log_app_makedefault": "Faru '{}' la defaŭlta apliko",
|
||||
|
@ -124,7 +124,7 @@
|
|||
"global_settings_setting_security_postfix_compatibility": "Kongruo vs sekureca kompromiso por la Postfix-servilo. Afektas la ĉifradojn (kaj aliajn aspektojn pri sekureco)",
|
||||
"group_unknown": "La grupo '{group:s}' estas nekonata",
|
||||
"mailbox_disabled": "Retpoŝto malŝaltita por uzanto {user:s}",
|
||||
"migration_description_0011_setup_group_permission": "Agordu uzantogrupon kaj starigu permeson por programoj kaj servoj",
|
||||
"migration_description_0011_setup_group_permission": "Agordu uzantajn grupojn kaj permesojn por programoj kaj servoj",
|
||||
"migration_0011_backup_before_migration": "Krei sekurkopion de LDAP-datumbazo kaj agordojn antaŭ la efektiva migrado.",
|
||||
"migration_0011_migrate_permission": "Migrado de permesoj de agordoj al aplikoj al LDAP…",
|
||||
"migration_0011_migration_failed_trying_to_rollback": "Ne povis migri ... provante redakti la sistemon.",
|
||||
|
@ -148,8 +148,8 @@
|
|||
"log_user_group_delete": "Forigi grupon '{}'",
|
||||
"log_user_group_update": "Ĝisdatigi grupon '{}'",
|
||||
"migration_0005_postgresql_94_not_installed": "PostgreSQL ne estis instalita en via sistemo. Nenio por fari.",
|
||||
"dyndns_provider_unreachable": "Ne povas atingi Dyndns-provizanton {provider}: ĉu via YunoHost ne estas ĝuste konektita al la interreto aŭ la dynette-servilo malŝaltiĝas.",
|
||||
"good_practices_about_user_password": "Vi nun estas por difini novan uzantan pasvorton. La pasvorto devas esti almenaŭ 8 signoj - kvankam estas bone praktiki uzi pli longan pasvorton (t.e. pasfrazon) kaj / aŭ variaĵon de signoj (majuskloj, minuskloj, ciferoj kaj specialaj signoj).",
|
||||
"dyndns_provider_unreachable": "Ne povas atingi la provizanton DynDNS {provider}: ĉu via YunoHost ne estas ĝuste konektita al la interreto aŭ la dyneta servilo malŝaltiĝas.",
|
||||
"good_practices_about_user_password": "Vi nun estas por difini novan uzantan pasvorton. La pasvorto devas esti almenaŭ 8 signojn - kvankam estas bone praktiki uzi pli longan pasvorton (t.e. pasfrazon) kaj/aŭ variaĵon de signoj (majuskloj, minuskloj, ciferoj kaj specialaj signoj).",
|
||||
"group_updated": "Ĝisdatigita \"{group}\" grupo",
|
||||
"group_already_exist": "Grupo {group} jam ekzistas",
|
||||
"group_already_exist_on_system": "Grupo {group} jam ekzistas en la sistemaj grupoj",
|
||||
|
@ -172,7 +172,7 @@
|
|||
"migrations_already_ran": "Tiuj migradoj estas jam faritaj: {ids}",
|
||||
"migrations_no_such_migration": "Estas neniu migrado nomata '{id}'",
|
||||
"permission_already_allowed": "Grupo '{group}' jam havas rajtigitan permeson '{permission}'",
|
||||
"permission_already_disallowed": "Grupo '{group}' jam havas permeson '{permission}' malebligita'",
|
||||
"permission_already_disallowed": "Grupo '{group}' jam havas permeson '{permission}' malebligita",
|
||||
"permission_cannot_remove_main": "Forigo de ĉefa permeso ne rajtas",
|
||||
"permission_creation_failed": "Ne povis krei permeson '{permission}': {error}",
|
||||
"user_already_exists": "La uzanto '{user}' jam ekzistas",
|
||||
|
@ -186,7 +186,7 @@
|
|||
"permission_not_found": "Permesita \"{permission:s}\" ne trovita",
|
||||
"restore_not_enough_disk_space": "Ne sufiĉa spaco (spaco: {free_space:d} B, necesa spaco: {needed_space:d} B, sekureca marĝeno: {margin:d} B)",
|
||||
"tools_upgrade_regular_packages": "Nun ĝisdatigi 'regulajn' (ne-yunohost-rilatajn) pakojn …",
|
||||
"tools_upgrade_special_packages_explanation": "La speciala ĝisdatigo daŭros en fono. Bonvolu ne komenci aliajn agojn en via servilo la sekvajn ~ 10 minutojn (depende de la aparata rapideco). Post tio, vi eble devos re-ensaluti sur la retadreso. La ĝisdatiga registro estos havebla en Iloj → Ensaluto (en la retadreso) aŭ uzante 'yunohost-logliston' (el la komandlinio).",
|
||||
"tools_upgrade_special_packages_explanation": "La speciala ĝisdatigo daŭros en la fono. Bonvolu ne komenci aliajn agojn en via servilo dum la sekvaj ~ 10 minutoj (depende de la aparata rapideco). Post tio, vi eble devos re-ensaluti al la retadreso. La ĝisdatiga registro estos havebla en Iloj → Ensaluto (en la retadreso) aŭ uzante 'yunohost logliston' (el la komandlinio).",
|
||||
"unrestore_app": "App '{app:s}' ne restarigos",
|
||||
"group_created": "Grupo '{group}' kreita",
|
||||
"group_creation_failed": "Ne povis krei la grupon '{group}': {error}",
|
||||
|
@ -199,7 +199,7 @@
|
|||
"log_user_create": "Aldonu uzanton '{}'",
|
||||
"ip6tables_unavailable": "Vi ne povas ludi kun ip6tabloj ĉi tie. Vi estas en ujo aŭ via kerno ne subtenas ĝin",
|
||||
"mail_unavailable": "Ĉi tiu retpoŝta adreso estas rezervita kaj aŭtomate estos atribuita al la unua uzanto",
|
||||
"certmanager_domain_dns_ip_differs_from_public_ip": "La DNS 'A' rekordo por la domajno '{domain:s}' diferencas de ĉi tiu IP-servilo. Se vi lastatempe modifis vian A-registron, bonvolu atendi ĝin propagandi (iuj DNS-disvastigaj kontroliloj estas disponeblaj interrete). (Se vi scias, kion vi faras, uzu '--no-checks' por malŝalti tiujn ĉekojn.)",
|
||||
"certmanager_domain_dns_ip_differs_from_public_ip": "La DNS 'A' rekordo por la domajno '{domain:s}' diferencas de la IP de ĉi tiu servilo. Se vi lastatempe modifis vian A-registron, bonvolu atendi ĝin propagandi (iuj DNS-disvastigaj kontroliloj estas disponeblaj interrete). (Se vi scias, kion vi faras, uzu '--no-checks' por malŝalti tiujn ĉekojn.)",
|
||||
"tools_upgrade_special_packages_completed": "Plenumis la ĝisdatigon de pakaĵoj de YunoHost.\nPremu [Enter] por retrovi la komandlinion",
|
||||
"log_remove_on_failed_install": "Forigu '{}' post malsukcesa instalado",
|
||||
"regenconf_file_manually_modified": "La agorddosiero '{conf}' estis modifita permane kaj ne estos ĝisdatigita",
|
||||
|
@ -211,7 +211,7 @@
|
|||
"migration_description_0006_sync_admin_and_root_passwords": "Sinkronigu admin kaj radikajn pasvortojn",
|
||||
"iptables_unavailable": "Vi ne povas ludi kun iptables ĉi tie. Vi estas en ujo aŭ via kerno ne subtenas ĝin",
|
||||
"global_settings_cant_write_settings": "Ne eblis konservi agordojn, tial: {reason:s}",
|
||||
"service_added": "La servo '{service:s}' aldonis",
|
||||
"service_added": "La servo '{service:s}' estis aldonita",
|
||||
"upnp_disabled": "UPnP malŝaltis",
|
||||
"service_started": "Servo '{service:s}' komenciĝis",
|
||||
"port_already_opened": "Haveno {port:d} estas jam malfermita por {ip_version:s} rilatoj",
|
||||
|
@ -248,7 +248,7 @@
|
|||
"ldap_init_failed_to_create_admin": "LDAP-iniciato ne povis krei administran uzanton",
|
||||
"backup_output_directory_required": "Vi devas provizi elirejan dosierujon por la sekurkopio",
|
||||
"tools_upgrade_cant_unhold_critical_packages": "Ne povis malŝalti kritikajn pakojn…",
|
||||
"log_link_to_log": "Plena ŝtipo de ĉi tiu operacio: '<a href=\"#/tools/logs/{name}\" style=\"text-decoration:underline\"> {desc} </a>'",
|
||||
"log_link_to_log": "Plena ŝtipo de ĉi tiu operacio: '<a href=\"#/tools/logs/{name}\" style=\"text-decoration:underline\">{desc} </a>'",
|
||||
"global_settings_cant_serialize_settings": "Ne eblis serialigi datumojn pri agordoj, motivo: {reason:s}",
|
||||
"backup_running_hooks": "Kurado de apogaj hokoj …",
|
||||
"certmanager_domain_unknown": "Nekonata domajno '{domain:s}'",
|
||||
|
@ -283,7 +283,7 @@
|
|||
"log_operation_unit_unclosed_properly": "Operaciumo ne estis fermita ĝuste",
|
||||
"upgrade_complete": "Ĝisdatigo kompleta",
|
||||
"upnp_enabled": "UPnP ŝaltis",
|
||||
"mailbox_used_space_dovecot_down": "La retpoŝta servo de Dovecot devas funkcii, se vi volas akcepti uzitan poŝtan spacon",
|
||||
"mailbox_used_space_dovecot_down": "La poŝta servo de Dovecot devas funkcii, se vi volas akcepti uzitan poŝtan keston",
|
||||
"restore_system_part_failed": "Ne povis restarigi la sisteman parton '{part:s}'",
|
||||
"service_stop_failed": "Ne povis maldaŭrigi la servon '{service:s}'\n\nLastatempaj servaj protokoloj: {logs:s}",
|
||||
"unbackup_app": "App '{app:s}' ne konserviĝos",
|
||||
|
@ -312,7 +312,7 @@
|
|||
"package_unknown": "Nekonata pako '{pkgname}'",
|
||||
"domain_unknown": "Nekonata domajno",
|
||||
"global_settings_setting_security_password_user_strength": "Uzanto pasvorta forto",
|
||||
"restore_may_be_not_enough_disk_space": "Via sistemo ŝajnas ne havi sufiĉe da spaco (free:{free_space:d} B, necesa spaco: {needed_space:d} B, sekureca marĝeno: {margin:d} B)",
|
||||
"restore_may_be_not_enough_disk_space": "Via sistemo ne ŝajnas havi sufiĉe da spaco (libera: {free_space:d} B, necesa spaco: {needed_space:d} B, sekureca marĝeno: {margin:d} B)",
|
||||
"log_corrupted_md_file": "La YAD-metadata dosiero asociita kun protokoloj estas damaĝita: '{md_file}\nEraro: {error} '",
|
||||
"downloading": "Elŝutante …",
|
||||
"user_deleted": "Uzanto forigita",
|
||||
|
@ -323,7 +323,7 @@
|
|||
"service_description_fail2ban": "Protektas kontraŭ bruta forto kaj aliaj specoj de atakoj de la interreto",
|
||||
"file_does_not_exist": "La dosiero {path:s} ne ekzistas.",
|
||||
"yunohost_not_installed": "YunoHost ne estas ĝuste instalita. Bonvolu prilabori 'yunohost tools postinstall'",
|
||||
"migration_0005_postgresql_96_not_installed": "PostgreSQL 9.4 estas instalita, sed ne postgresql 9.6‽ Io stranga eble okazis en via sistemo: (…",
|
||||
"migration_0005_postgresql_96_not_installed": "PostgreSQL 9.4 estas instalita, sed ne postgresql 9.6‽ Io stranga eble okazis en via sistemo :(…",
|
||||
"restore_removing_tmp_dir_failed": "Ne povis forigi malnovan provizoran dosierujon",
|
||||
"certmanager_cannot_read_cert": "Io malbona okazis, kiam mi provis malfermi aktualan atestilon por domajno {domain:s} (dosiero: {file:s}), kialo: {reason:s}",
|
||||
"service_removed": "Servo '{service:s}' forigita",
|
||||
|
@ -351,7 +351,7 @@
|
|||
"dyndns_ip_update_failed": "Ne povis ĝisdatigi IP-adreson al DynDNS",
|
||||
"migration_description_0004_php5_to_php7_pools": "Rekonfigu la PHP-naĝejojn por uzi PHP 7 anstataŭ 5",
|
||||
"ssowat_conf_updated": "SSOwat-agordo ĝisdatigita",
|
||||
"log_link_to_failed_log": "Ne povis plenumi la operacion '{desc}'. Bonvolu provizi la plenan protokolon de ĉi tiu operacio per <a href=\"#/tools/logs/{name}\"> alklakante ĉi tie </a> por akiri helpon",
|
||||
"log_link_to_failed_log": "Ne povis plenumi la operacion '{desc}'. Bonvolu provizi la plenan protokolon de ĉi tiu operacio per <a href=\"#/tools/logs/{name}\">alklakante ĉi tie </a> por akiri helpon",
|
||||
"user_home_creation_failed": "Ne povis krei dosierujon \"home\" por uzanto",
|
||||
"pattern_backup_archive_name": "Devas esti valida dosiernomo kun maksimume 30 signoj, alfanombraj kaj -_. signoj nur",
|
||||
"restore_cleaning_failed": "Ne eblis purigi la adresaron de provizora restarigo",
|
||||
|
@ -372,7 +372,7 @@
|
|||
"migration_0003_general_warning": "Bonvolu noti, ke ĉi tiu migrado estas delikata operacio. La teamo de YunoHost faris sian plej bonan revizii kaj testi ĝin, sed la migrado eble ankoraŭ rompos partojn de la sistemo aŭ ĝiaj programoj.\n\nTial oni rekomendas al:\n - Elfari kopion de iuj kritikaj datumoj aŭ app. Pliaj informoj pri https://yunohost.org/backup;\n - Paciencu post lanĉo de la migrado: Depende de via interreta konekto kaj aparataro, eble daŭros kelkaj horoj ĝis ĉio ĝisdatigi.\n\nAldone, la haveno por SMTP, uzata de eksteraj retpoŝtaj klientoj (kiel Thunderbird aŭ K9-Mail) estis ŝanĝita de 465 (SSL / TLS) al 587 (STARTTLS). La malnova haveno (465) aŭtomate fermiĝos, kaj la nova haveno (587) malfermiĝos en la fajrejo. Vi kaj viaj uzantoj * devos adapti la agordon de viaj retpoŝtaj klientoj laŭe.",
|
||||
"global_settings_setting_example_int": "Ekzemple int elekto",
|
||||
"backup_output_symlink_dir_broken": "Via arkiva dosierujo '{path:s}' estas rompita ligilo. Eble vi forgesis restarigi aŭ munti aŭ enŝovi la stokadon, al kiu ĝi notas.",
|
||||
"good_practices_about_admin_password": "Vi nun estas por difini novan administran pasvorton. La pasvorto devas esti almenaŭ 8 signoj - kvankam estas bone praktiki uzi pli longan pasvorton (t.e. pasfrazon) kaj / aŭ uzi variaĵon de signoj (majuskloj, minuskloj, ciferoj kaj specialaj signoj).",
|
||||
"good_practices_about_admin_password": "Vi nun estas por difini novan administran pasvorton. La pasvorto devas esti almenaŭ 8 signojn - kvankam estas bone praktiki uzi pli longan pasvorton (t.e. pasfrazon) kaj/aŭ uzi variaĵon de signoj (majuskloj, minuskloj, ciferoj kaj specialaj signoj).",
|
||||
"certmanager_attempt_to_renew_valid_cert": "La atestilo por la domajno '{domain:s}' ne finiĝos! (Vi eble uzos --force se vi scias kion vi faras)",
|
||||
"restore_running_hooks": "Kurantaj restarigaj hokoj…",
|
||||
"regenconf_pending_applying": "Aplikante pritraktata agordo por kategorio '{category}'…",
|
||||
|
@ -387,12 +387,12 @@
|
|||
"migrations_list_conflict_pending_done": "Vi ne povas uzi ambaŭ '--previous' kaj '--done' samtempe.",
|
||||
"server_shutdown_confirm": "La servilo haltos tuj, ĉu vi certas? [{answers:s}]",
|
||||
"log_backup_restore_app": "Restarigu '{}' de rezerva ar archiveivo",
|
||||
"log_does_exists": "Ne estas operacio-registro kun la nomo '{log}', uzu 'yunohost loglist' por vidi ĉiujn disponeblajn operaciojn",
|
||||
"log_does_exists": "Ne estas operacio kun la nomo '{log}', uzu 'yunohost log list' por vidi ĉiujn disponeblajn operaciojn",
|
||||
"service_add_failed": "Ne povis aldoni la servon '{service:s}'",
|
||||
"pattern_password_app": "Bedaŭrinde, pasvortoj ne povas enhavi jenajn signojn: {forbidden_chars}",
|
||||
"this_action_broke_dpkg": "Ĉi tiu ago rompis dpkg / APT (la administrantoj pri la paka sistemo) ... Vi povas provi solvi ĉi tiun problemon per konekto per SSH kaj funkcianta `sudo dpkg --configure -a`.",
|
||||
"log_regen_conf": "Regeneri sistemajn agordojn '{}'",
|
||||
"restore_hook_unavailable": "La restariga skripto por '{part:s}' ne haveblas en via sistemo kaj ankaŭ ne en la ar theivo",
|
||||
"restore_hook_unavailable": "Restariga skripto por '{part:s}' ne haveblas en via sistemo kaj ankaŭ ne en la ar theivo",
|
||||
"log_dyndns_subscribe": "Aboni al YunoHost-subdominio '{}'",
|
||||
"password_too_simple_4": "La pasvorto bezonas almenaŭ 12 signojn kaj enhavas ciferon, majuskle, pli malaltan kaj specialajn signojn",
|
||||
"migration_0003_main_upgrade": "Komencanta ĉefa ĝisdatigo …",
|
||||
|
@ -401,7 +401,7 @@
|
|||
"global_settings_setting_security_nginx_compatibility": "Kongruo vs sekureca kompromiso por la TTT-servilo NGINX. Afektas la ĉifradojn (kaj aliajn aspektojn pri sekureco)",
|
||||
"no_internet_connection": "La servilo ne estas konektita al la interreto",
|
||||
"migration_0008_dsa": "• La DSA-ŝlosilo estos malŝaltita. Tial vi eble bezonos nuligi spuran averton de via SSH-kliento kaj revizii la fingrospuron de via servilo;",
|
||||
"migration_0003_restoring_origin_nginx_conf": "Fileia dosiero /etc/nginx/nginx.conf estis iel redaktita. La migrado reaperos unue al sia originala stato ... La antaŭa dosiero estos havebla kiel {backup_dest}.",
|
||||
"migration_0003_restoring_origin_nginx_conf": "Fileia dosiero /etc/nginx/nginx.conf estis iel redaktita. La migrado unue restarigos ĝin al sia originala stato ... La antaŭa dosiero estos havebla kiel {backup_dest}.",
|
||||
"migrate_tsig_end": "Migrado al HMAC-SHA-512 finiĝis",
|
||||
"restore_complete": "Restarigita",
|
||||
"certmanager_couldnt_fetch_intermediate_cert": "Ekvilibrigita kiam vi provis ricevi interajn atestilojn de Let's Encrypt. Atestita instalado / renovigo nuligita - bonvolu reprovi poste.",
|
||||
|
@ -432,14 +432,14 @@
|
|||
"certmanager_cert_install_success": "Ni Ĉifru atestilon nun instalitan por la domajno '{domain:s}'",
|
||||
"global_settings_bad_choice_for_enum": "Malbona elekto por agordo {setting:s}, ricevita '{choice:s}', sed disponeblaj elektoj estas: {available_choices:s}",
|
||||
"server_shutdown": "La servilo haltos",
|
||||
"log_tools_migrations_migrate_forward": "Migri antaŭen",
|
||||
"migration_0008_no_warning": "Supersalti vian SSH-agordon estu sekura, kvankam ĉi tio ne povas esti promesita! Ekfunkciu la migradon por superregi ĝin. Alie, vi ankaŭ povas salti la migradon, kvankam ĝi ne rekomendas.",
|
||||
"log_tools_migrations_migrate_forward": "Kuru migradoj",
|
||||
"migration_0008_no_warning": "Supersalti vian SSH-agordon estu sekura, kvankam tio ne povas esti promesita! Ekfunkciu la migradon por superregi ĝin. Alie, vi ankaŭ povas salti la migradon, kvankam ĝi ne rekomendas.",
|
||||
"regenconf_now_managed_by_yunohost": "La agorda dosiero '{conf}' nun estas administrata de YunoHost (kategorio {category}).",
|
||||
"server_reboot_confirm": "Ĉu la servilo rekomencos tuj, ĉu vi certas? [{answers:s}]",
|
||||
"log_app_install": "Instalu la aplikon '{}'",
|
||||
"service_description_dnsmasq": "Traktas rezolucion de domajna nomo (DNS)",
|
||||
"global_settings_unknown_type": "Neatendita situacio, la agordo {setting:s} ŝajnas havi la tipon {unknown_type:s} sed ĝi ne estas tipo subtenata de la sistemo.",
|
||||
"migration_0003_problematic_apps_warning": "Bonvolu noti, ke la sekvaj eventuale problemaj instalitaj programoj estis detektitaj. Ŝajnas, ke tiuj ne estis instalitaj el app_katalogo aŭ ne estas markitaj kiel \"funkciantaj\". Tial ne eblas garantii, ke ili ankoraŭ funkcios post la ĝisdatigo: {problematic_apps}",
|
||||
"migration_0003_problematic_apps_warning": "Bonvolu noti, ke la sekvaj eventuale problemaj instalitaj programoj estis detektitaj. Ŝajnas, ke tiuj ne estis instalitaj el aplika katalogo aŭ ne estas markitaj kiel \"funkciantaj\". Tial ne eblas garantii, ke ili ankoraŭ funkcios post la ĝisdatigo: {problematic_apps}",
|
||||
"domain_hostname_failed": "Ne povis agordi novan gastigilon. Ĉi tio eble kaŭzos problemon poste (eble bone).",
|
||||
"server_reboot": "La servilo rekomenciĝos",
|
||||
"regenconf_failed": "Ne povis regeneri la agordon por kategorio(j): {categories}",
|
||||
|
@ -497,28 +497,28 @@
|
|||
"app_install_failed": "Ne povis instali {app} : {error}",
|
||||
"app_install_script_failed": "Eraro okazis en la skripto de instalado de la app",
|
||||
"app_remove_after_failed_install": "Forigado de la app post la instala fiasko …",
|
||||
"diagnosis_basesystem_host": "Servilo funkcias Debian {debian_version}.",
|
||||
"diagnosis_basesystem_host": "Servilo funkcias Debian {debian_version}",
|
||||
"apps_catalog_init_success": "Aplikoj katalogsistemo inicializita !",
|
||||
"apps_catalog_updating": "Ĝisdatigante katalogo de aplikoj ...",
|
||||
"apps_catalog_updating": "Ĝisdatigante katalogo de aplikoj …",
|
||||
"apps_catalog_failed_to_download": "Ne eblas elŝuti la katalogon de {apps_catalog}: {error}",
|
||||
"apps_catalog_obsolete_cache": "La kaŝmemoro de la katalogo de programoj estas malplena aŭ malaktuala.",
|
||||
"apps_catalog_obsolete_cache": "La kaŝmemoro de la aplika katalogo estas malplena aŭ malaktuala.",
|
||||
"apps_catalog_update_success": "La aplika katalogo estis ĝisdatigita!",
|
||||
"diagnosis_basesystem_kernel": "Servilo funkcias Linuksan kernon {kernel_version}",
|
||||
"diagnosis_basesystem_ynh_single_version": "{package} versio: {version} ({repo})",
|
||||
"diagnosis_basesystem_ynh_main_version": "Servilo funkcias YunoHost {main_version} ({repo})",
|
||||
"diagnosis_basesystem_ynh_inconsistent_versions": "Vi prizorgas malkonsekvencajn versiojn de la YunoHost-pakoj... plej probable pro malsukcesa aŭ parta ĝisdatigo.",
|
||||
"diagnosis_display_tip_web": "Vi povas iri al la sekcio Diagnozo (en la hejmekrano) por vidi la trovitajn problemojn.",
|
||||
"diagnosis_cache_still_valid": "(Kaŝmemoro ankoraŭ validas por {category} diagnozo. Ankoraŭ ne re-diagnoza!)",
|
||||
"diagnosis_cache_still_valid": "(La kaŝmemoro ankoraŭ validas por {category} diagnozo. Vi ankoraŭ ne diagnozas ĝin!)",
|
||||
"diagnosis_cant_run_because_of_dep": "Ne eblas fari diagnozon por {category} dum estas gravaj problemoj rilataj al {dep}.",
|
||||
"diagnosis_display_tip_cli": "Vi povas aranĝi 'yunohost diagnosis show --issues' por aperigi la trovitajn problemojn.",
|
||||
"diagnosis_failed_for_category": "Diagnozo malsukcesis por kategorio '{category}': {error}",
|
||||
"app_upgrade_script_failed": "Eraro okazis en la skripto pri ĝisdatiga programo",
|
||||
"diagnosis_diskusage_verylow": "Stokado {mountpoint} (sur aparato {device)) restas nur {free} ({free_percent}%) spaco. Vi vere konsideru purigi iom da spaco.",
|
||||
"diagnosis_diskusage_verylow": "Stokado <code>{mountpoint}</code> (sur aparato <code> {device} </code>) nur restas {free} ({free_percent}%) spaco restanta (el {total}). Vi vere konsideru purigi iom da spaco !",
|
||||
"diagnosis_ram_verylow": "La sistemo nur restas {available} ({available_percent}%) RAM! (el {total})",
|
||||
"diagnosis_mail_outgoing_port_25_blocked": "Eliranta haveno 25 ŝajnas esti blokita. Vi devas provi malŝlosi ĝin en via agorda panelo de provizanto (aŭ gastiganto). Dume la servilo ne povos sendi retpoŝtojn al aliaj serviloj.",
|
||||
"diagnosis_http_bad_status_code": "Ne povis atingi vian servilon kiel atendite, ĝi redonis malbonan statuskodon. Povas esti, ke alia maŝino respondis anstataŭ via servilo. Vi devus kontroli, ke vi ĝuste redonas la havenon 80, ke via nginx-agordo ĝisdatigas kaj ke reverso-prokuro ne interbatalas.",
|
||||
"diagnosis_http_bad_status_code": "Ĝi aspektas kiel alia maŝino (eble via interreta enkursigilo) respondita anstataŭ via servilo.<br>1. La plej ofta kaŭzo por ĉi tiu afero estas, ke la haveno 80 (kaj 443) <a href='https://yunohost.org/isp_box_config'> ne estas ĝuste senditaj al via servilo </a>.<br>2. Pri pli kompleksaj agordoj: certigu, ke neniu fajroŝirmilo aŭ reverso-prokuro ne interbatalas.",
|
||||
"main_domain_changed": "La ĉefa domajno estis ŝanĝita",
|
||||
"yunohost_postinstall_end_tip": "La post-instalado finiĝis! Por fini vian agordon, bonvolu konsideri:\n - aldonado de unua uzanto tra la sekcio 'Uzantoj' de la retadreso (aŭ 'yunohost user create <username>' en komandlinio);\n - diagnozi problemojn atendantajn solvi por ke via servilo funkciu kiel eble plej glate tra la sekcio 'Diagnosis' de la retadministrado (aŭ 'yunohost diagnosis run' en komandlinio);\n - legante la partojn 'Finigi vian agordon' kaj 'Ekkoni Yunohost' en la administra dokumentado: https://yunohost.org/admindoc.",
|
||||
"yunohost_postinstall_end_tip": "La post-instalado finiĝis! Por fini vian agordon, bonvolu konsideri:\n - aldonado de unua uzanto tra la sekcio 'Uzantoj' de la retadreso (aŭ 'uzanto de yunohost kreu <uzantnomon>' en komandlinio);\n - diagnozi eblajn problemojn per la sekcio 'Diagnozo' de la reteja administrado (aŭ 'diagnoza yunohost-ekzekuto' en komandlinio);\n - legante la partojn 'Finigi vian agordon' kaj 'Ekkoni Yunohost' en la administra dokumentado: https://yunohost.org/admindoc.",
|
||||
"migration_description_0014_remove_app_status_json": "Forigi heredajn dosierojn",
|
||||
"diagnosis_ip_connected_ipv4": "La servilo estas konektita al la interreto per IPv4 !",
|
||||
"diagnosis_ip_no_ipv4": "La servilo ne havas funkciantan IPv4.",
|
||||
|
@ -526,21 +526,21 @@
|
|||
"diagnosis_ip_no_ipv6": "La servilo ne havas funkciantan IPv6.",
|
||||
"diagnosis_ip_not_connected_at_all": "La servilo tute ne ŝajnas esti konektita al la Interreto !?",
|
||||
"diagnosis_ip_dnsresolution_working": "Rezolucio pri domajna nomo funkcias !",
|
||||
"diagnosis_ip_weird_resolvconf": "DNS-rezolucio ŝajnas funkcii, sed atentu, ke vi ŝajnas uzi kutimon /etc/resolv.conf.",
|
||||
"diagnosis_ip_weird_resolvconf_details": "Anstataŭe, ĉi tiu dosiero estu ligilo kun /etc/resolvconf/run/resolv.conf mem montrante al 127.0.0.1 (dnsmasq). La efektivaj solvantoj devas agordi per /etc/resolv.dnsmasq.conf.",
|
||||
"diagnosis_dns_good_conf": "Bona DNS-agordo por domajno {domain} (kategorio {category})",
|
||||
"diagnosis_dns_bad_conf": "Malbona / mankas DNS-agordo por domajno {domain} (kategorio {category})",
|
||||
"diagnosis_ip_weird_resolvconf": "DNS-rezolucio ŝajnas funkcii, sed ŝajnas ke vi uzas kutiman <code>/etc/resolv.conf </code>.",
|
||||
"diagnosis_ip_weird_resolvconf_details": "La dosiero <code>/etc/resolv.conf</code> devas esti ligilo al <code> /etc/resolvconf/run/resolv.conf </code> indikante <code> 127.0.0.1 </code> (dnsmasq). Se vi volas permane agordi DNS-solvilojn, bonvolu redakti <code> /etc/resolv.dnsmasq.conf </code>.",
|
||||
"diagnosis_dns_good_conf": "DNS-registroj estas ĝuste agorditaj por domajno {domain} (kategorio {category})",
|
||||
"diagnosis_dns_bad_conf": "Iuj DNS-registroj mankas aŭ malĝustas por domajno {domain} (kategorio {category})",
|
||||
"diagnosis_ram_ok": "La sistemo ankoraŭ havas {available} ({available_percent}%) RAM forlasita de {total}.",
|
||||
"diagnosis_swap_none": "La sistemo tute ne havas interŝanĝon. Vi devus pripensi aldoni almenaŭ {recommended} da interŝanĝo por eviti situaciojn en kiuj la sistemo restas sen memoro.",
|
||||
"diagnosis_swap_notsomuch": "La sistemo havas nur {total}-interŝanĝon. Vi konsideru havi almenaŭ {recommended} por eviti situaciojn en kiuj la sistemo restas sen memoro.",
|
||||
"diagnosis_regenconf_manually_modified_details": "Ĉi tio probable estas bona tiel longe kiel vi scias kion vi faras;)!",
|
||||
"diagnosis_regenconf_manually_modified_details": "Ĉi tio probable estas bona, se vi scias, kion vi faras! YunoHost ĉesigos ĝisdatigi ĉi tiun dosieron aŭtomate ... Sed atentu, ke YunoHost-ĝisdatigoj povus enhavi gravajn rekomendajn ŝanĝojn. Se vi volas, vi povas inspekti la diferencojn per <cmd>yyunohost tools regen-conf {category} --dry-run --with-diff</cmd> kaj devigi la reset al la rekomendita agordo per <cmd>yunohost tools regen-conf {category} --force</cmd>",
|
||||
"diagnosis_regenconf_manually_modified_debian": "Agordodosiero {file} estis modifita permane kompare kun la defaŭlta Debian.",
|
||||
"diagnosis_regenconf_manually_modified_debian_details": "Ĉi tio probable estas bona, sed devas observi ĝin...",
|
||||
"diagnosis_security_all_good": "Neniu kritika sekureca vundebleco estis trovita.",
|
||||
"diagnosis_security_vulnerable_to_meltdown": "Vi ŝajnas vundebla al la kritiko-vundebleco de Meltdown",
|
||||
"diagnosis_no_cache": "Neniu diagnoza kaŝmemoro por kategorio '{category}'",
|
||||
"diagnosis_ip_broken_dnsresolution": "Rezolucio pri domajna nomo rompiĝas pro iu kialo ... Ĉu fajroŝirmilo blokas DNS-petojn ?",
|
||||
"diagnosis_ip_broken_resolvconf": "Rezolucio pri domajna nomo ŝajnas esti rompita en via servilo, kiu ŝajnas rilata al /etc/resolv.conf ne notante 127.0.0.1.",
|
||||
"diagnosis_ip_broken_dnsresolution": "Rezolucio pri domajna nomo rompiĝas pro iu kialo... Ĉu fajroŝirmilo blokas DNS-petojn ?",
|
||||
"diagnosis_ip_broken_resolvconf": "Rezolucio pri domajna nomo estas rompita en via servilo, kiu ŝajnas rilata al <code>/etc/resolv.conf</code> ne montrante al <code>127.0.0.1 </code>.",
|
||||
"diagnosis_dns_missing_record": "Laŭ la rekomendita DNS-agordo, vi devas aldoni DNS-registron kun\ntipo: {type}\nnomo: {name}\nvaloro: {value}",
|
||||
"diagnosis_dns_discrepancy": "La DNS-registro kun tipo {type} kaj nomo {name} ne kongruas kun la rekomendita agordo.\nNuna valoro: {current}\nEsceptita valoro: {value}",
|
||||
"diagnosis_services_conf_broken": "Agordo estas rompita por servo {service} !",
|
||||
|
@ -549,7 +549,7 @@
|
|||
"diagnosis_swap_ok": "La sistemo havas {total} da interŝanĝoj!",
|
||||
"diagnosis_mail_ougoing_port_25_ok": "Eliranta haveno 25 ne estas blokita kaj retpoŝto povas esti sendita al aliaj serviloj.",
|
||||
"diagnosis_regenconf_allgood": "Ĉiuj agordaj dosieroj kongruas kun la rekomendita agordo!",
|
||||
"diagnosis_regenconf_manually_modified": "Agordodosiero {file} estis permane modifita.",
|
||||
"diagnosis_regenconf_manually_modified": "Agordodosiero <code>{file}</code> ŝajnas esti permane modifita.",
|
||||
"diagnosis_description_ip": "Interreta konektebleco",
|
||||
"diagnosis_description_dnsrecords": "Registroj DNS",
|
||||
"diagnosis_description_services": "Servo kontrolas staton",
|
||||
|
@ -557,31 +557,31 @@
|
|||
"diagnosis_description_security": "Sekurecaj kontroloj",
|
||||
"diagnosis_ports_could_not_diagnose": "Ne povis diagnozi, ĉu haveblaj havenoj de ekstere.",
|
||||
"diagnosis_ports_could_not_diagnose_details": "Eraro: {error}",
|
||||
"diagnosis_services_bad_status_tip": "Vi povas provi rekomenci la servon, kaj se ĝi ne funkcias, trarigardu la servajn protokolojn uzante 'yunohost service log {service}' aŭ tra la sekcio 'Servoj' de la retadreso.",
|
||||
"diagnosis_services_bad_status_tip": "Vi povas provi <a href='#/services/{service}'>rekomenci la servon </a>, kaj se ĝi ne funkcias, rigardu <a href='#/services/{service}'>La servaj registroj en reteja</a> (el la komandlinio, vi povas fari tion per <cmd>yunohost service restart {service}</cmd> kaj<cmd>yunohost service log {service}</cmd>).",
|
||||
"diagnosis_security_vulnerable_to_meltdown_details": "Por ripari tion, vi devas ĝisdatigi vian sistemon kaj rekomenci por ŝarĝi la novan linux-kernon (aŭ kontaktu vian servilan provizanton se ĉi tio ne funkcias). Vidu https://meltdownattack.com/ por pliaj informoj.",
|
||||
"diagnosis_description_basesystem": "Baza sistemo",
|
||||
"diagnosis_description_regenconf": "Sistemaj agordoj",
|
||||
"main_domain_change_failed": "Ne eblas ŝanĝi la ĉefan domajnon",
|
||||
"log_domain_main_domain": "Faru '{}' kiel ĉefa domajno",
|
||||
"diagnosis_http_timeout": "Tempolimigita dum provado kontakti vian servilon de ekstere. Ĝi ŝajnas esti neatingebla. Vi devus kontroli, ke vi ĝuste redonas la havenon 80, ke nginx funkcias kaj ke fajroŝirmilo ne interbatalas.",
|
||||
"log_domain_main_domain": "Faru de '{}' la ĉefa domajno",
|
||||
"diagnosis_http_timeout": "Tempolimigita dum provado kontakti vian servilon de ekstere. Ĝi ŝajnas esti neatingebla.<br>1. La plej ofta kaŭzo por ĉi tiu afero estas, ke la haveno 80 (kaj 443) <a href='https://yunohost.org/isp_box_config'>ne estas ĝuste senditaj al via servilo</a>.<br>2. Vi ankaŭ devas certigi, ke la servo nginx funkcias<br>3. Pri pli kompleksaj agordoj: certigu, ke neniu fajroŝirmilo aŭ reverso-prokuro ne interbatalas.",
|
||||
"diagnosis_http_connection_error": "Rilata eraro: ne povis konektiĝi al la petita domajno, tre probable ĝi estas neatingebla.",
|
||||
"migration_description_0013_futureproof_apps_catalog_system": "Migru al la nova katalogosistemo pri estontecaj programoj",
|
||||
"diagnosis_ignored_issues": "(+ {nb_ignored} ignorataj aferoj))",
|
||||
"diagnosis_found_errors": "Trovis {errors} signifa(j) afero(j) rilata al {category}!",
|
||||
"diagnosis_found_errors_and_warnings": "Trovis {errors} signifaj problemo (j) (kaj {warnings} averto) rilataj al {category}!",
|
||||
"diagnosis_diskusage_low": "Stokado {mountpoint} (sur aparato {device)) restas nur {free} ({free_percent}%) spaco. Estu zorgema.",
|
||||
"diagnosis_diskusage_ok": "Stokado {mountpoint} (sur aparato {device) ankoraŭ restas {free} ({free_percent}%) spaco!",
|
||||
"diagnosis_diskusage_low": "Stokado <code>{mountpoint}</code> (sur aparato <code>{device}</code>) nur restas {free} ({free_percent}%) spaco restanta (el {total}). Estu zorgema.",
|
||||
"diagnosis_diskusage_ok": "Stokado <code>{mountpoint}</code> (sur aparato <code>{device}</code>) ankoraŭ restas {free} ({free_percent}%) spaco (el {total})!",
|
||||
"global_settings_setting_pop3_enabled": "Ebligu la protokolon POP3 por la poŝta servilo",
|
||||
"diagnosis_unknown_categories": "La jenaj kategorioj estas nekonataj: {categories}",
|
||||
"diagnosis_services_running": "Servo {service} funkcias!",
|
||||
"diagnosis_ports_unreachable": "Haveno {port} ne atingeblas de ekstere.",
|
||||
"diagnosis_ports_ok": "Haveno {port} atingeblas de ekstere.",
|
||||
"diagnosis_ports_needed_by": "Eksponi ĉi tiun havenon necesas por servo {service}",
|
||||
"diagnosis_ports_forwarding_tip": "Por solvi ĉi tiun problemon, plej probable vi devas agordi la plusendon de haveno en via interreta enkursigilo kiel priskribite en https://yunohost.org/isp_box_config",
|
||||
"diagnosis_ports_needed_by": "Eksponi ĉi tiun havenon necesas por {category} funkcioj (servo {service})",
|
||||
"diagnosis_ports_forwarding_tip": "Por solvi ĉi tiun problemon, vi plej verŝajne devas agordi la plusendon de haveno en via interreta enkursigilo kiel priskribite en <a href='https://yunohost.org/isp_box_config'>https://yunohost.org/isp_box_config</a>",
|
||||
"diagnosis_http_could_not_diagnose": "Ne povis diagnozi, ĉu atingeblas domajno de ekstere.",
|
||||
"diagnosis_http_could_not_diagnose_details": "Eraro: {error}",
|
||||
"diagnosis_http_ok": "Domajno {domain} atingeblas de ekstere.",
|
||||
"diagnosis_http_unreachable": "Domajno {domain} estas atingebla per HTTP de ekstere.",
|
||||
"diagnosis_http_ok": "Domajno {domain} atingebla per HTTP de ekster la loka reto.",
|
||||
"diagnosis_http_unreachable": "Domajno {domain} ŝajnas neatingebla per HTTP de ekster la loka reto.",
|
||||
"domain_cannot_remove_main_add_new_one": "Vi ne povas forigi '{domain:s}' ĉar ĝi estas la ĉefa domajno kaj via sola domajno, vi devas unue aldoni alian domajnon uzante ''yunohost domain add <another-domain.com>', tiam agordi kiel ĉefan domajnon uzante 'yunohost domain main-domain -n <another-domain.com>' kaj tiam vi povas forigi la domajnon' {domain:s} 'uzante' yunohost domain remove {domain:s} '.'",
|
||||
"permission_require_account": "Permesilo {permission} nur havas sencon por uzantoj, kiuj havas konton, kaj tial ne rajtas esti ebligitaj por vizitantoj.",
|
||||
"diagnosis_found_warnings": "Trovitaj {warnings} ero (j) kiuj povus esti plibonigitaj por {category}.",
|
||||
|
@ -591,5 +591,50 @@
|
|||
"diagnosis_description_mail": "Retpoŝto",
|
||||
"log_app_action_run": "Funkciigu agon de la apliko '{}'",
|
||||
"log_app_config_show_panel": "Montri la agordan panelon de la apliko '{}'",
|
||||
"log_app_config_apply": "Apliki agordon al la apliko '{}'"
|
||||
"log_app_config_apply": "Apliki agordon al la apliko '{}'",
|
||||
"diagnosis_never_ran_yet": "Ŝajnas, ke ĉi tiu servilo estis instalita antaŭ nelonge kaj estas neniu diagnoza raporto por montri. Vi devas komenci kurante plenan diagnozon, ĉu de la retadministro aŭ uzante 'yunohost diagnosis run' el la komandlinio.",
|
||||
"certmanager_warning_subdomain_dns_record": "Subdominio '{subdomain:s}' ne solvas al la sama IP-adreso kiel '{domain:s}'. Iuj funkcioj ne estos haveblaj ĝis vi riparos ĉi tion kaj regeneros la atestilon.",
|
||||
"diagnosis_basesystem_hardware": "Arkitekturo de servila aparataro estas {virt} {arch}",
|
||||
"diagnosis_basesystem_hardware_board": "Servilo-tabulo-modelo estas {model}",
|
||||
"diagnosis_description_web": "Reta",
|
||||
"domain_cannot_add_xmpp_upload": "Vi ne povas aldoni domajnojn per 'xmpp-upload'. Ĉi tiu speco de nomo estas rezervita por la XMPP-alŝuta funkcio integrita en YunoHost.",
|
||||
"group_already_exist_on_system_but_removing_it": "Grupo {group} jam ekzistas en la sistemaj grupoj, sed YunoHost forigos ĝin …",
|
||||
"diagnosis_mail_outgoing_port_25_blocked_relay_vpn": "Iuj provizantoj ne lasos vin malŝlosi elirantan havenon 25 ĉar ili ne zorgas pri Neta Neŭtraleco.<br>- Iuj el ili provizas la alternativon de <a href='https://yunohost.org/#/smtp_relay'>uzante retpoŝtan servilon</a> kvankam ĝi implicas, ke la relajso povos spioni vian retpoŝtan trafikon.<br>- Amika privateco estas uzi VPN * kun dediĉita publika IP * por pretervidi ĉi tiun specon. de limoj. Vidu <a href='https://yunohost.org/#/vpn_avantage'>https://yunohost.org/#/vpn_avantage</a><br>- Vi ankaŭ povas konsideri ŝanĝi al <a href='https://yunohost.org/#/isp'>pli neta neŭtraleco-amika provizanto</a>",
|
||||
"diagnosis_mail_fcrdns_nok_details": "Vi unue provu agordi la inversan DNS kun <code>{ehlo_domain}</code> en via interreta enkursigilo aŭ en via retprovizanta interfaco. (Iuj gastigantaj provizantoj eble postulas, ke vi sendu al ili subtenan bileton por ĉi tio).",
|
||||
"diagnosis_mail_fcrdns_nok_alternatives_4": "Iuj provizantoj ne lasos vin agordi vian inversan DNS (aŭ ilia funkcio povus esti rompita ...). Se vi spertas problemojn pro tio, konsideru jenajn solvojn:<br>- Iuj ISP provizas la alternativon de <a href='https://yunohost.org/#/smtp_relay'>uzante retpoŝtan servilon</a> kvankam ĝi implicas, ke la relajso povos spioni vian retpoŝtan trafikon.<br>- Interreta privateco estas uzi VPN * kun dediĉita publika IP * por preterpasi ĉi tiajn limojn. Vidu <a href='https://yunohost.org/#/vpn_avantage'>https://yunohost.org/#/vpn_avantage</a><br>- Finfine eblas ankaŭ <a href='https://yunohost.org/#/isp'>ŝanĝo de provizanto</a>",
|
||||
"diagnosis_display_tip": "Por vidi la trovitajn problemojn, vi povas iri al la sekcio pri Diagnozo de la reteja administrado, aŭ funkcii \"yunohost diagnosis show --issues\" el la komandlinio.",
|
||||
"diagnosis_ip_global": "Tutmonda IP: <code>{global} </code>",
|
||||
"diagnosis_ip_local": "Loka IP: <code>{local} </code>",
|
||||
"diagnosis_dns_point_to_doc": "Bonvolu kontroli la dokumentaron ĉe <a href='https://yunohost.org/dns_config'>https://yunohost.org/dns_config</a> se vi bezonas helpon pri agordo de DNS-registroj.",
|
||||
"diagnosis_mail_outgoing_port_25_ok": "La SMTP-poŝta servilo kapablas sendi retpoŝtojn (eliranta haveno 25 ne estas blokita).",
|
||||
"diagnosis_mail_outgoing_port_25_blocked_details": "Vi unue provu malŝlosi elirantan havenon 25 en via interreta enkursigilo aŭ en via retprovizanta interfaco. (Iuj gastigantaj provizantoj eble postulas, ke vi sendu al ili subtenan bileton por ĉi tio).",
|
||||
"diagnosis_mail_ehlo_unreachable": "La SMTP-poŝta servilo estas neatingebla de ekstere sur IPv {ipversion}. Ĝi ne povos ricevi retpoŝtojn.",
|
||||
"diagnosis_mail_ehlo_ok": "La SMTP-poŝta servilo atingeblas de ekstere kaj tial kapablas ricevi retpoŝtojn !",
|
||||
"diagnosis_mail_ehlo_unreachable_details": "Ne povis malfermi rilaton sur la haveno 25 al via servilo en IPv {ipversion}. Ĝi ŝajnas esti neatingebla.<br>1. La plej ofta kaŭzo por ĉi tiu afero estas, ke la haveno 25 <a href='https://yunohost.org/isp_box_config'>ne estas ĝuste sendita al via servilo </a>.<br>2. Vi ankaŭ devas certigi, ke servo-prefikso funkcias.<br>3. Pri pli kompleksaj agordoj: certigu, ke neniu fajroŝirmilo aŭ reverso-prokuro ne interbatalas.",
|
||||
"diagnosis_mail_ehlo_bad_answer": "Ne-SMTP-servo respondita sur la haveno 25 sur IPv {ipversion}",
|
||||
"diagnosis_mail_ehlo_bad_answer_details": "Povas esti ke alia maŝino respondas anstataŭ via servilo.",
|
||||
"diagnosis_mail_ehlo_wrong": "Malsama SMTP-poŝta servilo respondas pri IPv {ipversion}. Via servilo probable ne povos ricevi retpoŝtojn.",
|
||||
"diagnosis_mail_ehlo_wrong_details": "La EHLO ricevita de la fora diagnozilo en IPv {ipversion} diferencas de la domajno de via servilo.<br>Ricevita EHLO: <code>{wrong_ehlo} </code><br>Atendita: <code>{right_ehlo} </code><br>La plej ofta kaŭzo por ĉi tiu afero estas, ke la haveno 25 <a href='https://yunohost.org/isp_box_config'>ne estas ĝuste sendita al via servilo </a>. Alternative, certigu, ke neniu fajroŝirmilo aŭ reverso-prokuro ne interbatalas.",
|
||||
"diagnosis_mail_ehlo_could_not_diagnose": "Ne povis diagnozi ĉu postfiksa poŝta servilo atingebla de ekstere en IPv {ipversion}.",
|
||||
"diagnosis_mail_ehlo_could_not_diagnose_details": "Eraro: {error}",
|
||||
"diagnosis_mail_fcrdns_ok": "Via inversa DNS estas ĝuste agordita!",
|
||||
"diagnosis_mail_fcrdns_dns_missing": "Neniu inversa DNS estas difinita en IPv {ipversion}. Iuj retpoŝtoj povas malsukcesi liveri aŭ povus esti markitaj kiel spamo.",
|
||||
"diagnosis_mail_fcrdns_nok_alternatives_6": "Iuj provizantoj ne lasos vin agordi vian inversan DNS (aŭ ilia funkcio povus esti rompita ...). Se via inversa DNS estas ĝuste agordita por IPv4, vi povas provi malebligi la uzon de IPv6 kiam vi sendas retpoŝtojn per funkciado <cmd>yunohost-agordoj set smtp.allow_ipv6 -v off </cmd>. Noto: ĉi tiu lasta solvo signifas, ke vi ne povos sendi aŭ ricevi retpoŝtojn de la malmultaj IPv6-nur serviloj tie.",
|
||||
"diagnosis_mail_fcrdns_different_from_ehlo_domain": "La inversa DNS ne ĝuste agordis en IPv {ipversion}. Iuj retpoŝtoj povas malsukcesi liveri aŭ povus esti markitaj kiel spamo.",
|
||||
"diagnosis_mail_fcrdns_different_from_ehlo_domain_details": "Aktuala reverso DNS: <code>{rdns_domain}</code><br>Atendita valoro: <code>{ehlo_domain}</code>",
|
||||
"diagnosis_mail_blacklist_ok": "La IP kaj domajnoj uzataj de ĉi tiu servilo ne ŝajnas esti listigitaj nigre",
|
||||
"diagnosis_mail_blacklist_listed_by": "Via IP aŭ domajno <code>{item}</code> estas listigita en {blacklist_name}",
|
||||
"diagnosis_mail_blacklist_reason": "La negra listo estas: {reason}",
|
||||
"diagnosis_mail_blacklist_website": "Post identigi kial vi listigas kaj riparis ĝin, bonvolu peti forigi vian IP aŭ domenion sur {blacklist_website}",
|
||||
"diagnosis_mail_queue_ok": "{nb_pending} pritraktataj retpoŝtoj en la retpoŝtaj vostoj",
|
||||
"diagnosis_mail_queue_unavailable": "Ne povas konsulti multajn pritraktitajn retpoŝtojn en vosto",
|
||||
"diagnosis_mail_queue_unavailable_details": "Eraro: {error}",
|
||||
"diagnosis_mail_queue_too_big": "Tro multaj pritraktataj retpoŝtoj en retpoŝto ({nb_pending} retpoŝtoj)",
|
||||
"diagnosis_ports_partially_unreachable": "Haveno {port} ne atingebla de ekstere en IPv {failed}.",
|
||||
"diagnosis_http_hairpinning_issue": "Via loka reto ŝajne ne havas haŭtadon.",
|
||||
"diagnosis_http_hairpinning_issue_details": "Ĉi tio probable estas pro via ISP-skatolo / enkursigilo. Rezulte, homoj de ekster via loka reto povos aliri vian servilon kiel atendite, sed ne homoj de interne de la loka reto (kiel vi, probable?) Kiam uzas la domajnan nomon aŭ tutmondan IP. Eble vi povas plibonigi la situacion per rigardado al <a href='https://yunohost.org/dns_local_network'>https://yunohost.org/dns_local_network</a>",
|
||||
"diagnosis_http_partially_unreachable": "Domajno {domain} ŝajnas neatingebla per HTTP de ekster la loka reto en IPv {failed}, kvankam ĝi funkcias en IPv {passed}.",
|
||||
"diagnosis_http_nginx_conf_not_up_to_date": "La nginx-agordo de ĉi tiu domajno ŝajnas esti modifita permane, kaj malhelpas YunoHost diagnozi ĉu ĝi atingeblas per HTTP.",
|
||||
"diagnosis_http_nginx_conf_not_up_to_date_details": "Por solvi la situacion, inspektu la diferencon per la komandlinio per <cmd>yunohost tools regen-conf nginx --dry-run --with-diff</cmd> kaj se vi aranĝas, apliku la ŝanĝojn per <cmd>yunohost tools regen-conf nginx --force</cmd>.",
|
||||
"global_settings_setting_smtp_allow_ipv6": "Permesu la uzon de IPv6 por ricevi kaj sendi poŝton"
|
||||
}
locales/es.json
@ -2,7 +2,7 @@
"action_invalid": "Acción no válida '{action:s} 1'",
|
||||
"admin_password": "Contraseña administrativa",
|
||||
"admin_password_change_failed": "No se puede cambiar la contraseña",
|
||||
"admin_password_changed": "La contraseña de administración ha sido cambiada",
|
||||
"admin_password_changed": "La contraseña de administración fue cambiada",
|
||||
"app_already_installed": "{app:s} ya está instalada",
|
||||
"app_argument_choice_invalid": "Use una de estas opciones «{choices:s}» para el argumento «{name:s}»",
|
||||
"app_argument_invalid": "Elija un valor válido para el argumento «{name:s}»: {error:s}",
@ -41,16 +41,16 @@
"backup_hook_unknown": "El gancho «{hook:s}» de la copia de seguridad es desconocido",
|
||||
"backup_invalid_archive": "Esto no es un archivo de respaldo",
|
||||
"backup_nothings_done": "Nada que guardar",
|
||||
"backup_output_directory_forbidden": "Elija un directorio de salida diferente. No se pueden crear copias de seguridad en las subcarpetas de /bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var o /home/yunohost.backup/archives",
|
||||
"backup_output_directory_forbidden": "Elija un directorio de salida diferente. Las copias de seguridad no se pueden crear en /bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var o /home/yunohost.backup/archives subcarpetas",
|
||||
"backup_output_directory_not_empty": "Debe elegir un directorio de salida vacío",
|
||||
"backup_output_directory_required": "Debe proporcionar un directorio de salida para la copia de seguridad",
|
||||
"backup_running_hooks": "Ejecutando los hooks de copia de seguridad...",
|
||||
"custom_app_url_required": "Debe proporcionar una URL para actualizar su aplicación personalizada {app:s}",
|
||||
"domain_cert_gen_failed": "No se pudo generar el certificado",
|
||||
"domain_created": "Dominio creado",
|
||||
"domain_creation_failed": "No se pudo crear el dominio {domain}: {error}",
|
||||
"domain_creation_failed": "No se puede crear el dominio {domain}: {error}",
|
||||
"domain_deleted": "Dominio eliminado",
|
||||
"domain_deletion_failed": "No se pudo eliminar el dominio {domain}: {error}",
|
||||
"domain_deletion_failed": "No se puede eliminar el dominio {domain}: {error}",
|
||||
"domain_dyndns_already_subscribed": "Ya se ha suscrito a un dominio de DynDNS",
|
||||
"domain_dyndns_root_unknown": "Dominio raíz de DynDNS desconocido",
|
||||
"domain_exists": "El dominio ya existe",
@ -117,20 +117,20 @@
"restore_running_app_script": "Restaurando la aplicación «{app:s}»…",
|
||||
"restore_running_hooks": "Ejecutando los ganchos de restauración…",
|
||||
"service_add_failed": "No se pudo añadir el servicio «{service:s}»",
|
||||
"service_added": "Añadido el servicio «{service:s}»",
|
||||
"service_added": "Se agregó el servicio '{service:s}'",
|
||||
"service_already_started": "El servicio «{service:s}» ya está funcionando",
|
||||
"service_already_stopped": "El servicio «{service:s}» ya ha sido detenido",
|
||||
"service_cmd_exec_failed": "No se pudo ejecutar la orden «{command:s}»",
|
||||
"service_disable_failed": "No se pudo desactivar el servicio «{service:s}»\n\nRegistro de servicios recientes:{logs:s}",
|
||||
"service_disabled": "El servicio «{service:s}» ha sido desactivado",
|
||||
"service_enable_failed": "No se pudo activar el servicio «{service:s}»\n\nRegistro de servicios recientes:{logs:s}",
|
||||
"service_enabled": "El servicio «{service:s}» ha sido desactivado",
|
||||
"service_disable_failed": "No se pudo hacer que el servicio '{service:s}' no se iniciara en el arranque.\n\nRegistros de servicio recientes: {logs:s}",
|
||||
"service_disabled": "El servicio '{service:s}' ya no se iniciará cuando se inicie el sistema.",
|
||||
"service_enable_failed": "No se pudo hacer que el servicio '{service:s}' se inicie automáticamente en el arranque.\n\nRegistros de servicio recientes: {logs s}",
|
||||
"service_enabled": "El servicio '{service:s}' ahora se iniciará automáticamente durante el arranque del sistema.",
|
||||
"service_remove_failed": "No se pudo eliminar el servicio «{service:s}»",
|
||||
"service_removed": "Eliminado el servicio «{service:s}»",
|
||||
"service_removed": "Servicio '{service:s}' eliminado",
|
||||
"service_start_failed": "No se pudo iniciar el servicio «{service:s}»\n\nRegistro de servicios recientes:{logs:s}",
|
||||
"service_started": "Iniciado el servicio «{service:s}»",
|
||||
"service_started": "El servicio '{service:s}' comenzó",
|
||||
"service_stop_failed": "No se pudo detener el servicio «{service:s}»\n\nRegistro de servicios recientes:{logs:s}",
|
||||
"service_stopped": "El servicio «{service:s}» se detuvo",
|
||||
"service_stopped": "Servicio '{service:s}' detenido",
|
||||
"service_unknown": "Servicio desconocido '{service:s}'",
|
||||
"ssowat_conf_generated": "Generada la configuración de SSOwat",
|
||||
"ssowat_conf_updated": "Actualizada la configuración de SSOwat",
@ -161,7 +161,7 @@
"yunohost_installing": "Instalando YunoHost…",
|
||||
"yunohost_not_installed": "YunoHost no está correctamente instalado. Ejecute «yunohost tools postinstall»",
|
||||
"ldap_init_failed_to_create_admin": "La inicialización de LDAP no pudo crear el usuario «admin»",
|
||||
"mailbox_used_space_dovecot_down": "El servicio de correo Dovecot debe estar funcionando si desea obtener el espacio usado por el buzón de correo",
|
||||
"mailbox_used_space_dovecot_down": "El servicio de buzón Dovecot debe estar activo si desea recuperar el espacio usado del buzón",
|
||||
"certmanager_attempt_to_replace_valid_cert": "Está intentando sobrescribir un certificado correcto y válido para el dominio {domain:s}! (Use --force para omitir este mensaje)",
|
||||
"certmanager_domain_unknown": "Dominio desconocido «{domain:s}»",
|
||||
"certmanager_domain_cert_not_selfsigned": "El certificado para el dominio {domain:s} no es un certificado autofirmado. ¿Está seguro de que quiere reemplazarlo? (Use «--force» para hacerlo)",
@ -170,7 +170,7 @@
"certmanager_attempt_to_renew_valid_cert": "¡El certificado para el dominio «{domain:s}» no está a punto de expirar! (Puede usar --force si sabe lo que está haciendo)",
|
||||
"certmanager_domain_http_not_working": "Parece que no se puede acceder al dominio {domain:s} a través de HTTP. Compruebe que la configuración del DNS y de NGINX es correcta",
|
||||
"certmanager_error_no_A_record": "No se ha encontrado un registro DNS «A» para el dominio {domain:s}. Debe hacer que su nombre de dominio apunte a su máquina para poder instalar un certificado de Let's Encrypt. (Si sabe lo que está haciendo, use «--no-checks» para desactivar esas comprobaciones.)",
|
||||
"certmanager_domain_dns_ip_differs_from_public_ip": "El registro «A» del DNS para el dominio «{domain:s}» es diferente de la IP de este servidor. Si recientemente modificó su registro A, espere a que se propague (existen algunos verificadores de propagación de DNS disponibles en línea). (Si sabe lo que está haciendo, use «--no-checks» para desactivar esas comprobaciones.)",
|
||||
"certmanager_domain_dns_ip_differs_from_public_ip": "El registro DNS 'A' para el dominio '{domain:s}' es diferente de la IP de este servidor. Si recientemente modificó su registro A, espere a que se propague (algunos verificadores de propagación de DNS están disponibles en línea). (Si sabe lo que está haciendo, use '--no-checks' para desactivar esos cheques)",
|
||||
"certmanager_cannot_read_cert": "Se ha producido un error al intentar abrir el certificado actual para el dominio {domain:s} (archivo: {file:s}), razón: {reason:s}",
|
||||
"certmanager_cert_install_success_selfsigned": "Instalado correctamente un certificado autofirmado para el dominio «{domain:s}»",
|
||||
"certmanager_cert_install_success": "Instalado correctamente un certificado de Let's Encrypt para el dominio «{domain:s}»",
@ -179,7 +179,7 @@
"certmanager_cert_signing_failed": "No se pudo firmar el nuevo certificado",
|
||||
"certmanager_no_cert_file": "No se pudo leer el certificado para el dominio {domain:s} (archivo: {file:s})",
|
||||
"certmanager_conflicting_nginx_file": "No se pudo preparar el dominio para el desafío ACME: el archivo de configuración de NGINX {filepath:s} está en conflicto y debe ser eliminado primero",
|
||||
"domain_cannot_remove_main": "No se puede eliminar el dominio principal. Primero debes configurar otro utilizando la linea de comando 'yunohost domain main-domain -n <otro_dominio>' donde <otro_dominio> es parte de esta lista: {other_domains:s}",
|
||||
"domain_cannot_remove_main": "No puede eliminar '{domain:s}' ya que es el dominio principal, primero debe configurar otro dominio como el dominio principal usando 'yunohost domain main-domain -n <otro dominio>'; Aquí está la lista de dominios candidatos: {other_domains:s}",
|
||||
"certmanager_self_ca_conf_file_not_found": "No se pudo encontrar el archivo de configuración para la autoridad de autofirma (archivo: {file:s})",
|
||||
"certmanager_unable_to_parse_self_CA_name": "No se pudo procesar el nombre de la autoridad de autofirma (archivo: {file:s})",
|
||||
"domains_available": "Dominios disponibles:",
@ -189,7 +189,7 @@
"certmanager_couldnt_fetch_intermediate_cert": "Tiempo de espera agotado intentando obtener el certificado intermedio de Let's Encrypt. Cancelada la instalación o renovación del certificado. Vuelva a intentarlo más tarde.",
|
||||
"domain_hostname_failed": "No se pudo establecer un nuevo nombre de anfitrión («hostname»). Esto podría causar problemas más tarde (no es seguro... podría ir bien).",
|
||||
"yunohost_ca_creation_success": "Creada la autoridad de certificación local.",
|
||||
"app_already_installed_cant_change_url": "Esta aplicación ya está instalada. No se puede cambiar el URL únicamente mediante esta función. Compruebe si está disponible la opción `app changeurl`.",
|
||||
"app_already_installed_cant_change_url": "Esta aplicación ya está instalada. La URL no se puede cambiar solo con esta función. Marque `app changeurl` si está disponible.",
|
||||
"app_change_url_failed_nginx_reload": "No se pudo recargar NGINX. Esta es la salida de «nginx -t»:\n{nginx_errors:s}",
|
||||
"app_change_url_identical_domains": "El antiguo y nuevo dominio/url_path son idénticos ('{domain:s} {path:s}'), no se realizarán cambios.",
|
||||
"app_change_url_no_script": "La aplicación «{app_name:s}» aún no permite la modificación de URLs. Quizás debería actualizarla.",
@ -222,8 +222,8 @@
"dyndns_could_not_check_provide": "No se pudo verificar si {provider:s} puede ofrecer {domain:s}.",
|
||||
"dyndns_domain_not_provided": "El proveedor de DynDNS {provider:s} no puede proporcionar el dominio {domain:s}.",
|
||||
"experimental_feature": "Aviso : esta funcionalidad es experimental y no se considera estable, no debería usarla a menos que sepa lo que está haciendo.",
|
||||
"good_practices_about_user_password": "Está a punto de establecer una nueva contraseña de usuario. La contraseña debería de ser de al menos 8 caracteres, aunque es una buena práctica usar una contraseña más extensa (básicamente una frase) y/o usar caracteres de varias clases (mayúsculas, minúsculas, números y caracteres especiales).",
|
||||
"password_listed": "Esta contraseña es una de las más usadas en el mundo. Elija algo más único.",
|
||||
"good_practices_about_user_password": "Ahora está a punto de definir una nueva contraseña de usuario. La contraseña debe tener al menos 8 caracteres, aunque es una buena práctica usar una contraseña más larga (es decir, una frase de contraseña) y / o una variación de caracteres (mayúsculas, minúsculas, dígitos y caracteres especiales).",
|
||||
"password_listed": "Esta contraseña se encuentra entre las contraseñas más utilizadas en el mundo. Por favor, elija algo más único.",
|
||||
"password_too_simple_1": "La contraseña debe tener al menos 8 caracteres de longitud",
|
||||
"password_too_simple_2": "La contraseña tiene que ser de al menos 8 caracteres de longitud e incluir un número y caracteres en mayúsculas y minúsculas",
|
||||
"password_too_simple_3": "La contraseña tiene que ser de al menos 8 caracteres de longitud e incluir un número, mayúsculas, minúsculas y caracteres especiales",
@ -232,7 +232,7 @@
"update_apt_cache_warning": "Algo fue mal durante la actualización de la caché de APT (gestor de paquetes de Debian). Aquí tiene un volcado de las líneas de sources.list que podría ayudarle a identificar las líneas problemáticas:\n{sourceslist}",
|
||||
"update_apt_cache_failed": "No se pudo actualizar la caché de APT (gestor de paquetes de Debian). Aquí tiene un volcado de las líneas de sources.list que podría ayudarle a identificar las líneas problemáticas:\n{sourceslist}",
|
||||
"tools_upgrade_special_packages_completed": "Actualización de paquetes de YunoHost completada.\nPulse [Intro] para regresar a la línea de órdenes",
|
||||
"tools_upgrade_special_packages_explanation": "Esta acción terminará pero la actualización especial real continuará en segundo plano. No inicie ninguna otra acción en su servidor en aproximadamente 10 minutos (dependiendo de la velocidad de su hardware). Una vez hecho, podría tener que volver a iniciar sesión en la administración web. El registro de actualización estará disponible en Herramientas → Registro (en la página de administración web) o mediante «yunohost log list» (desde la línea de órdenes).",
|
||||
"tools_upgrade_special_packages_explanation": "La actualización especial continuará en segundo plano. No inicie ninguna otra acción en su servidor durante los próximos 10 minutos (dependiendo de la velocidad del hardware). Después de esto, es posible que deba volver a iniciar sesión en el administrador web. El registro de actualización estará disponible en Herramientas → Registro (en el webadmin) o usando 'yunohost log list' (desde la línea de comandos).",
|
||||
"tools_upgrade_special_packages": "Actualizando ahora paquetes «especiales» (relacionados con YunoHost)…",
|
||||
"tools_upgrade_regular_packages_failed": "No se pudieron actualizar los paquetes: {packages_list}",
|
||||
"tools_upgrade_regular_packages": "Actualizando ahora paquetes «normales» (no relacionados con YunoHost)…",
@ -241,11 +241,11 @@
"tools_upgrade_cant_both": "No se puede actualizar el sistema y las aplicaciones al mismo tiempo",
|
||||
"tools_upgrade_at_least_one": "Especifique «--apps», o «--system»",
|
||||
"this_action_broke_dpkg": "Esta acción rompió dpkg/APT(los gestores de paquetes del sistema)… Puede tratar de solucionar este problema conectando mediante SSH y ejecutando `sudo dpkg --configure -a`.",
|
||||
"service_reloaded_or_restarted": "El servicio «{service:s}» ha sido recargado o reiniciado",
|
||||
"service_reloaded_or_restarted": "El servicio '{service:s}' fue recargado o reiniciado",
|
||||
"service_reload_or_restart_failed": "No se pudo recargar o reiniciar el servicio «{service:s}»\n\nRegistro de servicios recientes:{logs:s}",
|
||||
"service_restarted": "Reiniciado el servicio «{service:s}»",
|
||||
"service_restarted": "Servicio '{service:s}' reiniciado",
|
||||
"service_restart_failed": "No se pudo reiniciar el servicio «{service:s}»\n\nRegistro de servicios recientes:{logs:s}",
|
||||
"service_reloaded": "El servicio «{service:s}» ha sido recargado",
|
||||
"service_reloaded": "Servicio '{service:s}' recargado",
|
||||
"service_reload_failed": "No se pudo recargar el servicio «{service:s}»\n\nRegistro de servicios recientes:{logs:s}",
|
||||
"service_regen_conf_is_deprecated": "¡«yunohost service regen-conf» está obsoleto! Use «yunohost tools regen-conf» en su lugar.",
|
||||
"service_description_yunohost-firewall": "Gestiona los puertos de conexiones abiertos y cerrados a los servicios",
@ -280,7 +280,7 @@
"regenconf_failed": "No se pudo regenerar la configuración para la(s) categoría(s): {categories}",
|
||||
"regenconf_dry_pending_applying": "Comprobando la configuración pendiente que habría sido aplicada para la categoría «{category}»…",
|
||||
"regenconf_would_be_updated": "La configuración habría sido actualizada para la categoría «{category}»",
|
||||
"regenconf_updated": "Actualizada la configuración para la categoría '{category}'",
|
||||
"regenconf_updated": "Configuración actualizada para '{category}'",
|
||||
"regenconf_up_to_date": "Ya está actualizada la configuración para la categoría «{category}»",
|
||||
"regenconf_now_managed_by_yunohost": "El archivo de configuración «{conf}» está gestionado ahora por YunoHost (categoría {category}).",
|
||||
"regenconf_file_updated": "Actualizado el archivo de configuración «{conf}»",
@ -345,7 +345,7 @@
"migration_0005_postgresql_96_not_installed": "PostgreSQL 9.4 está instalado pero no PostgreSQL 9.6. Algo raro podría haber ocurrido en su sistema:(…",
|
||||
"migration_0005_postgresql_94_not_installed": "PostgreSQL no estaba instalado en su sistema. Nada que hacer.",
|
||||
"migration_0003_modified_files": "Tenga en cuenta que se encontró que los siguientes archivos fueron modificados manualmente y podrían ser sobrescritos después de la actualización: {manually_modified_files}",
|
||||
"migration_0003_problematic_apps_warning": "Tenga en cuenta que las aplicaciones listadas mas abajo fueron detectadas como 'posiblemente problemáticas'. Parece que no fueron instaladas desde una lista de aplicaciones o no estaban etiquetadas como 'funcional'. Así que no hay garantía de que aún funcionen después de la actualización: {problematic_apps}",
|
||||
"migration_0003_problematic_apps_warning": "Tenga en cuenta que se detectaron las siguientes aplicaciones instaladas posiblemente problemáticas. Parece que no se instalaron desde un catálogo de aplicaciones, o no se marcan como \"en funcionamiento\". En consecuencia, no se puede garantizar que seguirán funcionando después de la actualización: {problematic_apps}",
|
||||
"migration_0003_general_warning": "Tenga en cuenta que esta migración es una operación delicada. El equipo de YunoHost ha hecho todo lo posible para revisarla y probarla, pero la migración aún podría romper parte del sistema o de sus aplicaciones.\n\nPor lo tanto, se recomienda que:\n - Realice una copia de seguridad de cualquier dato crítico o aplicación. Más información en https://yunohost.org/backup;\n - Tenga paciencia tras iniciar la migración: dependiendo de su conexión a Internet y de su hardware, podría tardar unas cuantas horas hasta que todo se actualice.\n\nAdemás, el puerto para SMTP usado por los clientes de correo externos (como Thunderbird o K9-Mail) cambió de 465 (SSL/TLS) a 587 (STARTTLS). El antiguo puerto (465) se cerrará automáticamente y el nuevo puerto (587) se abrirá en el cortafuegos. Todos los usuarios *tendrán* que adaptar la configuración de sus clientes de correo por lo tanto.",
|
||||
"migration_0003_still_on_jessie_after_main_upgrade": "Algo fue mal durante la actualización principal: ⸘el sistema está aún en Jessie‽ Para investigar el problema, vea {log}:s…",
|
||||
"migration_0003_system_not_fully_up_to_date": "Su sistema no está totalmente actualizado. Realice una actualización normal antes de ejecutar la migración a Stretch.",
@ -358,7 +358,7 @@
"migration_0003_start": "Iniciando migración a Stretch. El registro estará disponible en {logfile}.",
|
||||
"migration_description_0012_postgresql_password_to_md5_authentication": "Forzar a la autentificación de PostgreSQL a usar MD5 para las conexiones locales",
|
||||
"migration_description_0011_setup_group_permission": "Configurar grupo de usuario y permisos para aplicaciones y servicios",
|
||||
"migration_description_0010_migrate_to_apps_json": "Eliminar las listas de aplicaciones («appslists») obsoletas y usar en su lugar la nueva lista unificada «apps.json»",
|
||||
"migration_description_0010_migrate_to_apps_json": "Elimine los catálogos de aplicaciones obsoletas y use la nueva lista unificada de 'apps.json' en su lugar (desactualizada, reemplazada por la migración 13)",
|
||||
"migration_description_0009_decouple_regenconf_from_services": "Separar el mecanismo «regen-conf» de los servicios",
|
||||
"migration_description_0008_ssh_conf_managed_by_yunohost_step2": "Permitir que la configuración de SSH la gestione YunoHost (paso 2, manual)",
|
||||
"migration_description_0007_ssh_conf_managed_by_yunohost_step1": "Permitir que la configuración de SSH la gestione YunoHost (paso 1, automático)",
@ -422,7 +422,7 @@
"group_deleted": "Eliminado el grupo «{group}»",
|
||||
"group_creation_failed": "No se pudo crear el grupo «{group}»: {error}",
|
||||
"group_created": "Creado el grupo «{group}»",
|
||||
"good_practices_about_admin_password": "Va a establecer una nueva contraseña de administración. La contraseña debería tener al menos 8 caracteres, aunque es una buena práctica usar una contraseña más extensa (básicamente una frase) y/o usar caracteres de varias clases (mayúsculas, minúsculas, números y caracteres especiales).",
|
||||
"good_practices_about_admin_password": "Ahora está a punto de definir una nueva contraseña de administración. La contraseña debe tener al menos 8 caracteres, aunque es una buena práctica usar una contraseña más larga (es decir, una frase de contraseña) y / o usar una variación de caracteres (mayúsculas, minúsculas, dígitos y caracteres especiales).",
|
||||
"global_settings_unknown_type": "Situación imprevista, la configuración {setting:s} parece tener el tipo {unknown_type:s} pero no es un tipo compatible con el sistema.",
|
||||
"global_settings_setting_service_ssh_allow_deprecated_dsa_hostkey": "Permitir el uso de la llave (obsoleta) DSA para la configuración del demonio SSH",
|
||||
"global_settings_unknown_setting_from_settings_file": "Clave desconocida en la configuración: «{setting_key:s}», desechada y guardada en /etc/yunohost/settings-unknown.json",
@ -447,8 +447,8 @@
"domain_dns_conf_is_just_a_recommendation": "Esta orden muestra la configuración *recomendada*. No configura el DNS en realidad. Es su responsabilidad configurar la zona de DNS en su registrador según esta recomendación.",
|
||||
"dpkg_lock_not_available": "Esta orden no se puede ejecutar en este momento ,parece que programa está usando el bloqueo de dpkg (el gestor de paquetes del sistema)",
|
||||
"dpkg_is_broken": "No puede hacer esto en este momento porque dpkg/apt (los gestores de paquetes del sistema) parecen estar en un estado roto... Puede tratar de solucionar este problema conectando a través de SSH y ejecutando `sudo dpkg --configure -a`.",
|
||||
"confirm_app_install_thirdparty": "¡PELIGRO! Esta aplicación no forma parte del catálogo de aplicaciones de YunoHost. Instalar aplicaciones de terceros podría comprometer la integridad y seguridad de su sistema. Probablemente NO debería instalarla salvo que sepa lo que está haciendo. No tendrá NINGUNA AYUDA si esta aplicación no funciona o rompe su sistema… Si está dispuesto a aceptar ese riesgo de todas formas, escriba «{answers:s}»",
|
||||
"confirm_app_install_danger": "¡PELIGRO! ¡Esta aplicación es conocida por ser aún experimental (o no funciona explícitamente)! Probablemente NO debería instalarla salvo que sepa lo que está haciendo. No tendrá NINGUNA AYUDA si esta aplicación no funciona o rompe su sistema… Si está dispuesto a aceptar ese riesgo de todas formas, escriba «{answers:s}»",
|
||||
"confirm_app_install_thirdparty": "¡PELIGRO! Esta aplicación no forma parte del catálogo de aplicaciones de Yunohost. La instalación de aplicaciones de terceros puede comprometer la integridad y la seguridad de su sistema. Probablemente NO debería instalarlo a menos que sepa lo que está haciendo. NO se proporcionará SOPORTE si esta aplicación no funciona o rompe su sistema ... Si de todos modos está dispuesto a correr ese riesgo, escriba '{answers:s}'",
|
||||
"confirm_app_install_danger": "¡PELIGRO! ¡Se sabe que esta aplicación sigue siendo experimental (si no explícitamente no funciona)! Probablemente NO debería instalarlo a menos que sepa lo que está haciendo. NO se proporcionará SOPORTE si esta aplicación no funciona o rompe su sistema ... Si de todos modos está dispuesto a correr ese riesgo, escriba '{answers:s}'",
|
||||
"confirm_app_install_warning": "Aviso: esta aplicación puede funcionar pero no está bien integrada en YunoHost. Algunas herramientas como la autentificación única y respaldo/restauración podrían no estar disponibles. ¿Instalar de todos modos? [{answers:s}] ",
|
||||
"backup_unable_to_organize_files": "No se pudo usar el método rápido de organización de los archivos en el archivo",
|
||||
"backup_permission": "Permiso de respaldo para la aplicación {app:s}",
@ -467,13 +467,13 @@
"app_start_backup": "Obteniendo archivos para el respaldo de «{app}»…",
|
||||
"app_start_remove": "Eliminando aplicación «{app}»…",
|
||||
"app_start_install": "Instalando aplicación «{app}»…",
|
||||
"app_not_upgraded": "Error al actualizar la aplicación «{failed_app}» y como consecuencia se han cancelado las actualizaciones de las siguientes aplicaciones: {apps}",
|
||||
"app_not_upgraded": "La aplicación '{failed_app}' no se pudo actualizar y, como consecuencia, se cancelaron las actualizaciones de las siguientes aplicaciones: {apps}",
|
||||
"app_action_cannot_be_ran_because_required_services_down": "Estos servicios necesarios deberían estar funcionando para ejecutar esta acción: {services}. Pruebe a reiniciarlos para continuar (y posiblemente investigar por qué están caídos).",
|
||||
"already_up_to_date": "Nada que hacer. Todo está actualizado.",
|
||||
"admin_password_too_long": "Elija una contraseña de menos de 127 caracteres",
|
||||
"aborting": "Cancelando.",
|
||||
"app_action_broke_system": "Esta acción parece que ha roto estos servicios importantes: {services}",
|
||||
"operation_interrupted": "¿Ha sido interrumpida la operación manualmente?",
|
||||
"operation_interrupted": "¿La operación fue interrumpida manualmente?",
|
||||
"apps_already_up_to_date": "Todas las aplicaciones están ya actualizadas",
|
||||
"dyndns_provider_unreachable": "No se puede conectar con el proveedor de Dyndns {provider}: o su YunoHost no está correctamente conectado a Internet o el servidor dynette está caído.",
|
||||
"group_already_exist": "El grupo {group} ya existe",
@ -488,7 +488,7 @@
"log_user_permission_reset": "Restablecer permiso «{}»",
|
||||
"migration_0011_failed_to_remove_stale_object": "No se pudo eliminar el objeto obsoleto {dn}: {error}",
|
||||
"permission_already_allowed": "El grupo «{group}» ya tiene el permiso «{permission}» activado",
|
||||
"permission_already_disallowed": "El grupo «{group}» ya tiene el permiso «{permission}» desactivado",
|
||||
"permission_already_disallowed": "El grupo '{group}' ya tiene el permiso '{permission}' deshabilitado",
|
||||
"permission_cannot_remove_main": "No está permitido eliminar un permiso principal",
|
||||
"user_already_exists": "El usuario «{user}» ya existe",
|
||||
"app_full_domain_unavailable": "Lamentablemente esta aplicación tiene que instalarse en un dominio propio pero ya hay otras aplicaciones instaladas en el dominio «{domain}». Podría usar un subdomino dedicado a esta aplicación en su lugar.",
@ -503,20 +503,20 @@
"permission_currently_allowed_for_all_users": "Este permiso se concede actualmente a todos los usuarios además de los otros grupos. Probablemente quiere o eliminar el permiso de «all_users» o eliminar los otros grupos a los que está otorgado actualmente.",
|
||||
"permission_require_account": "El permiso {permission} solo tiene sentido para usuarios con una cuenta y, por lo tanto, no se puede activar para visitantes.",
|
||||
"app_remove_after_failed_install": "Eliminando la aplicación tras el fallo de instalación…",
|
||||
"diagnosis_basesystem_host": "El servidor está ejecutando Debian {debian_version}.",
|
||||
"diagnosis_basesystem_host": "El servidor está ejecutando Debian {debian_version}",
|
||||
"diagnosis_basesystem_kernel": "El servidor está ejecutando el núcleo de Linux {kernel_version}",
|
||||
"diagnosis_basesystem_ynh_single_version": "{package} versión: {version} ({repo})",
|
||||
"diagnosis_basesystem_ynh_main_version": "El servidor está ejecutando YunoHost {main_version} ({repo})",
|
||||
"diagnosis_basesystem_ynh_inconsistent_versions": "Está ejecutando versiones incoherentes de los paquetes de YunoHost... probablemente por una actualización errónea o parcial.",
|
||||
"diagnosis_failed_for_category": "Diagnóstico fallido para la categoría «{category}» : {error}",
|
||||
"diagnosis_basesystem_ynh_inconsistent_versions": "Está ejecutando versiones inconsistentes de los paquetes de YunoHost ... probablemente debido a una actualización parcial o fallida.",
|
||||
"diagnosis_failed_for_category": "Error de diagnóstico para la categoría '{category}': {error}",
|
||||
"diagnosis_cache_still_valid": "(Caché aún válida para el diagnóstico de {category}. ¡Aún no se ha rediagnosticado!)",
|
||||
"diagnosis_found_errors_and_warnings": "¡Encontrado(s) error(es) significativo(s) {errors} (y aviso(s) {warnings}) relacionado(s) con {category}!",
|
||||
"diagnosis_display_tip_web": "Puede ir a la sección de diagnóstico (en la pantalla principal) para ver los problemas encontrados.",
|
||||
"diagnosis_display_tip_cli": "Puede ejecutar «yunohost diagnosis show --issues» para mostrar los problemas encontrados.",
|
||||
"apps_catalog_init_success": "¡Sistema de catálogo de aplicaciones inicializado!",
|
||||
"apps_catalog_updating": "Actualizando catálogo de aplicaciones...",
|
||||
"apps_catalog_failed_to_download": "No se pudo descargar el catálogo de aplicaciones {apps_catalog}: {error}",
|
||||
"apps_catalog_obsolete_cache": "La caché del catálogo de aplicaciones está vacía u obsoleta.",
|
||||
"apps_catalog_updating": "Actualizando el catálogo de aplicaciones…",
|
||||
"apps_catalog_failed_to_download": "No se puede descargar el catálogo de aplicaciones {apps_catalog}: {error}",
|
||||
"apps_catalog_obsolete_cache": "El caché del catálogo de aplicaciones está vacío u obsoleto.",
|
||||
"apps_catalog_update_success": "¡El catálogo de aplicaciones ha sido actualizado!",
|
||||
"diagnosis_cant_run_because_of_dep": "No se puede ejecutar el diagnóstico para {category} mientras haya problemas importantes relacionados con {dep}.",
|
||||
"diagnosis_ignored_issues": "(+ {nb_ignored} problema(s) ignorado(s))",
@ -545,9 +545,9 @@
"diagnosis_diskusage_ok": "¡El almacenamiento {mountpoint} (en el dispositivo {device}) todavía tiene {free} ({free_percent}%) de espacio libre!",
|
||||
"diagnosis_services_conf_broken": "¡Mala configuración para el servicio {service}!",
|
||||
"diagnosis_services_running": "¡El servicio {service} está en ejecución!",
|
||||
"diagnosis_failed": "No se ha podido obtener el resultado del diagnóstico para la categoría '{category}': {error}",
|
||||
"diagnosis_failed": "Error al obtener el resultado del diagnóstico para la categoría '{category}': {error}",
|
||||
"diagnosis_ip_connected_ipv4": "¡El servidor está conectado a internet a través de IPv4!",
|
||||
"diagnosis_security_vulnerable_to_meltdown_details": "Para corregir esto, debieras actualizar y reiniciar tu sistema para cargar el nuevo kernel de Linux (o contacta tu proveedor si esto no funciona). Mas información en https://meltdownattack.com/",
|
||||
"diagnosis_security_vulnerable_to_meltdown_details": "Para corregir esto, debieras actualizar y reiniciar tu sistema para cargar el nuevo kernel de Linux (o contacta tu proveedor si esto no funciona). Mas información en https://meltdownattack.com/ .",
|
||||
"diagnosis_ram_verylow": "Al sistema le queda solamente {available} ({available_percent}%) de RAM! (De un total de {total})",
|
||||
"diagnosis_ram_low": "Al sistema le queda {available} ({available_percent}%) de RAM de un total de {total}. Cuidado.",
|
||||
"diagnosis_ram_ok": "El sistema aun tiene {available} ({available_percent}%) de RAM de un total de {total}.",
@ -561,7 +561,7 @@
"diagnosis_regenconf_manually_modified_debian": "El archivos de configuración {file} fue modificado manualmente comparado con el valor predeterminado de Debian.",
|
||||
"diagnosis_regenconf_manually_modified_debian_details": "Esto este probablemente BIEN, pero igual no lo pierdas de vista...",
|
||||
"diagnosis_security_all_good": "Ninguna vulnerabilidad critica de seguridad fue encontrada.",
|
||||
"diagnosis_security_vulnerable_to_meltdown": "Pareces vulnerable a el colapso de vulnerabilidad critica de seguridad.",
|
||||
"diagnosis_security_vulnerable_to_meltdown": "Pareces vulnerable a el colapso de vulnerabilidad critica de seguridad",
|
||||
"diagnosis_description_basesystem": "Sistema de base",
|
||||
"diagnosis_description_ip": "Conectividad a Internet",
|
||||
"diagnosis_description_dnsrecords": "Registro DNS",
@ -588,15 +588,22 @@
"log_app_action_run": "Inicializa la acción de la aplicación '{}'",
|
||||
"group_already_exist_on_system_but_removing_it": "El grupo {group} ya existe en el grupo de sistema, pero YunoHost lo suprimirá …",
|
||||
"global_settings_setting_pop3_enabled": "Habilita el protocolo POP3 para el servidor de correo electrónico",
|
||||
"domain_cannot_remove_main_add_new_one": "No se puede remover '{domain:s}' porque es su principal y único dominio. Primero debe agregar un nuevo dominio con la linea de comando 'yunohost domain add <another-domain.com>', entonces configurarlo como dominio principal con 'yunohost domain main-domain -n <another-domain.com>' y finalmente borrar el dominio '{domain:s}' con 'yunohost domain remove {domain:s}'.",
|
||||
"domain_cannot_remove_main_add_new_one": "No se puede remover '{domain:s}' porque es su principal y único dominio. Primero debe agregar un nuevo dominio con la linea de comando 'yunohost domain add <another-domain.com>', entonces configurarlo como dominio principal con 'yunohost domain main-domain -n <another-domain.com>' y finalmente borrar el dominio '{domain:s}' con 'yunohost domain remove {domain:s}'.'",
|
||||
"diagnosis_never_ran_yet": "Este servidor todavía no tiene reportes de diagnostico. Puede iniciar un diagnostico completo desde la interface administrador web o con la linea de comando 'yunohost diagnosis run'.",
|
||||
"diagnosis_unknown_categories": "Las siguientes categorías están desconocidas: {categories}",
|
||||
"diagnosis_http_unreachable": "El dominio {domain} esta fuera de alcance desde internet y a través de HTTP.",
|
||||
"diagnosis_http_bad_status_code": "El sistema de diagnostico no pudo comunicarse con su servidor. Puede ser otra maquina que contesto en lugar del servidor. Debería verificar en su firewall que el re-direccionamiento del puerto 80 esta correcto.",
|
||||
"diagnosis_http_connection_error": "Error de conexión: Ne se pudo conectar al dominio solicitado,",
|
||||
"diagnosis_http_connection_error": "Error de conexión: Ne se pudo conectar al dominio solicitado.",
|
||||
"diagnosis_http_timeout": "El intento de contactar a su servidor desde internet corrió fuera de tiempo. Al parece esta incomunicado. Debería verificar que nginx corre en el puerto 80, y que la redireción del puerto 80 no interfiere con en el firewall.",
|
||||
"diagnosis_http_ok": "El Dominio {domain} es accesible desde internet a través de HTTP.",
|
||||
"diagnosis_http_could_not_diagnose": "No se pudo verificar si el dominio es accesible desde internet.",
|
||||
"diagnosis_http_could_not_diagnose_details": "Error: {error}",
|
||||
"diagnosis_ports_forwarding_tip": "Para solucionar este incidente, debería configurar el \"port forwading\" en su router como especificado en https://yunohost.org/isp_box_config"
|
||||
"diagnosis_ports_forwarding_tip": "Para solucionar este incidente, debería configurar el \"port forwading\" en su router como especificado en https://yunohost.org/isp_box_config",
|
||||
"certmanager_warning_subdomain_dns_record": "El subdominio '{subdomain:s}' no se resuelve en la misma dirección IP que '{domain:s}'. Algunas funciones no estarán disponibles hasta que solucione esto y regenere el certificado.",
|
||||
"domain_cannot_add_xmpp_upload": "No puede agregar dominios que comiencen con 'xmpp-upload'. Este tipo de nombre está reservado para la función de carga XMPP integrada en YunoHost.",
|
||||
"yunohost_postinstall_end_tip": "¡La post-instalación completada! Para finalizar su configuración, considere:\n - agregar un primer usuario a través de la sección 'Usuarios' del webadmin (o 'yunohost user create <username>' en la línea de comandos);\n - diagnostique problemas potenciales a través de la sección 'Diagnóstico' de webadmin (o 'ejecución de diagnóstico yunohost' en la línea de comandos);\n - leyendo las partes 'Finalizando su configuración' y 'Conociendo a Yunohost' en la documentación del administrador: https://yunohost.org/admindoc.",
|
||||
"diagnosis_dns_point_to_doc": "Por favor, consulta la documentación en <a href='https://yunohost.org/dns_config'>https://yunohost.org/dns_config</a> si necesitas ayuda para configurar los registros DNS.",
|
||||
"diagnosis_ip_global": "IP Global: <code>{global}</code>",
|
||||
"diagnosis_mail_outgoing_port_25_ok": "El servidor de email SMTP puede mandar emails (puerto saliente 25 no está bloqueado).",
|
||||
"diagnosis_mail_outgoing_port_25_blocked_details": "Deberías intentar desbloquear el puerto 25 saliente en la interfaz de tu router o en la interfaz de tu provedor de hosting. (Algunos hosting pueden necesitar que les abras un ticket de soporte para esto)."
|
||||
}
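Note on the placeholders seen throughout these locale files: fields such as `{domain:s}`, `{error}` or `{logs:s}` are named substitution slots and must be kept verbatim in every translation. Below is a minimal sketch, assuming the strings behave like Python str.format templates; the `catalog` excerpt and the `translate()` helper are hypothetical and only illustrate why a malformed field (for example `{logs s}`) fails, while the real lookup goes through moulinette's i18n layer, not shown here.

```python
# Minimal sketch, NOT the actual YunoHost code path: it assumes locale entries
# behave like Python str.format templates with named fields such as {domain}
# or {logs:s}. The real lookup goes through moulinette's i18n machinery.
import json

# Hypothetical excerpt mimicking the structure of locales/es.json above.
catalog = json.loads("""
{
  "domain_creation_failed": "No se pudo crear el dominio {domain}: {error}",
  "service_enable_failed": "No se pudo activar el servicio '{service:s}'\\n\\nRegistros de servicio recientes: {logs:s}"
}
""")

def translate(key, **kwargs):
    # Substitute the named placeholders; a malformed field such as "{logs s}"
    # would raise a KeyError here instead of rendering.
    return catalog[key].format(**kwargs)

print(translate("domain_creation_failed", domain="example.org", error="timeout"))
print(translate("service_enable_failed", service="nginx", logs="journalctl output…"))
```

The helper name and catalog excerpt are illustrative only; the practical point for translators is that placeholder names and their `:s` conversions must be preserved exactly as in the source strings.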
284 locales/fr.json
@ -12,7 +12,7 @@
"app_install_files_invalid": "Fichiers d’installation incorrects",
|
||||
"app_manifest_invalid": "Manifeste d’application incorrect : {error}",
|
||||
"app_not_correctly_installed": "{app:s} semble être mal installé",
|
||||
"app_not_installed": "Nous n’avons pas trouvé l’application « {app:s} » dans la liste des applications installées: {all_apps}",
|
||||
"app_not_installed": "Nous n’avons pas trouvé l’application « {app:s} » dans la liste des applications installées : {all_apps}",
|
||||
"app_not_properly_removed": "{app:s} n’a pas été supprimé correctement",
|
||||
"app_removed": "{app:s} supprimé",
|
||||
"app_requirements_checking": "Vérification des paquets requis pour {app} …",
@ -41,20 +41,20 @@
"backup_hook_unknown": "Script de sauvegarde '{hook:s}' inconnu",
|
||||
"backup_invalid_archive": "Archive de sauvegarde invalide",
|
||||
"backup_nothings_done": "Il n’y a rien à sauvegarder",
|
||||
"backup_output_directory_forbidden": "Dossier de destination interdit. Les sauvegardes ne peuvent être créées dans les sous-dossiers /bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var ou /home/yunohost.backup/archives",
|
||||
"backup_output_directory_forbidden": "Choisissez un répertoire de destination différent. Les sauvegardes ne peuvent pas être créées dans les sous-dossiers /bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var ou /home/yunohost.backup/archives",
|
||||
"backup_output_directory_not_empty": "Le répertoire de destination n’est pas vide",
|
||||
"backup_output_directory_required": "Vous devez spécifier un dossier de destination pour la sauvegarde",
|
||||
"backup_running_hooks": "Exécution des scripts de sauvegarde …",
|
||||
"custom_app_url_required": "Vous devez spécifier une URL pour mettre à jour votre application personnalisée {app:s}",
|
||||
"domain_cert_gen_failed": "Impossible de générer le certificat",
|
||||
"domain_created": "Le domaine a été créé",
|
||||
"domain_creation_failed": "Impossible de créer le domaine {domain}: {error}",
|
||||
"domain_creation_failed": "Impossible de créer le domaine {domain} : {error}",
|
||||
"domain_deleted": "Le domaine a été supprimé",
|
||||
"domain_deletion_failed": "Impossible de supprimer le domaine {domain}:{error}",
|
||||
"domain_deletion_failed": "Impossible de supprimer le domaine {domain} : {error}",
|
||||
"domain_dyndns_already_subscribed": "Vous avez déjà souscris à un domaine DynDNS",
|
||||
"domain_dyndns_root_unknown": "Domaine DynDNS principal inconnu",
|
||||
"domain_exists": "Le domaine existe déjà",
|
||||
"domain_uninstall_app_first": "Une ou plusieurs applications sont installées sur ce domaine. Veuillez d’abord les désinstaller avant de supprimer ce domaine",
|
||||
"domain_uninstall_app_first": "Ces applications sont toujours installées sur votre domaine: {apps}. Veuillez d’abord les désinstaller avant de supprimer ce domaine",
|
||||
"domain_unknown": "Domaine inconnu",
|
||||
"done": "Terminé",
|
||||
"downloading": "Téléchargement en cours …",
@ -75,7 +75,7 @@
"field_invalid": "Champ incorrect : '{:s}'",
|
||||
"firewall_reload_failed": "Impossible de recharger le pare-feu",
|
||||
"firewall_reloaded": "Pare-feu rechargé",
|
||||
"firewall_rules_cmd_failed": "Certaines règles du pare-feu n’ont pas pu être appliquées. Plus d’info dans le journal de log.",
|
||||
"firewall_rules_cmd_failed": "Certaines commandes de règles de pare-feu ont échoué. Plus d'informations dans le journal.",
|
||||
"hook_exec_failed": "Échec de l’exécution du script : {path:s}",
|
||||
"hook_exec_not_terminated": "L’exécution du script {path:s} ne s’est pas terminée correctement",
|
||||
"hook_list_by_invalid": "Propriété invalide pour lister les actions par celle-ci",
@ -89,7 +89,7 @@
"mail_domain_unknown": "Le domaine '{domain:s}' de cette adresse de courriel n’est pas valide. Merci d’utiliser un domaine administré par ce serveur.",
|
||||
"mail_forward_remove_failed": "Impossible de supprimer le courriel de transfert '{mail:s}'",
|
||||
"main_domain_change_failed": "Impossible de modifier le domaine principal",
|
||||
"main_domain_changed": "Le domaine principal modifié",
|
||||
"main_domain_changed": "Le domaine principal a été modifié",
|
||||
"no_internet_connection": "Le serveur n’est pas connecté à Internet",
|
||||
"not_enough_disk_space": "L’espace disque est insuffisant sur '{path:s}'",
|
||||
"package_unknown": "Le paquet '{pkgname}' est inconnu",
@ -114,8 +114,8 @@
"restore_failed": "Impossible de restaurer le système",
|
||||
"restore_hook_unavailable": "Le script de restauration '{part:s}' n’est pas disponible sur votre système, et ne l’est pas non plus dans l’archive",
|
||||
"restore_nothings_done": "Rien n’a été restauré",
|
||||
"restore_running_app_script": "Exécution du script de restauration de l’application '{app:s}' …",
|
||||
"restore_running_hooks": "Exécution des scripts de restauration …",
|
||||
"restore_running_app_script": "Exécution du script de restauration de l’application '{app:s}'…",
|
||||
"restore_running_hooks": "Exécution des scripts de restauration…",
|
||||
"service_add_failed": "Impossible d’ajouter le service '{service:s}'",
|
||||
"service_added": "Le service '{service:s}' a été ajouté",
|
||||
"service_already_started": "Le service '{service:s}' est déjà en cours d’exécution",
@ -140,25 +140,25 @@
"unexpected_error": "Une erreur inattendue est survenue : {error}",
|
||||
"unlimit": "Pas de quota",
|
||||
"unrestore_app": "L’application '{app:s}' ne sera pas restaurée",
|
||||
"updating_apt_cache": "Récupération des mises à jour disponibles pour les paquets du système …",
|
||||
"updating_apt_cache": "Récupération des mises à jour disponibles pour les paquets du système…",
|
||||
"upgrade_complete": "Mise à jour terminée",
|
||||
"upgrading_packages": "Mise à jour des paquets en cours …",
|
||||
"upgrading_packages": "Mise à jour des paquets en cours…",
|
||||
"upnp_dev_not_found": "Aucun périphérique compatible UPnP n’a été trouvé",
|
||||
"upnp_disabled": "UPnP désactivé",
|
||||
"upnp_enabled": "UPnP activé",
|
||||
"upnp_port_open_failed": "Impossible d’ouvrir les ports UPnP",
|
||||
"user_created": "L’utilisateur créé",
|
||||
"user_creation_failed": "Impossible de créer l’utilisateur {user}: {error}",
|
||||
"user_creation_failed": "Impossible de créer l’utilisateur {user} : {error}",
|
||||
"user_deleted": "L’utilisateur supprimé",
|
||||
"user_deletion_failed": "Impossible de supprimer l’utilisateur {user}: {error}",
|
||||
"user_deletion_failed": "Impossible de supprimer l’utilisateur {user} : {error}",
|
||||
"user_home_creation_failed": "Impossible de créer le dossier personnel de l’utilisateur",
|
||||
"user_unknown": "L’utilisateur {user:s} est inconnu",
|
||||
"user_update_failed": "Impossible de mettre à jour l’utilisateur {user}: {error}",
|
||||
"user_update_failed": "Impossible de mettre à jour l’utilisateur {user} : {error}",
|
||||
"user_updated": "L’utilisateur a été modifié",
|
||||
"yunohost_already_installed": "YunoHost est déjà installé",
|
||||
"yunohost_ca_creation_failed": "Impossible de créer l’autorité de certification",
|
||||
"yunohost_configured": "YunoHost est maintenant configuré",
|
||||
"yunohost_installing": "L’installation de YunoHost est en cours …",
|
||||
"yunohost_installing": "L’installation de YunoHost est en cours…",
|
||||
"yunohost_not_installed": "YunoHost n’est pas correctement installé. Veuillez exécuter 'yunohost tools postinstall'",
|
||||
"certmanager_attempt_to_replace_valid_cert": "Vous êtes en train de vouloir remplacer un certificat correct et valide pour le domaine {domain:s} ! (Utilisez --force pour contourner cela)",
|
||||
"certmanager_domain_unknown": "Domaine {domain:s} inconnu",
@ -166,9 +166,9 @@
"certmanager_certificate_fetching_or_enabling_failed": "Il semble que l’activation du nouveau certificat pour {domain:s} a échoué …",
|
||||
"certmanager_attempt_to_renew_nonLE_cert": "Le certificat pour le domaine {domain:s} n’est pas émis par Let’s Encrypt. Impossible de le renouveler automatiquement !",
|
||||
"certmanager_attempt_to_renew_valid_cert": "Le certificat pour le domaine {domain:s} n’est pas sur le point d’expirer ! (Vous pouvez utiliser --force si vous savez ce que vous faites)",
|
||||
"certmanager_domain_http_not_working": "Il semble que le domaine {domain:s} ne soit pas accessible via HTTP. Veuillez vérifier que vos configuration DNS et Nginx sont correctes",
|
||||
"certmanager_domain_http_not_working": "Le domaine {domain:s} ne semble pas être accessible via HTTP. Merci de vérifier la catégorie 'Web' dans le diagnostic pour plus d'informations. (Ou si vous savez ce que vous faites, utilisez '--no-checks' pour désactiver la vérification.)",
|
||||
"certmanager_error_no_A_record": "Aucun enregistrement DNS 'A' n’a été trouvé pour {domain:s}. Vous devez faire pointer votre nom de domaine vers votre machine pour être en mesure d’installer un certificat Let’s Encrypt ! (Si vous savez ce que vous faites, utilisez --no-checks pour désactiver ces contrôles)",
|
||||
"certmanager_domain_dns_ip_differs_from_public_ip": "L’enregistrement DNS 'A' du domaine {domain:s} est différent de l’adresse IP de ce serveur. Si vous avez récemment modifié votre enregistrement 'A', veuillez attendre sa propagation (quelques vérificateurs de propagation DNS sont disponibles en ligne). (Si vous savez ce que vous faites, utilisez --no-checks pour désactiver ces contrôles)",
|
||||
"certmanager_domain_dns_ip_differs_from_public_ip": "L'enregistrement DNS du domaine {domain:s} est différent de l’adresse IP de ce serveur. Pour plus d'informations, veuillez consulter la catégorie \"Enregistrements DNS\" dans la section diagnostic. Si vous avez récemment modifié votre enregistrement 'A', veuillez attendre sa propagation (des vérificateurs de propagation DNS sont disponibles en ligne). (Si vous savez ce que vous faites, utilisez --no-checks pour désactiver ces contrôles)",
|
||||
"certmanager_cannot_read_cert": "Quelque chose s’est mal passé lors de la tentative d’ouverture du certificat actuel pour le domaine {domain:s} (fichier : {file:s}), la cause est : {reason:s}",
|
||||
"certmanager_cert_install_success_selfsigned": "Le certificat auto-signé est maintenant installé pour le domaine « {domain:s} »",
|
||||
"certmanager_cert_install_success": "Le certificat Let’s Encrypt est maintenant installé pour le domaine « {domain:s} »",
@ -178,17 +178,17 @@
"certmanager_conflicting_nginx_file": "Impossible de préparer le domaine pour le défi ACME : le fichier de configuration NGINX {filepath:s} est en conflit et doit être préalablement retiré",
|
||||
"certmanager_hit_rate_limit": "Trop de certificats ont déjà été émis récemment pour ce même ensemble de domaines {domain:s}. Veuillez réessayer plus tard. Lisez https://letsencrypt.org/docs/rate-limits/ pour obtenir plus de détails sur les ratios et limitations",
|
||||
"ldap_init_failed_to_create_admin": "L’initialisation de l’annuaire LDAP n’a pas réussi à créer l’utilisateur admin",
|
||||
"domain_cannot_remove_main": "Vous ne pouvez pas supprimer '{domain:s}' car il s’agit du domaine principal. Vous devez d’abord définir un autre domaine comme domaine principal à l’aide de 'yunohost domain main-domain -n <another-domain>', voici la liste des domaines candidats. : {other_domains:s}",
|
||||
"domain_cannot_remove_main": "Vous ne pouvez pas supprimer '{domain:s}' car il s’agit du domaine principal. Vous devez d’abord définir un autre domaine comme domaine principal à l’aide de 'yunohost domain main-domain -n <another-domain>', voici la liste des domaines candidats : {other_domains:s}",
|
||||
"certmanager_self_ca_conf_file_not_found": "Le fichier de configuration pour l’autorité du certificat auto-signé est introuvable (fichier : {file:s})",
|
||||
"certmanager_unable_to_parse_self_CA_name": "Impossible d’analyser le nom de l’autorité du certificat auto-signé (fichier : {file:s})",
|
||||
"mailbox_used_space_dovecot_down": "Le service de courriel Dovecot doit être démarré si vous souhaitez voir l’espace disque occupé par la messagerie",
|
||||
"domains_available": "Domaines disponibles :",
|
||||
"backup_archive_broken_link": "Impossible d’accéder à l’archive de sauvegarde (lien invalide vers {path:s})",
|
||||
"certmanager_acme_not_configured_for_domain": "Le certificat du domaine {domain:s} ne semble pas être correctement installé. Veuillez d’abord exécuter cert-install.",
|
||||
"certmanager_acme_not_configured_for_domain": "Le challenge ACME n'a pas pu être validé pour le domaine {domain} pour le moment car le code de la configuration nginx est manquant... Merci de vérifier que votre configuration nginx est à jour avec la commande: `yunohost tools regen-conf nginx --dry-run --with-diff`.",
|
||||
"certmanager_http_check_timeout": "Expiration du délai lorsque le serveur a essayé de se contacter lui-même via HTTP en utilisant l’adresse IP public {ip:s} du domaine {domain:s}. Vous rencontrez peut-être un problème d’hairpinning ou alors le pare-feu/routeur en amont de votre serveur est mal configuré.",
"certmanager_couldnt_fetch_intermediate_cert": "Expiration du délai lors de la tentative de récupération du certificat intermédiaire depuis Let’s Encrypt. L’installation ou le renouvellement du certificat a été annulé. Veuillez réessayer plus tard.",
"domain_hostname_failed": "Échec de l’utilisation d’un nouveau nom d’hôte. Cela pourrait causer des soucis plus tard (peut-être que ça n’en causera pas).",
"yunohost_ca_creation_success": "L’autorité de certification locale créée.",
"domain_hostname_failed": "Échec de l’utilisation d’un nouveau nom d’hôte. Cela pourrait causer des soucis plus tard (cela n’en causera peut-être pas).",
"yunohost_ca_creation_success": "L'autorité de certification locale a été créée.",
"app_already_installed_cant_change_url": "Cette application est déjà installée. L’URL ne peut pas être changé simplement par cette fonction. Vérifiez si cela est disponible avec `app changeurl`.",
"app_change_url_failed_nginx_reload": "Le redémarrage de Nginx a échoué. Voici la sortie de 'nginx -t' :\n{nginx_errors:s}",
"app_change_url_identical_domains": "L’ancien et le nouveau couple domaine/chemin_de_l’URL sont identiques pour ('{domain:s}{path:s}'), rien à faire.",
@@ -234,8 +234,8 @@
"backup_with_no_restore_script_for_app": "L’application « {app:s} » n’a pas de script de restauration, vous ne pourrez pas restaurer automatiquement la sauvegarde de cette application.",
"global_settings_cant_serialize_settings": "Échec de la sérialisation des données de paramétrage car : {reason:s}",
"restore_removing_tmp_dir_failed": "Impossible de sauvegarder un ancien dossier temporaire",
"restore_extracting": "Extraction des fichiers nécessaires depuis l’archive …",
"restore_may_be_not_enough_disk_space": "Votre système semble ne pas avoir suffisamment d’espace disponible (L’espace libre est de {free_space:d} octets. Le besoin d’espace nécessaire est de {needed_space:d} octets. En appliquant une marge de sécurité, la quantité d’espace nécessaire est de {margin:d} octets)",
"restore_extracting": "Extraction des fichiers nécessaires depuis l’archive…",
"restore_may_be_not_enough_disk_space": "Votre système ne semble pas avoir suffisamment d’espace (libre : {free_space:d} B, espace nécessaire : {needed_space:d} B, marge de sécurité : {margin:d} B)",
"restore_not_enough_disk_space": "Espace disponible insuffisant (L’espace libre est de {free_space:d} octets. Le besoin d’espace nécessaire est de {needed_space:d} octets. En appliquant une marge de sécurité, la quantité d’espace nécessaire est de {margin:d} octets)",
"restore_system_part_failed": "Impossible de restaurer la partie '{part:s}' du système",
"backup_couldnt_bind": "Impossible de lier {src:s} avec {dest:s}.",
@@ -244,7 +244,7 @@
"migrations_loading_migration": "Chargement de la migration {id} …",
"migrations_migration_has_failed": "La migration {id} a échoué avec l’exception {exception} : annulation",
"migrations_no_migrations_to_run": "Aucune migration à lancer",
"migrations_skip_migration": "Ignorer et passer la migration {id} …",
"migrations_skip_migration": "Ignorer et passer la migration {id}…",
"server_shutdown": "Le serveur va s’éteindre",
"server_shutdown_confirm": "Le serveur va être éteint immédiatement, le voulez-vous vraiment ? [{answers:s}]",
"server_reboot": "Le serveur va redémarrer",
@@ -256,7 +256,7 @@
"app_upgrade_app_name": "Mise à jour de l’application {app} …",
"backup_output_symlink_dir_broken": "Votre répertoire d’archivage '{path:s}' est un lien symbolique brisé. Peut-être avez-vous oublié de re/monter ou de brancher le support de stockage sur lequel il pointe.",
"migrate_tsig_end": "La migration à HMAC-SHA-512 est terminée",
"migrate_tsig_failed": "La migration du domaine DynDNS {domain} à hmac-sha512 a échoué. Annulation des modifications. Erreur : {error_code} - {error}",
"migrate_tsig_failed": "La migration du domaine DynDNS {domain} à HMAC-SHA-512 a échoué. Annulation des modifications. Erreur : {error_code} - {error}",
"migrate_tsig_start": "L’algorithme de génération des clefs n’est pas suffisamment sécurisé pour la signature TSIG du domaine '{domain}', lancement de la migration vers HMAC-SHA-512 qui est plus sécurisé",
"migrate_tsig_wait": "Attendre trois minutes pour que le serveur DynDNS prenne en compte la nouvelle clef …",
"migrate_tsig_wait_2": "2 minutes …",
@@ -269,19 +269,19 @@
"migration_0003_start": "Démarrage de la migration vers Stretch. Les journaux seront disponibles dans {logfile}.",
"migration_0003_patching_sources_list": "Modification du fichier sources.lists …",
"migration_0003_main_upgrade": "Démarrage de la mise à niveau principale …",
"migration_0003_fail2ban_upgrade": "Démarrage de la mise à niveau de fail2ban …",
"migration_0003_restoring_origin_nginx_conf": "Votre fichier /etc/nginx/nginx.conf a été modifié d’une manière ou d’une autre. La migration va d’abord le réinitialiser à son état initial. Le fichier précédent sera disponible en tant que {backup_dest}.",
"migration_0003_yunohost_upgrade": "Démarrage de la mise à niveau du paquet YunoHost. La migration se terminera, mais la mise à jour réelle aura lieu immédiatement après. Une fois cette opération terminée, vous pourriez avoir à vous reconnecter à l’administration via le panel web.",
"migration_0003_fail2ban_upgrade": "Démarrage de la mise à niveau de Fail2Ban …",
"migration_0003_restoring_origin_nginx_conf": "Votre fichier /etc/nginx/nginx.conf a été modifié d'une manière ou d’une autre. La migration va d’abord le réinitialiser à son état d'origine… Le fichier précédent sera disponible en tant que {backup_dest}.",
"migration_0003_yunohost_upgrade": "Démarrage de la mise à niveau du package YunoHost… La migration se terminera, mais la mise à niveau réelle aura lieu immédiatement après. Une fois l'opération terminée, vous devrez peut-être vous reconnecter à la page webadmin.",
"migration_0003_not_jessie": "La distribution Debian actuelle n’est pas Jessie !",
"migration_0003_system_not_fully_up_to_date": "Votre système n’est pas complètement à jour. Veuillez mener une mise à jour classique avant de lancer la migration à Stretch.",
"migration_0003_still_on_jessie_after_main_upgrade": "Quelque chose s’est mal passé pendant la mise à niveau principale : le système est toujours sur Debian Jessie !? Pour investiguer sur le problème, veuillez regarder les journaux {log}:s …",
"migration_0003_general_warning": "Veuillez noter que cette migration est une opération délicate. Si l’équipe YunoHost a fait de son mieux pour la relire et la tester, la migration pourrait tout de même casser des parties de votre système ou de vos applications.\n\nEn conséquence, nous vous recommandons :\n - de lancer une sauvegarde de vos données ou applications critiques. Plus d’informations sur https://yunohost.org/backup ;\n - d’être patient après avoir lancé la migration : selon votre connexion internet et matériel, cela pourrait prendre jusqu’à quelques heures pour que tout soit à niveau.\n\nEn outre, le port SMTP utilisé par les clients de messagerie externes comme (Thunderbird ou K9-Mail) a été changé de 465 (SSL/TLS) à 587 (STARTTLS). L’ancien port 465 sera automatiquement fermé et le nouveau port 587 sera ouvert dans le pare-feu. Vous et vos utilisateurs *devront* adapter la configuration de vos clients de messagerie en conséquence.",
"migration_0003_problematic_apps_warning": "Veuillez noter que les applications installées potentiellement problématiques suivantes ont été détectées. Il semble que celles-ci n’ont pas été installées à partir d’un catalogue d’applications, ou ne sont pas marquées comme \"fonctionnelle\". Par conséquent, il ne peut pas être garanti qu’ils fonctionneront toujours après la mise à niveau: {problematic_apps}",
"migration_0003_problematic_apps_warning": "Veuillez noter que les applications installées potentiellement problématiques suivantes ont été détectées. Il semble que celles-ci n’ont pas été installées à partir d’un catalogue d’applications, ou ne sont pas marquées comme \"fonctionnelle\". Par conséquent, il ne peut pas être garanti qu’ils fonctionneront toujours après la mise à niveau : {problematic_apps}",
"migration_0003_modified_files": "Veuillez noter que les fichiers suivants ont été détectés comme modifiés manuellement et pourraient être écrasés à la fin de la mise à niveau : {manually_modified_files}",
"migrations_list_conflict_pending_done": "Vous ne pouvez pas utiliser --previous et --done simultanément.",
"migrations_to_be_ran_manually": "La migration {id} doit être lancée manuellement. Veuillez aller dans Outils > Migrations dans l’interface admin, ou lancer `yunohost tools migrations migrate`.",
"migrations_need_to_accept_disclaimer": "Pour lancer la migration {id}, vous devez accepter cette clause de non-responsabilité :\n---\n{disclaimer}\n---\nSi vous acceptez de lancer la migration, veuillez relancer la commande avec l’option --accept-disclaimer.",
"service_description_avahi-daemon": "Vous permet d’atteindre votre serveur en utilisant «yunohost.local» sur votre réseau local",
"service_description_avahi-daemon": "Vous permet d’atteindre votre serveur en utilisant « yunohost.local » sur votre réseau local",
"service_description_dnsmasq": "Gère la résolution des noms de domaine (DNS)",
"service_description_dovecot": "Permet aux clients de messagerie d’accéder/récupérer les courriels (via IMAP et POP3)",
"service_description_fail2ban": "Protège contre les attaques brute-force et autres types d’attaques venant d’Internet",
@@ -304,7 +304,7 @@
"log_link_to_failed_log": "L’opération '{desc}' a échoué ! Pour obtenir de l’aide, merci de partager le journal de l’opération en <a href=\"#/tools/logs/{name}\">cliquant ici</a>",
"backup_php5_to_php7_migration_may_fail": "Impossible de convertir votre archive pour prendre en charge PHP 7, vous pourriez ne plus pouvoir restaurer vos applications PHP (cause : {error:s})",
"log_help_to_get_failed_log": "L’opération '{desc}' a échoué ! Pour obtenir de l’aide, merci de partager le journal de l’opération en utilisant la commande 'yunohost log display {name} --share'",
"log_does_exists": "Il n’existe pas de journal de l’opération ayant pour nom '{log}', utiliser 'yunohost log list' pour voir tous les fichiers de journaux disponibles",
"log_does_exists": "Il n’y a pas de journal des opérations avec le nom '{log}', utilisez 'yunohost log list' pour voir tous les journaux d’opérations disponibles",
"log_operation_unit_unclosed_properly": "L’opération ne s’est pas terminée correctement",
"log_app_change_url": "Changer l’URL de l’application '{}'",
"log_app_install": "Installer l’application '{}'",
@@ -321,30 +321,30 @@
"log_dyndns_subscribe": "Souscrire au sous-domaine YunoHost '{}'",
"log_dyndns_update": "Mettre à jour l’adresse IP associée à votre sous-domaine YunoHost '{}'",
"log_letsencrypt_cert_install": "Installer le certificat Let’s Encrypt sur le domaine '{}'",
"log_selfsigned_cert_install": "Installer le certificat auto-signé sur le domaine '{}'",
"log_selfsigned_cert_install": "Installer un certificat auto-signé sur le domaine '{}'",
"log_letsencrypt_cert_renew": "Renouveler le certificat Let’s Encrypt de '{}'",
"log_user_create": "Ajouter l’utilisateur '{}'",
"log_user_delete": "Supprimer l’utilisateur '{}'",
"log_user_update": "Mettre à jour les informations de l’utilisateur '{}'",
"log_domain_main_domain": "Faire de '{}' le domaine principal",
"log_tools_migrations_migrate_forward": "Éxecuter les migrations",
"log_tools_migrations_migrate_forward": "Exécuter les migrations",
"log_tools_postinstall": "Faire la post-installation de votre serveur YunoHost",
"log_tools_upgrade": "Mettre à jour les paquets du système",
"log_tools_shutdown": "Éteindre votre serveur",
"log_tools_reboot": "Redémarrer votre serveur",
"mail_unavailable": "Cette adresse de courriel est réservée et doit être automatiquement attribuée au tout premier utilisateur",
"migration_description_0004_php5_to_php7_pools": "Reconfigurer les espaces utilisateurs PHP pour utiliser PHP 7 au lieu de PHP 5",
"migration_description_0004_php5_to_php7_pools": "Reconfigurer l'ensemble PHP pour utiliser PHP 7 au lieu de PHP 5",
"migration_description_0005_postgresql_9p4_to_9p6": "Migration des bases de données de PostgreSQL 9.4 vers PostgreSQL 9.6",
"migration_0005_postgresql_94_not_installed": "PostgreSQL n’a pas été installé sur votre système. Rien à faire !",
"migration_0005_postgresql_96_not_installed": "PostgreSQL 9.4 a été trouvé et installé, mais pas PostgreSQL 9.6 !? Quelque chose d’étrange a dû arriver à votre système… :(",
"migration_0005_postgresql_96_not_installed": "PostgreSQL 9.4 est installé, mais pas PostgreSQL 9.6 ‽ Quelque chose de bizarre aurait pu se produire sur votre système :(…",
"migration_0005_not_enough_space": "Laissez suffisamment d’espace disponible dans {path} pour exécuter la migration.",
"service_description_php7.0-fpm": "Exécute des applications écrites en PHP avec NGINX",
"users_available": "Liste des utilisateurs disponibles :",
"good_practices_about_admin_password": "Vous êtes maintenant sur le point de définir un nouveau mot de passe d’administration. Le mot de passe doit comporter au moins 8 caractères – bien qu’il soit recommandé d’utiliser un mot de passe plus long (c’est-à-dire une phrase secrète) et/ou d’utiliser différents types de caractères (majuscules, minuscules, chiffres et caractères spéciaux).",
"good_practices_about_user_password": "Vous êtes maintenant sur le point de définir un nouveau mot de passe utilisateur. Le mot de passe doit comporter au moins 8 caractères - bien qu’il soit recommandé d’utiliser un mot de passe plus long (c’est-à-dire une phrase secrète) et/ou d’utiliser différents types de caractères tels que : majuscules, minuscules, chiffres et caractères spéciaux.",
"good_practices_about_admin_password": "Vous êtes maintenant sur le point de définir un nouveau mot de passe d'administration. Le mot de passe doit comporter au moins 8 caractères, bien qu'il soit recommandé d'utiliser un mot de passe plus long (c'est-à-dire une phrase de passe) et / ou d'utiliser une variation de caractères (majuscule, minuscule, chiffres et caractères spéciaux).",
"good_practices_about_user_password": "Vous êtes maintenant sur le point de définir un nouveau mot de passe utilisateur. Le mot de passe doit comporter au moins 8 caractères, bien qu'il soit recommandé d'utiliser un mot de passe plus long (c'est-à-dire une phrase secrète) et / ou une variation de caractères (majuscule, minuscule, chiffres et caractères spéciaux).",
"migration_description_0006_sync_admin_and_root_passwords": "Synchroniser les mots de passe admin et root",
"migration_0006_disclaimer": "YunoHost s’attend maintenant à ce que les mots de passe administrateur et racine soient synchronisés. Cette migration remplace votre mot de passe root par le mot de passe administrateur.",
"password_listed": "Ce mot de passe est l’un des mots de passe les plus utilisés dans le monde. Veuillez choisir quelque chose d’un peu plus singulier.",
"password_listed": "Ce mot de passe fait partie des mots de passe les plus utilisés au monde. Veuillez choisir quelque chose de plus unique.",
"password_too_simple_1": "Le mot de passe doit comporter au moins 8 caractères",
"password_too_simple_2": "Le mot de passe doit comporter au moins 8 caractères et contenir des chiffres, des majuscules et des minuscules",
"password_too_simple_3": "Le mot de passe doit comporter au moins 8 caractères et contenir des chiffres, des majuscules, des minuscules et des caractères spéciaux",
@@ -363,8 +363,8 @@
"backup_mount_archive_for_restore": "Préparation de l’archive pour restauration …",
"confirm_app_install_warning": "Avertissement : cette application peut fonctionner mais n’est pas bien intégrée dans YunoHost. Certaines fonctionnalités telles que l’authentification unique et la sauvegarde/restauration peuvent ne pas être disponibles. L’installer quand même ? [{answers:s}] ",
"confirm_app_install_danger": "DANGER ! Cette application est connue pour être encore expérimentale (si elle ne fonctionne pas explicitement) ! Vous ne devriez probablement PAS l’installer à moins de savoir ce que vous faites. AUCUN SUPPORT ne sera fourni si cette application ne fonctionne pas ou casse votre système … Si vous êtes prêt à prendre ce risque de toute façon, tapez '{answers:s}'",
"confirm_app_install_thirdparty": "DANGER ! Cette application ne fait pas partie du catalogue d’applications de YunoHost. L’installation d’applications tierces peut compromettre l’intégrité et la sécurité de votre système. Vous ne devriez probablement PAS l’installer à moins de savoir ce que vous faites. AUCUN SUPPORT ne sera fourni si cette application ne fonctionne pas ou casse votre système … Si vous êtes prêt à prendre ce risque de toute façon, tapez '{answers:s}'",
"dpkg_is_broken": "Vous ne pouvez pas faire ça maintenant car dpkg/apt (le gestionnaire de paquets du système) semble avoir laissé des choses non configurées. Vous pouvez essayer de résoudre ce problème en vous connectant via SSH et en exécutant `sudo dpkg --configure -a'.",
"confirm_app_install_thirdparty": "DANGER! Cette application ne fait pas partie du catalogue d'applications de Yunohost. L'installation d'applications tierces peut compromettre l'intégrité et la sécurité de votre système. Vous ne devriez probablement PAS l'installer à moins de savoir ce que vous faites. AUCUN SUPPORT ne sera fourni si cette application ne fonctionne pas ou casse votre système ... Si vous êtes prêt à prendre ce risque de toute façon, tapez '{answers:s}'",
"dpkg_is_broken": "Vous ne pouvez pas faire ça maintenant car dpkg/apt (le gestionnaire de paquets du système) semble avoir laissé des choses non configurées. Vous pouvez essayer de résoudre ce problème en vous connectant via SSH et en exécutant `sudo apt install --fix-broken` et/ou `sudo dpkg --configure -a'.",
"dyndns_could_not_check_available": "Impossible de vérifier si {domain:s} est disponible chez {provider:s}.",
"file_does_not_exist": "Le fichier dont le chemin est {path:s} n’existe pas.",
"global_settings_setting_security_password_admin_strength": "Qualité du mot de passe administrateur",
@@ -380,7 +380,7 @@
"migration_0008_root": "- Vous ne pourrez pas vous connecter en tant que root via SSH. Au lieu de cela, vous devrez utiliser l’utilisateur admin ;",
"migration_0008_dsa": "- La clé DSA sera désactivée. Par conséquent, il se peut que vous ayez besoin d’invalider un avertissement effrayant de votre client SSH afin de revérifier l’empreinte de votre serveur ;",
"migration_0008_warning": "Si vous comprenez ces avertissements et souhaitez que YunoHost écrase votre configuration actuelle, exécutez la migration. Sinon, vous pouvez également ignorer la migration, bien que cela ne soit pas recommandé.",
"migration_0008_no_warning": "Remplacer votre configuration SSH devrait être sûr, bien que cela ne puisse être promis ! Exécutez la migration pour la remplacer. Sinon, vous pouvez également ignorer la migration, bien que cela ne soit pas recommandé.",
"migration_0008_no_warning": "Remplacer votre configuration SSH ne devrait pas poser de problème, bien qu'il soit difficile de le promettre ! Exécutez la migration pour la remplacer. Sinon, vous pouvez également ignorer la migration, bien que cela ne soit pas recommandé.",
"migrations_success": "Migration {number} {name} réussie !",
"pattern_password_app": "Désolé, les mots de passe ne peuvent pas contenir les caractères suivants : {forbidden_chars}",
"root_password_replaced_by_admin_password": "Votre mot de passe root a été remplacé par votre mot de passe administrateur.",
@@ -390,9 +390,9 @@
"service_restarted": "Le service « {service:s} » a été redémarré",
"service_reload_or_restart_failed": "Impossible de recharger ou de redémarrer le service '{service:s}'\n\nJournaux historisés récents de ce service : {logs:s}",
"service_reloaded_or_restarted": "Le service « {service:s} » a été rechargé ou redémarré",
"this_action_broke_dpkg": "Cette action a laissé des paquets non configurés par dpkg/apt (les gestionnaires de paquets système). Vous pouvez essayer de résoudre ce problème en vous connectant via SSH et en exécutant `sudo dpkg --configure -a`.",
"app_action_cannot_be_ran_because_required_services_down": "Ces services requis doivent être en cours d’exécution pour exécuter cette action: {services}. Essayez de les redémarrer pour continuer (et éventuellement rechercher pourquoi ils sont en panne).",
"admin_password_too_long": "Veuillez choisir un mot de passe de moins de 127 caractères",
"this_action_broke_dpkg": "Cette action a laissé des paquets non configurés par dpkg/apt (les gestionnaires de paquets système). Vous pouvez essayer de résoudre ce problème en vous connectant via SSH et en exécutant `sudo apt install --fix-broken` et/ou `sudo dpkg --configure -a`.",
"app_action_cannot_be_ran_because_required_services_down": "Ces services requis doivent être en cours d’exécution pour exécuter cette action : {services}. Essayez de les redémarrer pour continuer (et éventuellement rechercher pourquoi ils sont en panne).",
"admin_password_too_long": "Veuillez choisir un mot de passe comportant moins de 127 caractères",
"log_regen_conf": "Régénérer les configurations du système '{}'",
"migration_0009_not_needed": "Cette migration semble avoir déjà été jouée ? On l’ignore.",
"regenconf_file_backed_up": "Le fichier de configuration '{conf}' a été sauvegardé sous '{backup}'",
@@ -405,7 +405,7 @@
"regenconf_now_managed_by_yunohost": "Le fichier de configuration '{conf}' est maintenant géré par YunoHost (catégorie {category}).",
"regenconf_up_to_date": "La configuration est déjà à jour pour la catégorie '{category}'",
"already_up_to_date": "Il n’y a rien à faire ! Tout est déjà à jour !",
"global_settings_setting_security_nginx_compatibility": "Compatibilité versus compromis sécuritaire pour le serveur web nginx. Affecte les cryptogrammes (et d’autres aspects liés à la sécurité)",
"global_settings_setting_security_nginx_compatibility": "Compatibilité versus compromis sécuritaire pour le serveur web Nginx. Affecte les cryptogrammes (et d’autres aspects liés à la sécurité)",
"global_settings_setting_security_ssh_compatibility": "Compatibilité versus compromis sécuritaire pour le serveur SSH. Affecte les cryptogrammes (et d’autres aspects liés à la sécurité)",
"global_settings_setting_security_postfix_compatibility": "Compatibilité versus compromis sécuritaire pour le serveur Postfix. Affecte les cryptogrammes (et d’autres aspects liés à la sécurité)",
"migration_description_0009_decouple_regenconf_from_services": "Dissocier le mécanisme « regen-conf » des services",
@@ -413,20 +413,20 @@
"regenconf_file_kept_back": "Le fichier de configuration '{conf}' devait être supprimé par « regen-conf » (catégorie {category}) mais a été conservé.",
"regenconf_updated": "La configuration a été mise à jour pour '{category}'",
"regenconf_would_be_updated": "La configuration aurait dû être mise à jour pour la catégorie '{category}'",
"regenconf_dry_pending_applying": "Vérification de la configuration en attente qui aurait été appliquée pour la catégorie '{category}' …",
"regenconf_dry_pending_applying": "Vérification de la configuration en attente qui aurait été appliquée pour la catégorie '{category}'…",
"regenconf_failed": "Impossible de régénérer la configuration pour la ou les catégorie(s) : '{categories}'",
"regenconf_pending_applying": "Applique la configuration en attente pour la catégorie '{category}' …",
"service_regen_conf_is_deprecated": "'yunohost service regen-conf' est obsolète ! Veuillez plutôt utiliser 'yunohost tools regen-conf' à la place.",
"tools_upgrade_at_least_one": "Veuillez spécifier '--apps' OU '--system'",
"tools_upgrade_at_least_one": "Veuillez spécifier '--apps' ou '--system'",
"tools_upgrade_cant_both": "Impossible de mettre à niveau le système et les applications en même temps",
"tools_upgrade_cant_hold_critical_packages": "Impossibilité de maintenir les paquets critiques…",
"tools_upgrade_regular_packages": "Mise à jour des paquets du système (non liés a YunoHost) …",
"tools_upgrade_regular_packages": "Mise à jour des paquets du système (non liés a YunoHost)…",
"tools_upgrade_regular_packages_failed": "Impossible de mettre à jour les paquets suivants : {packages_list}",
"tools_upgrade_special_packages": "Mise à jour des paquets 'spécifiques' (liés a YunoHost) …",
"tools_upgrade_special_packages": "Mise à jour des paquets 'spécifiques' (liés a YunoHost)…",
"tools_upgrade_special_packages_completed": "La mise à jour des paquets de YunoHost est finie !\nPressez [Entrée] pour revenir à la ligne de commande",
"dpkg_lock_not_available": "Cette commande ne peut être exécutée actuellement car un autre programme semble utiliser le verrou de dpkg (gestionnaire de paquets)",
"dpkg_lock_not_available": "Cette commande ne peut pas être exécutée pour le moment car un autre programme semble utiliser le verrou de dpkg (le gestionnaire de package système)",
"tools_upgrade_cant_unhold_critical_packages": "Impossible de conserver les paquets critiques…",
"tools_upgrade_special_packages_explanation": "La mise à jour spéciale va continuer en arrière-plan. Veuillez ne pas lancer d’autres actions sur votre serveur pendant environ 10 minutes (en fonction de la vitesse du matériel). Après cela, il vous faudra peut-être vous reconnecter à la webadmin. Le journal de mise à niveau sera disponible dans Outils → Journal (dans la webadmin) ou via \"yunohost log list\" (en ligne de commande).",
"tools_upgrade_special_packages_explanation": "La mise à niveau spécifique à YunoHost se poursuivra en arrière-plan. Veuillez ne pas lancer d'autres actions sur votre serveur pendant les 10 prochaines minutes (selon la vitesse du matériel). Après cela, vous devrez peut-être vous reconnecter à l'administrateur Web. Le journal de mise à niveau sera disponible dans Outils → Journal (dans le webadmin) ou en utilisant la « liste des journaux yunohost » (à partir de la ligne de commande).",
"update_apt_cache_failed": "Impossible de mettre à jour le cache APT (gestionnaire de paquets Debian). Voici un extrait du fichier sources.list qui pourrait vous aider à identifier les lignes problématiques :\n{sourceslist}",
"update_apt_cache_warning": "Des erreurs se sont produites lors de la mise à jour du cache APT (gestionnaire de paquets Debian). Voici un extrait des lignes du fichier sources.list qui pourrait vous aider à identifier les lignes problématiques :\n{sourceslist}",
"backup_permission": "Permission de sauvegarde pour l’application {app:s}",
@@ -435,8 +435,8 @@
"group_unknown": "Le groupe {group:s} est inconnu",
"group_updated": "Le groupe '{group}' a été mis à jour",
"group_update_failed": "La mise à jour du groupe '{group}' a échoué : {error}",
"group_creation_failed": "Échec de la création du groupe '{group}': {error}",
"group_deletion_failed": "Échec de la suppression du groupe '{group}': {error}",
"group_creation_failed": "Échec de la création du groupe '{group}' : {error}",
"group_deletion_failed": "Échec de la suppression du groupe '{group}' : {error}",
"log_user_group_delete": "Supprimer le groupe '{}'",
"log_user_group_update": "Mettre à jour '{}' pour le groupe",
"mailbox_disabled": "La boîte aux lettres est désactivée pour l’utilisateur {user:s}",
@@ -449,23 +449,23 @@
"migrations_pending_cant_rerun": "Ces migrations étant toujours en attente, vous ne pouvez pas les exécuter à nouveau : {ids}",
"migration_description_0012_postgresql_password_to_md5_authentication": "Forcer l’authentification PostgreSQL à utiliser MD5 pour les connexions locales",
"migrations_exclusive_options": "'auto', '--skip' et '--force-rerun' sont des options mutuellement exclusives.",
"migrations_not_pending_cant_skip": "Ces migrations ne sont pas en attente et ne peuvent donc pas être ignorées: {ids}",
"migration_0011_can_not_backup_before_migration": "La sauvegarde du système n’a pas pu être terminée avant l’échec de la migration. Erreur: {error:s}",
"migration_0011_migrate_permission": "Migration des autorisations des paramètres des applications vers LDAP …",
"migrations_not_pending_cant_skip": "Ces migrations ne sont pas en attente et ne peuvent donc pas être ignorées : {ids}",
"migration_0011_can_not_backup_before_migration": "La sauvegarde du système n’a pas pu être terminée avant l’échec de la migration. Erreur : {error:s}",
"migration_0011_migrate_permission": "Migration des autorisations des paramètres des applications vers LDAP…",
"migration_0011_migration_failed_trying_to_rollback": "La migration a échoué… Tentative de restauration du système.",
"migration_0011_rollback_success": "Système restauré.",
"migration_0011_update_LDAP_database": "Mise à jour de la base de données LDAP…",
"migration_0011_backup_before_migration": "Création d’une sauvegarde des paramètres de la base de données LDAP et des applications avant la migration.",
"permission_not_found": "Autorisation '{permission:s}' introuvable",
"permission_update_failed": "Impossible de mettre à jour la permission '{permission}' : {error}",
"permission_update_failed": "Impossible de mettre à jour l’autorisation '{permission}' : {error}",
"permission_updated": "Permission '{permission:s}' mise à jour",
"permission_update_nothing_to_do": "Aucune autorisation pour mettre à jour",
"dyndns_provider_unreachable": "Impossible d’atteindre le fournisseur DynDNS {provider}: votre YunoHost n’est pas correctement connecté à Internet ou le serveur Dynette est en panne.",
"migration_0011_update_LDAP_schema": "Mise à jour du schéma LDAP …",
"dyndns_provider_unreachable": "Impossible d’atteindre le fournisseur DynDNS {provider} : votre YunoHost n’est pas correctement connecté à Internet ou le serveur Dynette est en panne.",
"migration_0011_update_LDAP_schema": "Mise à jour du schéma LDAP…",
"migrations_already_ran": "Ces migrations sont déjà effectuées : {ids}",
"migrations_dependencies_not_satisfied": "Exécutez ces migrations : '{dependencies_id}', avant migration {id}.",
"migrations_failed_to_load_migration": "Impossible de charger la migration {id}: {error}",
"migrations_running_forward": "Exécution de la migration {id} …",
"migrations_failed_to_load_migration": "Impossible de charger la migration {id} : {error}",
"migrations_running_forward": "Exécution de la migration {id}…",
"migrations_success_forward": "Migration {id} terminée",
"operation_interrupted": "L’opération a été interrompue manuellement ?",
"permission_already_exist": "L’autorisation '{permission}' existe déjà",
@@ -474,7 +474,7 @@
"permission_deleted": "Permission '{permission:s}' supprimée",
"permission_deletion_failed": "Impossible de supprimer la permission '{permission}' : {error}",
"migration_description_0011_setup_group_permission": "Initialiser les groupes d’utilisateurs et autorisations pour les applications et les services",
"migration_0011_LDAP_update_failed": "Impossible de mettre à jour LDAP. Erreur: {error:s}",
"migration_0011_LDAP_update_failed": "Impossible de mettre à jour LDAP. Erreur : {error:s}",
"group_already_exist": "Le groupe {group} existe déjà",
"group_already_exist_on_system": "Le groupe {group} existe déjà dans les groupes système",
"group_cannot_be_deleted": "Le groupe {group} ne peut pas être supprimé manuellement.",
@@ -485,20 +485,20 @@
"log_user_group_create": "Créer '{}' groupe",
"log_user_permission_update": "Mise à jour des accès pour la permission '{}'",
"log_user_permission_reset": "Réinitialiser la permission '{}'",
"migration_0011_failed_to_remove_stale_object": "Impossible de supprimer un objet périmé {dn}: {error}",
"permission_already_allowed": "Le groupe '{group}' a déjà l'autorisation '{permission}' activée",
"permission_already_disallowed": "Le groupe '{group}' a déjà l'autorisation '{permission}' désactivé",
"permission_cannot_remove_main": "Supprimer une autorisation principale n'est pas autorisé",
"user_already_exists": "L'utilisateur '{user}' existe déjà",
"app_full_domain_unavailable": "Désolé, cette application doit être installée sur un domaine qui lui est propre, mais d'autres applications sont déjà installées sur le domaine '{domain}'. Vous pouvez utiliser un sous-domaine dédié à cette application à la place.",
"group_cannot_edit_all_users": "Le groupe 'all_users' ne peut pas être édité manuellement. C'est un groupe spécial destiné à contenir tous les utilisateurs enregistrés dans YunoHost",
"group_cannot_edit_visitors": "Le groupe 'visiteurs' ne peut pas être édité manuellement. C'est un groupe spécial représentant les visiteurs anonymes",
"group_cannot_edit_primary_group": "Le groupe '{group}' ne peut pas être édité manuellement. C'est le groupe principal destiné à ne contenir qu'un utilisateur spécifique.",
"log_permission_url": "Mise à jour de l'URL associée à l'autorisation '{}'",
"migration_0011_failed_to_remove_stale_object": "Impossible de supprimer un objet périmé {dn} : {error}",
"permission_already_allowed": "Le groupe '{group}' a déjà l’autorisation '{permission}' activée",
"permission_already_disallowed": "Le groupe '{group}' a déjà l’autorisation '{permission}' désactivé",
"permission_cannot_remove_main": "Supprimer une autorisation principale n’est pas autorisé",
"user_already_exists": "L’utilisateur '{user}' existe déjà",
"app_full_domain_unavailable": "Désolé, cette application doit être installée sur un domaine qui lui est propre, mais d’autres applications sont déjà installées sur le domaine '{domain}'. Vous pouvez utiliser un sous-domaine dédié à cette application à la place.",
"group_cannot_edit_all_users": "Le groupe 'all_users' ne peut pas être édité manuellement. C’est un groupe spécial destiné à contenir tous les utilisateurs enregistrés dans YunoHost",
"group_cannot_edit_visitors": "Le groupe 'visiteurs' ne peut pas être édité manuellement. C’est un groupe spécial représentant les visiteurs anonymes",
"group_cannot_edit_primary_group": "Le groupe '{group}' ne peut pas être édité manuellement. C’est le groupe principal destiné à ne contenir qu’un utilisateur spécifique.",
"log_permission_url": "Mise à jour de l’URL associée à l’autorisation '{}'",
"migration_0011_slapd_config_will_be_overwritten": "Il semble que vous ayez modifié manuellement la configuration de slapd. Pour cette migration critique, YunoHost doit forcer la mise à jour de la configuration de slapd. Les fichiers originaux seront sauvegardés dans {conf_backup_folder}.",
"permission_already_up_to_date": "L’autorisation n’a pas été mise à jour car les demandes d’ajout/suppression correspondent déjà à l’état actuel.",
"permission_currently_allowed_for_all_users": "Cette autorisation est actuellement accordée à tous les utilisateurs en plus des autres groupes. Vous voudrez probablement soit supprimer l’autorisation 'all_users', soit supprimer les autres groupes auxquels il est actuellement autorisé.",
"app_install_failed": "Impossible d’installer {app}: {error}",
"app_install_failed": "Impossible d’installer {app} : {error}",
"app_install_script_failed": "Une erreur est survenue dans le script d’installation de l’application",
"permission_require_account": "Permission {permission} n’a de sens que pour les utilisateurs ayant un compte et ne peut donc pas être activé pour les visiteurs.",
"app_remove_after_failed_install": "Supprimer l’application après l’échec de l’installation …",
@@ -507,61 +507,51 @@
"diagnosis_found_errors": "Trouvé {errors} problème(s) significatif(s) lié(s) à {category} !",
"diagnosis_found_errors_and_warnings": "Trouvé {errors} problème(s) significatif(s) (et {warnings} (avertissement(s)) en relation avec {category} !",
"diagnosis_ip_not_connected_at_all": "Le serveur ne semble pas du tout connecté à Internet !?",
"diagnosis_ip_weird_resolvconf": "La résolution DNS semble fonctionner, mais soyez prudent en utilisant un fichier /etc/resolv.conf personnalisé.",
"diagnosis_ip_weird_resolvconf_details": "Au lieu de cela, ce fichier devrait être un lien symbolique vers /etc/resolvconf/run/resolv.conf lui-même pointant vers 127.0.0.1 (dnsmasq). Les résolveurs réels doivent être configurés dans /etc/resolv.dnsmasq.conf.",
"diagnosis_dns_missing_record": "Selon la configuration DNS recommandée, vous devez ajouter un enregistrement DNS\nType: {type}\nNom: {name}\nValeur {value}",
"diagnosis_diskusage_ok": "Le stockage {mountpoint} (sur le périphérique {device}) a encore {free} ({free_percent}%) d’espace libre !",
"diagnosis_ip_weird_resolvconf": "La résolution DNS semble fonctionner, mais il semble que vous utilisez un <code>/etc/resolv.conf</code> personnalisé.",
"diagnosis_ip_weird_resolvconf_details": "Le fichier <code>/etc/resolv.conf</code> doit être un lien symbolique vers <code>/etc/resolvconf/run/resolv.conf</code> lui-même pointant vers <code>127.0.0.1</code> (dnsmasq). Si vous souhaitez configurer manuellement les résolveurs DNS, veuillez modifier <code>/etc/resolv.dnsmasq.conf</code>.",
"diagnosis_dns_missing_record": "Selon la configuration DNS recommandée, vous devez ajouter un enregistrement DNS<br>Type : <code>{type}</code><br>Nom : <code>{name}</code><br>Valeur: <code>{value}</code>",
"diagnosis_diskusage_ok": "L’espace de stockage <code>{mountpoint}</code> (sur le périphérique <code>{device}</code>) a encore {free} ({free_percent}%) espace restant (sur {total}) !",
"diagnosis_ram_ok": "Le système dispose encore de {available} ({available_percent}%) de RAM sur {total}.",
"diagnosis_regenconf_allgood": "Tous les fichiers de configuration sont conformes à la configuration recommandée !",
"diagnosis_security_vulnerable_to_meltdown": "Vous semblez vulnérable à la vulnérabilité de sécurité critique de Meltdown",
"diagnosis_basesystem_host": "Le serveur utilise Debian {debian_version}",
"diagnosis_basesystem_kernel": "Le serveur utilise le noyau Linux {kernel_version}",
"diagnosis_basesystem_ynh_single_version": "{package} version: {version} ({repo})",
"diagnosis_basesystem_ynh_single_version": "{package} version : {version} ({repo})",
"diagnosis_basesystem_ynh_main_version": "Le serveur utilise YunoHost {main_version} ({repo})",
"diagnosis_basesystem_ynh_inconsistent_versions": "Vous exécutez des versions incohérentes des packages YunoHost … probablement à cause d’une mise à niveau partielle ou échouée.",
"diagnosis_basesystem_ynh_inconsistent_versions": "Vous exécutez des versions incohérentes des packages YunoHost ... très probablement en raison d’une mise à niveau échouée ou partielle.",
"diagnosis_display_tip_cli": "Vous pouvez exécuter 'yunohost diagnosis show --issues' pour afficher les problèmes détectés.",
"diagnosis_failed_for_category": "Échec du diagnostic pour la catégorie '{category}': {error}",
"diagnosis_cache_still_valid": "(Le cache est encore valide pour le diagnostic {category}. Il ne sera pas re-diagnostiqué pour le moment!)",
"diagnosis_ignored_issues": "(+ {nb_ignored} questions ignorée(s))",
"diagnosis_ignored_issues": "(+ {nb_ignored} problèmes ignorée(s))",
"diagnosis_found_warnings": "Trouvé {warnings} objet(s) pouvant être amélioré(s) pour {category}.",
"diagnosis_everything_ok": "Tout semble bien pour {category} !",
"diagnosis_failed": "Impossible d’extraire le résultat du diagnostic pour la catégorie '{category}': {error}",
"diagnosis_failed": "Échec de la récupération du résultat du diagnostic pour la catégorie '{category}' : {error}",
"diagnosis_ip_connected_ipv4": "Le serveur est connecté à Internet en IPv4 !",
"diagnosis_ip_no_ipv4": "Le serveur ne dispose pas d’une adresse IPv4.",
"diagnosis_ip_connected_ipv6": "Le serveur est connecté à Internet en IPv6 !",
"diagnosis_ip_no_ipv6": "Le serveur ne dispose pas d’une adresse IPv6.",
"diagnosis_ip_dnsresolution_working": "La résolution de nom de domaine fonctionne !",
"diagnosis_ip_broken_dnsresolution": "La résolution du nom de domaine semble interrompue pour une raison quelconque … Un pare-feu bloque-t-il les requêtes DNS ?",
"diagnosis_ip_broken_resolvconf": "La résolution du nom de domaine semble cassée sur votre serveur, ce qui semble lié au fait que /etc/resolv.conf ne pointe pas vers 127.0.0.1.",
"diagnosis_dns_good_conf": "Bonne configuration DNS pour le domaine {domain} (catégorie {category})",
"diagnosis_dns_bad_conf": "Configuration DNS incorrecte ou manquante pour le domaine {domain} (catégorie {category})",
"diagnosis_dns_discrepancy": "L’enregistrement DNS de type {0} et nom {1} ne correspond pas à la configuration recommandée. Valeur actuelle: {2}. Valeur exceptée: {3}. Vous pouvez consulter https://yunohost.org/dns_config pour plus d’informations.",
"diagnosis_ip_broken_resolvconf": "La résolution du nom de domaine semble être cassée sur votre serveur, ce qui semble lié au fait que <code>/etc/resolv.conf</code> ne pointe pas vers <code>127.0.0.1</code>.",
"diagnosis_dns_good_conf": "Les enregistrements DNS sont correctement configurés pour le domaine {domain} (catégorie {category})",
"diagnosis_dns_bad_conf": "Certains enregistrements DNS sont manquants ou incorrects pour le domaine {domain} (catégorie {category})",
"diagnosis_dns_discrepancy": "Cet enregistrement DNS ne semble pas correspondre à la configuration recommandée : <br>Type : <code>{type}</code><br>Nom : <code>{name}</code><br> La valeur actuelle est : <code>{current}</code><br> La valeur attendue est : <code>{value}</code>",
"diagnosis_services_bad_status": "Le service {service} est {status} :-(",
"diagnosis_diskusage_verylow": "Le stockage {mountpoint} (sur le périphérique {device}) ne dispose que de {free_abs_GB} Go ({free_percent}%). Vous devriez vraiment envisager de nettoyer un peu d’espace.",
"diagnosis_diskusage_low": "Le stockage {mountpoint} (sur le périphérique {device}) ne dispose que de {free_abs_GB} Go ({free_percent}%). Faites attention.",
"diagnosis_ram_verylow": "Le système ne dispose plus que de {available_abs_MB} MB ({available_percent}%) ! (sur {total_abs_MB} Mo)",
"diagnosis_ram_low": "Le système n’a plus de {available_abs_MB} MB ({available_percent}%) RAM sur {total_abs_MB} MB. Faites attention.",
"diagnosis_swap_none": "Le système n’a aucun échange. Vous devez envisager d’ajouter au moins 256 Mo de swap pour éviter les situations où le système manque de mémoire.",
"diagnosis_swap_notsomuch": "Le système ne dispose que de {total_MB} Mo de swap. Vous devez envisager d’avoir au moins 256 Mo pour éviter les situations où le système manque de mémoire.",
"diagnosis_swap_ok": "Le système dispose de {total_MB} Mo de swap !",
"diagnosis_dns_discrepancy": "L’enregistrement DNS de type {type} et nom {name} ne correspond pas à la configuration recommandée.\nValeur actuelle: {current}\nValeur attendue: {value}",
"diagnosis_services_bad_status": "Le service {service} est {status} :-(",
"diagnosis_diskusage_verylow": "Le stockage {mountpoint} (sur le périphérique {device}) ne dispose que de {free} ({free_percent}%). Vous devriez vraiment envisager de nettoyer un peu d’espace.",
"diagnosis_diskusage_low": "Le stockage {mountpoint} (sur le périphérique {device}) ne dispose que de {free} ({free_percent}%). Faites attention.",
"diagnosis_diskusage_verylow": "L'espace de stockage <code>{mountpoint}</code> (sur l’appareil <code>{device}</code>) ne dispose que de {free} ({free_percent}%) espace restant (sur {total}). Vous devriez vraiment envisager de nettoyer de l’espace !",
"diagnosis_diskusage_low": "L'espace de stockage <code>{mountpoint}</code> (sur l'appareil <code>{device}</code>) ne dispose que de {free} ({free_percent}%) espace restant (sur {total}). Faites attention.",
"diagnosis_ram_verylow": "Le système ne dispose plus que de {available} ({available_percent}%)! (sur {total})",
"diagnosis_ram_low": "Le système n’a plus de {available} ({available_percent}%) RAM sur {total}. Faites attention.",
"diagnosis_swap_none": "Le système n’a aucun espace de swap. Vous devriez envisager d’ajouter au moins {recommended} de swap pour éviter les situations où le système manque de mémoire.",
"diagnosis_swap_notsomuch": "Le système ne dispose que de {total} de swap. Vous devez envisager d’avoir au moins {recommended} pour éviter les situations où le système manque de mémoire.",
"diagnosis_swap_ok": "Le système dispose de {total} de swap !",
"diagnosis_regenconf_manually_modified": "Le fichier de configuration {file} a été modifié manuellement.",
"diagnosis_regenconf_manually_modified": "Le fichier de configuration <code>{file}</code> semble avoir été modifié manuellement.",
"diagnosis_regenconf_manually_modified_debian": "Le fichier de configuration {file} a été modifié manuellement par rapport à celui par défaut de Debian.",
"diagnosis_regenconf_manually_modified_details": "C’est probablement OK tant que vous savez ce que vous faites ;) !",
"diagnosis_regenconf_manually_modified_details": "C’est probablement OK si vous savez ce que vous faites ! YunoHost cessera de mettre à jour ce fichier automatiquement ... Mais attention, les mises à jour de YunoHost pourraient contenir d’importantes modifications recommandées. Si vous le souhaitez, vous pouvez inspecter les différences avec <cmd>yunohost tools regen-conf {category} --dry-run --with-diff</cmd> et forcer la réinitialisation à la configuration recommandée avec <cmd>yunohost tools regen-conf {category} --force</cmd>",
"diagnosis_regenconf_manually_modified_debian_details": "Cela peut probablement être OK, mais il faut garder un œil dessus …",
"diagnosis_security_all_good": "Aucune vulnérabilité de sécurité critique n’a été trouvée.",
"apps_catalog_init_success": "Système de catalogue d’applications initialisé !",
"apps_catalog_failed_to_download": "Impossible de télécharger le catalogue des applications {apps_catalog}:{error}",
"apps_catalog_failed_to_download": "Impossible de télécharger le catalogue des applications {apps_catalog} : {error}",
"diagnosis_mail_outgoing_port_25_blocked": "Le port sortant 25 semble être bloqué. Vous devriez essayer de le débloquer dans le panneau de configuration de votre fournisseur de services Internet (ou hébergeur). En attendant, le serveur ne pourra pas envoyer de courrier électronique à d’autres serveurs.",
"domain_cannot_remove_main_add_new_one": "Vous ne pouvez pas supprimer '{domain:s}' car il s’agit du domaine principal et de votre seul domaine. Vous devez d’abord ajouter un autre domaine à l’aide de 'yunohost domain add <another-domain.com>', puis définir comme domaine principal à l’aide de 'yunohost domain main-domain -n <nom-d’un-autre-domaine.com>' et vous pouvez ensuite supprimer le domaine '{domain:s}' à l’aide de 'yunohost domain remove {domain:s}'.",
"domain_cannot_remove_main_add_new_one": "Vous ne pouvez pas supprimer '{domain:s}' car il s’agit du domaine principal et de votre seul domaine. Vous devez d’abord ajouter un autre domaine à l’aide de 'yunohost domain add <another-domain.com>', puis définir comme domaine principal à l’aide de 'yunohost domain main-domain -n <nom-d’un-autre-domaine.com>' et vous pouvez ensuite supprimer le domaine '{domain:s}' à l’aide de 'yunohost domain remove {domain:s}'.'",
"diagnosis_security_vulnerable_to_meltdown_details": "Pour résoudre ce problème, vous devez mettre à niveau votre système et redémarrer pour charger le nouveau noyau Linux (ou contacter votre fournisseur de serveur si cela ne fonctionne pas). Voir https://meltdownattack.com/ pour plus d’informations.",
"diagnosis_description_basesystem": "Système de base",
"diagnosis_description_ip": "Connectivité Internet",
@@ -572,32 +562,32 @@
"diagnosis_description_regenconf": "Configurations système",
"diagnosis_description_security": "Contrôles de sécurité",
"diagnosis_ports_could_not_diagnose": "Impossible de diagnostiquer si les ports sont accessibles de l'extérieur.",
"diagnosis_ports_could_not_diagnose_details": "Erreur: {error}",
"apps_catalog_updating": "Mise à jour du catalogue d'applications…",
"diagnosis_ports_could_not_diagnose_details": "Erreur : {error}",
"apps_catalog_updating": "Mise à jour du catalogue d’applications…",
"apps_catalog_obsolete_cache": "Le cache du catalogue d'applications est vide ou obsolète.",
"apps_catalog_update_success": "Le catalogue des applications a été mis à jour !",
"diagnosis_mail_ougoing_port_25_ok": "Le port sortant 25 n’est pas bloqué et le courrier électronique peut être envoyé à d’autres serveurs.",
"diagnosis_description_mail": "Email",
"diagnosis_description_mail": "E-mail",
"diagnosis_ports_unreachable": "Le port {port} n’est pas accessible de l’extérieur.",
"diagnosis_ports_ok": "Le port {port} est accessible de l’extérieur.",
"diagnosis_http_could_not_diagnose": "Impossible de diagnostiquer si le domaine est accessible de l’extérieur.",
"diagnosis_http_could_not_diagnose_details": "Erreur: {error}",
"diagnosis_http_could_not_diagnose_details": "Erreur : {error}",
"diagnosis_http_ok": "Le domaine {domain} est accessible en HTTP depuis l’extérieur.",
"diagnosis_http_unreachable": "Le domaine {domain} est inaccessible en HTTP depuis l’extérieur.",
"diagnosis_unknown_categories": "Les catégories suivantes sont inconnues: {categories}",
"diagnosis_unknown_categories": "Les catégories suivantes sont inconnues : {categories}",
"migration_description_0013_futureproof_apps_catalog_system": "Migrer vers le nouveau système de catalogue d’applications à l’épreuve du temps",
"app_upgrade_script_failed": "Une erreur s’est produite durant l’exécution du script de mise à niveau de l’application",
"migration_description_0014_remove_app_status_json": "Supprimer les anciens fichiers d’application status.json",
"diagnosis_services_running": "Le service {service} est en cours de fonctionnement !",
"diagnosis_services_conf_broken": "La configuration est cassée pour le service {service} !",
"diagnosis_ports_needed_by": "Rendre ce port accessible est nécessaire pour les fonctionnalités de type {category} (service {service})",
"diagnosis_ports_forwarding_tip": "Pour résoudre ce problème, vous devez probablement configurer la redirection de port sur votre routeur Internet comme décrit sur https://yunohost.org/isp_box_config",
"diagnosis_ports_forwarding_tip": "Pour résoudre ce problème, vous devez probablement configurer la redirection de port sur votre routeur Internet comme décrit dans <a href='https://yunohost.org/isp_box_config'>https://yunohost.org/isp_box_config</a>",
"diagnosis_http_connection_error": "Erreur de connexion : impossible de se connecter au domaine demandé, il est probablement injoignable.",
"diagnosis_no_cache": "Pas encore de cache de diagnostique pour la catégorie « {category} »",
"yunohost_postinstall_end_tip": "La post-installation terminée! Pour finaliser votre configuration, il est recommendé de :\n - ajouter un premier utilisateur depuis la section \"Utilisateurs\" de l’interface web (ou \"yunohost user create <nom d’utilisateur>\" en ligne de commande);\n - diagnostiquer les potentiels problèmes dans la section \"Diagnostic\" de l'interface web (ou \"yunohost diagnosis run\" en ligne de commande);\n - lire les parties \"Finalisation de votre configuration\" et \"Découverte de Yunohost\" dans le guide de l’administrateur: https://yunohost.org/admindoc.",
"diagnosis_services_bad_status_tip": "Vous pouvez essayer de redémarrer le service. Si cela ne fonctionne pas, consultez les journaux de service à l’aide de 'yunohost service log {service}' ou de la section 'Services' dans la webadmin.",
"diagnosis_http_bad_status_code": "Le système de diagnostique n’a pas réussi à contacter votre serveur. Il se peut qu’une autre machine réponde à la place de votre serveur. Vérifiez que le port 80 est correctement redirigé, que votre configuration nginx est à jour et qu’un reverse-proxy n’interfère pas.",
"diagnosis_http_timeout": "Expiration du délai en essayant de contacter votre serveur de l’extérieur. Il semble être inaccessible. Vérifiez que vous transférez correctement le port 80, que nginx est en cours d’exécution et qu’un pare-feu n’interfère pas.",
"yunohost_postinstall_end_tip": "La post-installation terminée! Pour finaliser votre configuration, il est recommandé de :\n - ajouter un premier utilisateur depuis la section \"Utilisateurs\" de l’interface web (ou \"yunohost user create <nom d’utilisateur>\" en ligne de commande) ;\n - diagnostiquer les potentiels problèmes dans la section \"Diagnostic\" de l'interface web (ou \"yunohost diagnosis run\" en ligne de commande) ;\n - lire les parties \"Finalisation de votre configuration\" et \"Découverte de YunoHost\" dans le guide de l’administrateur: https://yunohost.org/admindoc.",
"diagnosis_services_bad_status_tip": "Vous pouvez essayer de <a href='#/services/{service}'>redémarrer le service</a>, et si cela ne fonctionne pas, consultez <a href='#/services/{service}'>les journaux de service dans le webadmin</a> (à partir de la ligne de commande, vous pouvez le faire avec <cmd>yunohost service restart {service}</cmd> et <cmd>yunohost service log {service}</cmd> ).",
"diagnosis_http_bad_status_code": "Le système de diagnostique n’a pas réussi à contacter votre serveur. Il se peut qu’une autre machine réponde à la place de votre serveur. Vérifiez que le port 80 est correctement redirigé, que votre configuration Nginx est à jour et qu’un reverse-proxy n’interfère pas.",
"diagnosis_http_timeout": "Expiration du délai en essayant de contacter votre serveur de l’extérieur. Il semble être inaccessible. Vérifiez que vous transférez correctement le port 80, que Nginx est en cours d’exécution et qu’un pare-feu n’interfère pas.",
"global_settings_setting_pop3_enabled": "Activer le protocole POP3 pour le serveur de messagerie",
"log_app_action_run": "Lancer l’action de l’application '{}'",
"log_app_config_show_panel": "Montrer le panneau de configuration de l’application '{}'",
@@ -608,5 +598,75 @@
"diagnosis_basesystem_hardware": "L’architecture du serveur est {virt} {arch}",
"group_already_exist_on_system_but_removing_it": "Le groupe {group} est déjà présent dans les groupes du système, mais YunoHost va le supprimer…",
"certmanager_warning_subdomain_dns_record": "Le sous-domaine '{subdomain:s}' ne résout pas vers la même adresse IP que '{domain:s}'. Certaines fonctionnalités seront indisponibles tant que vous n’aurez pas corrigé cela et regénéré le certificat.",
"domain_cannot_add_xmpp_upload": "Vous ne pouvez pas ajouter de domaine commençant par 'xmpp-upload.'. Ce type de nom est réservé à la fonctionnalité d’upload XMPP intégrée dans YunoHost."
|
||||
"domain_cannot_add_xmpp_upload": "Vous ne pouvez pas ajouter de domaine commençant par 'xmpp-upload.'. Ce type de nom est réservé à la fonctionnalité d’upload XMPP intégrée dans YunoHost.",
|
||||
"diagnosis_mail_outgoing_port_25_ok": "Le serveur de messagerie SMTP peut envoyer des e-mails (le port sortant 25 n'est pas bloqué).",
|
||||
"diagnosis_mail_outgoing_port_25_blocked_details": "Vous devez d’abord essayer de débloquer le port sortant 25 dans votre interface de routeur Internet ou votre interface d’hébergement. (Certains hébergeurs peuvent vous demander de leur envoyer un ticket de support pour cela).",
|
||||
"diagnosis_mail_ehlo_bad_answer": "Un service non SMTP a répondu sur le port 25 en IPv{ipversion}",
|
||||
"diagnosis_mail_ehlo_bad_answer_details": "Cela peut être dû à une autre machine qui répond au lieu de votre serveur.",
|
||||
"diagnosis_mail_ehlo_wrong": "Un autre serveur de messagerie SMTP répond sur IPv{ipversion}. Votre serveur ne sera probablement pas en mesure de recevoir des e-mails.",
|
||||
"diagnosis_mail_ehlo_could_not_diagnose": "Impossible de diagnostiquer si le serveur de messagerie postfix est accessible de l’extérieur en IPv{ipversion}.",
|
||||
"diagnosis_mail_ehlo_could_not_diagnose_details": "Erreur : {error}",
|
||||
"diagnosis_mail_fcrdns_dns_missing": "Aucun DNS inverse n’est défini pour IPv{ipversion}. Certains e-mails seront peut-être refusés ou considérés comme des spam.",
|
||||
"diagnosis_mail_fcrdns_ok": "Votre DNS inverse est correctement configuré !",
|
||||
"diagnosis_mail_fcrdns_nok_details": "Vous devez d’abord essayer de configurer le DNS inverse avec <code>{ehlo_domain}</code> dans votre interface de routeur Internet ou votre interface d’hébergement. (Certains hébergeurs peuvent vous demander de leur envoyer un ticket de support pour cela).",
|
||||
"diagnosis_mail_fcrdns_different_from_ehlo_domain": "Le DNS inverse n'est pas correctement configuré en IPv{ipversion}. Certains e-mails seront peut-être refusés ou considérés comme des spam.",
|
||||
"diagnosis_mail_blacklist_ok": "Les adresses IP et les domaines utilisés par ce serveur ne semblent pas être sur liste noire",
|
||||
"diagnosis_mail_blacklist_reason": "La raison de la liste noire est : {reason}",
|
||||
"diagnosis_mail_blacklist_website": "Après avoir identifié la raison pour laquelle vous êtes répertorié et l'avoir corrigé, n’hésitez pas à demander le retrait de votre IP ou domaine sur {blacklist_website}",
|
||||
"diagnosis_mail_queue_ok": "{nb_pending} e-mails en attente dans les files d'attente de messagerie",
|
||||
"diagnosis_mail_queue_unavailable_details": "Erreur : {error}",
|
||||
"diagnosis_mail_queue_too_big": "Trop d’e-mails en attente dans la file d'attente ({nb_pending} e-mails)",
|
||||
"global_settings_setting_smtp_allow_ipv6": "Autoriser l'utilisation d’IPv6 pour recevoir et envoyer du courrier",
|
||||
"diagnosis_security_all_good": "Aucune vulnérabilité de sécurité critique n’a été trouvée.",
|
||||
"diagnosis_display_tip": "Pour voir les problèmes détectés, vous pouvez accéder à la section Diagnostic du webadmin ou exécuter « yunohost diagnosis show --issues » à partir de la ligne de commande.",
|
||||
"diagnosis_ip_global": "IP globale : <code>{global}</code>",
|
||||
"diagnosis_ip_local": "IP locale : <code>{local}</code>",
|
||||
"diagnosis_dns_point_to_doc": "Veuillez consulter la documentation sur <a href='https://yunohost.org/dns_config'>https://yunohost.org/dns_config</a> si vous avez besoin d’aide pour configurer les enregistrements DNS.",
|
||||
"diagnosis_mail_outgoing_port_25_blocked_relay_vpn": "Certains fournisseurs ne vous laisseront pas débloquer le port sortant 25 parce qu’ils ne se soucient pas de la neutralité du Net. <br> - Certains d’entre eux offrent l’alternative d'<a href='https://yunohost.org/#/smtp_relay'>utiliser un serveur de messagerie relai</a> bien que cela implique que le relai sera en mesure d’espionner votre trafic de messagerie. <br> - Une alternative respectueuse de la vie privée consiste à utiliser un VPN *avec une IP publique dédiée* pour contourner ce type de limites. Voir <a href='https://yunohost.org/#/vpn_advantage'>https://yunohost.org/#/vpn_advantage</a> <br> - Vous pouvez également envisager de passer à <a href='https://yunohost.org/#/isp'>un fournisseur plus respectueux de la neutralité du net</a>",
|
||||
"diagnosis_mail_ehlo_ok": "Le serveur de messagerie SMTP est accessible de l'extérieur et peut donc recevoir des e-mails !",
|
||||
"diagnosis_mail_ehlo_unreachable": "Le serveur de messagerie SMTP est inaccessible de l’extérieur en IPv{ipversion}. Il ne pourra pas recevoir d’e-mails.",
|
||||
"diagnosis_mail_ehlo_unreachable_details": "Impossible d'ouvrir une connexion sur le port 25 à votre serveur en IPv{ipversion}. Il semble inaccessible. <br> 1. La cause la plus courante de ce problème est que le port 25 <a href='https://yunohost.org/isp_box_config'>n'est pas correctement redirigé vers votre serveur</a>. <br> 2. Vous devez également vous assurer que le service postfix est en cours d'exécution. <br> 3. Sur les configurations plus complexes: assurez-vous qu'aucun pare-feu ou proxy inversé n'interfère.",
|
||||
"diagnosis_mail_ehlo_wrong_details": "Le EHLO reçu par le serveur de diagnostique distant en IPv{ipversion} est différent du domaine de votre serveur. <br> EHLO reçu: <code>{wrong_ehlo}</code> <br> Attendu : <code>{right_ehlo}</code> <br> La cause la plus courante ce problème est que le port 25 <a href='https://yunohost.org/isp_box_config'> n’est pas correctement redirigé vers votre serveur </a>. Vous pouvez également vous assurer qu’aucun pare-feu ou proxy inversé n’interfère.",
|
||||
"diagnosis_mail_fcrdns_nok_alternatives_4": "Certains fournisseurs ne vous laisseront pas configurer votre DNS inversé (ou leur fonctionnalité pourrait être cassée …). Si vous rencontrez des problèmes à cause de cela, envisagez les solutions suivantes : <br> - Certains FAI fournissent l’alternative de <a href='https://yunohost.org/#/smtp_relay'>à l’aide d’un relais de serveur de messagerie</a> bien que cela implique que le relais pourra espionner votre trafic de messagerie. <br> - Une alternative respectueuse de la vie privée consiste à utiliser un VPN *avec une IP publique dédiée* pour contourner ce type de limites. Voir <a href='https://yunohost.org/#/vpn_advantage'>https://yunohost.org/#/vpn_advantage</a> <br> - Enfin, il est également possible de <a href='https://yunohost.org/#/isp'>changer de fournisseur</a>",
|
||||
"diagnosis_mail_fcrdns_nok_alternatives_6": "Certains fournisseurs ne vous laisseront pas configurer votre DNS inversé (ou leur fonctionnalité pourrait être cassée ...). Si votre DNS inversé est correctement configuré en IPv4, vous pouvez essayer de désactiver l'utilisation d'IPv6 lors de l'envoi d'e-mails en exécutant <cmd>yunohost settings set smtp.allow_ipv6 -v off</cmd>. Remarque: cette dernière solution signifie que vous ne pourrez pas envoyer ou recevoir de courriels avec les quelques serveurs qui ont uniquement de l'IPv6.",
|
||||
"diagnosis_mail_fcrdns_different_from_ehlo_domain_details": "DNS inverse actuel : <code>{rdns_domain}</code> <br> Valeur attendue : <code>{ehlo_domain}</code>",
|
||||
"diagnosis_mail_blacklist_listed_by": "Votre IP ou domaine <code>{item}</code> est sur liste noire sur {blacklist_name}",
|
||||
"diagnosis_mail_queue_unavailable": "Impossible de consulter le nombre d’e-mails en attente dans la file d'attente",
|
||||
"diagnosis_ports_partially_unreachable": "Le port {port} n'est pas accessible de l'extérieur en IPv{failed}.",
|
||||
"diagnosis_http_hairpinning_issue": "Votre réseau local ne semble pas supporter l'hairpinning.",
|
||||
"diagnosis_http_hairpinning_issue_details": "C'est probablement à cause de la box/routeur de votre fournisseur d'accès internet. Par conséquent, les personnes extérieures à votre réseau local pourront accéder à votre serveur comme prévu, mais pas les personnes internes au réseau local (comme vous, probablement ?) si elles utilisent le nom de domaine ou l'IP globale. Vous pourrez peut-être améliorer la situation en consultant <a href='https://yunohost.org/dns_local_network'>https://yunohost.org/dns_local_network</a>",
|
||||
"diagnosis_http_partially_unreachable": "Le domaine {domain} semble inaccessible en HTTP depuis l’extérieur du réseau local en IPv{failed}, bien qu’il fonctionne en IPv{passed}.",
|
||||
"diagnosis_http_nginx_conf_not_up_to_date": "La configuration Nginx de ce domaine semble avoir été modifiée manuellement et empêche YunoHost de diagnostiquer si elle est accessible en HTTP.",
|
||||
"diagnosis_http_nginx_conf_not_up_to_date_details": "Pour corriger la situation, inspectez la différence avec la ligne de commande en utilisant les outils <cmd>yunohost tools regen-conf nginx --dry-run --with-diff</cmd> et si vous êtes d’accord, appliquez les modifications avec <cmd>yunohost tools regen-conf nginx --force</cmd>.",
|
||||
"backup_archive_cant_retrieve_info_json": "Impossible d'avoir des informations sur l'archive '{archive}' ... Le fichier info.json ne peut pas être trouvé (ou n'est pas un fichier json valide).",
|
||||
"backup_archive_corrupted": "Il semble que l'archive de la sauvegarde '{archive}' est corrompue : {error}",
|
||||
"diagnosis_ip_no_ipv6_tip": "L'utilisation de IPv6 n'est pas obligatoire pour le fonctionnement de votre serveur, mais cela contribue à la santé d'Internet dans son ensemble. IPv6 généralement configuré automatiquement par votre système ou votre FAI s'il est disponible. Autrement, vous devrez prendre quelque minutes pour le configurer manuellement à l'aide de cette documentation: <a href='https://yunohost.org/#/ipv6'>https://yunohost.org/#/ipv6</a>. Si vous ne pouvez pas activer IPv6 ou si c'est trop technique pour vous, vous pouvez aussi ignorer cet avertissement sans que cela pose problème.",
|
||||
"diagnosis_domain_expiration_not_found": "Impossible de vérifier la date d'expiration de certains domaines",
|
||||
"diagnosis_domain_expiration_not_found_details": "Les informations WHOIS pour le domaine {domain} ne semblent pas contenir les informations concernant la date d'expiration ?",
|
||||
"diagnosis_domain_not_found_details": "Le domaine {domain} n'existe pas dans la base de donnée WHOIS ou est expiré !",
|
||||
"diagnosis_domain_expiration_success": "Vos domaines sont enregistrés et ne vont pas expirer prochainement.",
|
||||
"diagnosis_domain_expiration_warning": "Certains domaines vont expirer prochainement !",
|
||||
"diagnosis_domain_expiration_error": "Certains domaines vont expirer TRÈS PROCHAINEMENT !",
|
||||
"diagnosis_domain_expires_in": "{domain} expire dans {days} jours.",
|
||||
"certmanager_domain_not_diagnosed_yet": "Il n'y a pas encore de résultat de diagnostic pour le domaine %s. Merci de relancer un diagnostic pour les catégories 'Enregistrements DNS' et 'Web' dans la section Diagnostique pour vérifier si le domaine est prêt pour Let's Encrypt. (Ou si vous savez ce que vous faites, utilisez '--no-checks' pour désactiver la vérification.)",
|
||||
"diagnosis_swap_tip": "Merci d'être prudent et conscient que si vous hébergez une partition SWAP sur une carte SD ou un disque SSD, cela risque de réduire drastiquement l’espérance de vie du périphérique.",
|
||||
"restore_already_installed_apps": "Les applications suivantes ne peuvent pas être restaurées car elles sont déjà installées : {apps}",
|
||||
"regenconf_need_to_explicitly_specify_ssh": "La configuration de ssh a été modifiée manuellement. Vous devez explicitement indiquer la mention --force à \"ssh\" pour appliquer les changements.",
|
||||
"migration_0015_cleaning_up": "Nettoyage du cache et des paquets qui ne sont plus utiles …",
|
||||
"migration_0015_specific_upgrade": "Commencement de la mise à jour des paquets du système qui doivent être mis à jour séparément …",
|
||||
"migration_0015_modified_files": "Veuillez noter que les fichiers suivants ont été modifiés manuellement et pourraient être écrasés à la suite de la mise à niveau : {manually_modified_files}",
|
||||
"migration_0015_problematic_apps_warning": "Veuillez noter que des applications qui peuvent poser problèmes ont été détectées. Il semble qu'elles n'aient pas été installées à partir du catalogue d'applications YunoHost, ou bien qu'elles ne soient pas signalées comme \"fonctionnelles\". Par conséquent, il n'est pas possible de garantir que les applications suivantes fonctionneront encore après la mise à niveau : {problematic_apps}",
|
||||
"migration_0015_general_warning": "Veuillez noter que cette migration est une opération délicate. L'équipe YunoHost a fait de son mieux pour la revérifier et la tester, mais la migration pourrait quand même casser des éléments du système ou de ses applications.\n\nIl est donc recommandé :\n…- de faire une sauvegarde de toute donnée ou application critique. Plus d'informations ici https://yunohost.org/backup ;\n…- d'être patient après le lancement de la migration. Selon votre connexion internet et votre matériel, la mise à niveau peut prendre jusqu'à quelques heures.",
|
||||
"migration_0015_system_not_fully_up_to_date": "Votre système n'est pas entièrement à jour. Veuillez effectuer une mise à jour normale avant de lancer la migration vers Buster.",
|
||||
"migration_0015_not_enough_free_space": "L'espace libre est très faible dans /var/ ! Vous devriez avoir au moins 1 Go de libre pour effectuer cette migration.",
|
||||
"migration_0015_not_stretch": "La distribution Debian actuelle n'est pas Stretch !",
|
||||
"migration_0015_yunohost_upgrade": "Démarrage de la mise à jour de YunoHost …",
|
||||
"migration_0015_still_on_stretch_after_main_upgrade": "Quelque chose s'est mal passé lors de la mise à niveau, le système semble toujours être sous Debian Stretch",
|
||||
"migration_0015_main_upgrade": "Démarrage de la mise à niveau générale …",
|
||||
"migration_0015_patching_sources_list": "Mise à jour du fichier sources.lists …",
|
||||
"migration_0015_start": "Démarrage de la migration vers Buster",
|
||||
"migration_description_0015_migrate_to_buster": "Mise à niveau du système vers Debian Buster et YunoHost 4.x",
|
||||
"diagnosis_dns_try_dyndns_update_force": "La configuration DNS de ce domaine devrait être automatiquement gérée par Yunohost. Si ce n'est pas le cas, vous pouvez essayer de forcer une mise à jour en utilisant <cmd>yunohost dyndns update --force</cmd>.",
|
||||
"app_packaging_format_not_supported": "Cette application ne peut pas être installée car son format n'est pas pris en charge par votre version de Yunohost. Vous devriez probablement envisager de mettre à jour votre système.",
|
||||
"migration_0015_weak_certs": "Il a été constaté que les certificats suivants utilisent encore des algorithmes de signature peu robustes et doivent être mis à jour pour être compatibles avec la prochaine version de nginx : {certs}"
|
||||
}
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
{
|
||||
"app_already_installed": "{app:s} è già installata",
|
||||
"app_extraction_failed": "Impossibile estrarre i file di installazione",
|
||||
"app_not_installed": "{app:s} non è installata",
|
||||
"app_not_installed": "Impossibile trovare l'applicazione '{app:s}' nell'elenco delle applicazioni installate: {all_apps}",
|
||||
"app_unknown": "Applicazione sconosciuta",
|
||||
"ask_email": "Indirizzo email",
|
||||
"ask_password": "Password",
|
||||
|
@ -27,16 +27,16 @@
|
|||
"user_deleted": "L'utente è stato cancellato",
|
||||
"admin_password": "Password dell'amministrazione",
|
||||
"admin_password_change_failed": "Impossibile cambiare la password",
|
||||
"admin_password_changed": "La password dell'amministrazione è stata cambiata",
|
||||
"app_install_files_invalid": "Non sono validi i file di installazione",
|
||||
"app_manifest_invalid": "Manifesto dell'applicazione non valido: {error}",
|
||||
"admin_password_changed": "La password d'amministrazione è stata cambiata",
|
||||
"app_install_files_invalid": "Questi file non possono essere installati",
|
||||
"app_manifest_invalid": "C'è qualcosa di scorretto nel manifesto dell'applicazione: {error}",
|
||||
"app_not_correctly_installed": "{app:s} sembra di non essere installata correttamente",
|
||||
"app_not_properly_removed": "{app:s} non è stata correttamente rimossa",
|
||||
"action_invalid": "L'azione '{action:s}' non è valida",
|
||||
"app_removed": "{app:s} è stata rimossa",
|
||||
"app_sources_fetch_failed": "Impossibile riportare i file sorgenti",
|
||||
"app_upgrade_failed": "Impossibile aggiornare {app:s}",
|
||||
"app_upgraded": "{app:s} è stata aggiornata",
|
||||
"app_removed": "{app:s} rimossa",
|
||||
"app_sources_fetch_failed": "Impossibile riportare i file sorgenti, l'URL è corretto?",
|
||||
"app_upgrade_failed": "Impossibile aggiornare {app:s}: {error}",
|
||||
"app_upgraded": "{app:s} aggiornata",
|
||||
"app_requirements_checking": "Controllo i pacchetti richiesti per {app}…",
|
||||
"app_requirements_unmeet": "Requisiti non soddisfatti per {app}, il pacchetto {pkgname} ({version}) deve essere {spec}",
|
||||
"ask_firstname": "Nome",
|
||||
|
@ -45,8 +45,8 @@
|
|||
"ask_new_admin_password": "Nuova password dell'amministrazione",
|
||||
"backup_app_failed": "Non è possibile fare il backup dell'applicazione '{app:s}'",
|
||||
"backup_archive_app_not_found": "L'applicazione '{app:s}' non è stata trovata nel archivio di backup",
|
||||
"app_argument_choice_invalid": "Scelta non valida per l'argomento '{name:s}', deve essere uno di {choices:s}",
|
||||
"app_argument_invalid": "Valore non valido per '{name:s}': {error:s}",
|
||||
"app_argument_choice_invalid": "Usa una delle seguenti scelte '{choices:s}' per il parametro '{name:s}'",
|
||||
"app_argument_invalid": "Scegli un valore valido per il parametro '{name:s}': {error:s}",
|
||||
"app_argument_required": "L'argomento '{name:s}' è requisito",
|
||||
"app_id_invalid": "Identificativo dell'applicazione non valido",
|
||||
"app_unsupported_remote_type": "Il tipo remoto usato per l'applicazione non è supportato",
|
||||
|
@ -171,15 +171,15 @@
|
|||
"certmanager_attempt_to_renew_nonLE_cert": "Il certificato per il dominio {domain:s} non è emesso da Let's Encrypt. Impossibile rinnovarlo automaticamente!",
|
||||
"certmanager_attempt_to_renew_valid_cert": "Il certificato per il dominio {domain:s} non è a scadere! Usa --force per ignorare",
|
||||
"certmanager_domain_http_not_working": "Sembra che non sia possibile accedere al dominio {domain:s} attraverso HTTP. Verifica la configurazione del DNS e di nginx",
|
||||
"app_already_installed_cant_change_url": "Questa applicazione è già installata. L'URL non può essere cambiato solo da questa funzione. Guarda se `app changeurl` è disponibile.",
|
||||
"app_already_installed_cant_change_url": "Questa applicazione è già installata. L'URL non può essere cambiato solo da questa funzione. Controlla se `app changeurl` è disponibile.",
|
||||
"app_already_up_to_date": "{app:s} è già aggiornata",
|
||||
"app_change_url_failed_nginx_reload": "Riavvio di nginx fallito. Questo è il risultato di 'nginx -t':\n{nginx_errors:s}",
|
||||
"app_change_url_failed_nginx_reload": "Non riesco a riavviare NGINX. Questo è il risultato di 'nginx -t':\n{nginx_errors:s}",
|
||||
"app_change_url_identical_domains": "Il vecchio ed il nuovo dominio/percorso_url sono identici ('{domain:s}{path:s}'), nessuna operazione necessaria.",
|
||||
"app_change_url_no_script": "L'applicazione '{app_name:s}' non supporta ancora la modifica dell'URL. Forse dovresti aggiornare l'applicazione.",
|
||||
"app_change_url_success": "URL dell'applicazione {app:s} cambiato con successo in {domain:s}{path:s}",
|
||||
"app_make_default_location_already_used": "Impostazione dell'applicazione '{app}' come predefinita del dominio {domain} non riuscita perchè è già stata impostata per l'altra applicazione '{other_app}'",
|
||||
"app_location_unavailable": "Questo URL non è disponibile o va in conflitto con la/le applicazione/i già installata/e:\n{apps:s}",
|
||||
"app_upgrade_app_name": "Aggiornando l'applicazione {app}…",
|
||||
"app_change_url_no_script": "L'applicazione '{app_name:s}' non supporta ancora la modifica dell'URL. Forse dovresti aggiornarla.",
|
||||
"app_change_url_success": "L'URL dell'applicazione {app:s} è stato cambiato in {domain:s}{path:s}",
|
||||
"app_make_default_location_already_used": "Impostazione dell'applicazione '{app}' come predefinita del dominio non riuscita perché il dominio {domain} è già in uso per l'altra applicazione '{other_app}'",
|
||||
"app_location_unavailable": "Questo URL non è più disponibile o va in conflitto con la/le applicazione/i già installata/e:\n{apps:s}",
|
||||
"app_upgrade_app_name": "Aggiornamento dell'applicazione {app}…",
|
||||
"app_upgrade_some_app_failed": "Impossibile aggiornare alcune applicazioni",
|
||||
"backup_abstract_method": "Questo metodo di backup non è ancora stato implementato",
|
||||
"backup_applying_method_borg": "Inviando tutti i file da salvare nel backup nel deposito borg-backup…",
|
||||
|
@ -212,11 +212,11 @@
|
|||
"certmanager_cert_install_success": "Certificato Let's Encrypt per il dominio {domain:s} installato con successo!",
|
||||
"aborting": "Annullamento.",
|
||||
"admin_password_too_long": "Per favore scegli una password più corta di 127 caratteri",
|
||||
"app_not_upgraded": "Le seguenti app non sono state aggiornate: {apps}",
|
||||
"app_start_install": "Installando l'applicazione {app}…",
|
||||
"app_start_remove": "Rimuovendo l'applicazione {app}…",
|
||||
"app_start_backup": "Raccogliendo file da salvare nel backup per {app}…",
|
||||
"app_start_restore": "Ripristinando l'applicazione {app}…",
|
||||
"app_not_upgraded": "Impossibile aggiornare le applicazioni '{failed_app}' e di conseguenza l'aggiornamento delle seguenti applicazione è stato cancellato: {apps}",
|
||||
"app_start_install": "Installando l'applicazione '{app}'…",
|
||||
"app_start_remove": "Rimozione dell'applicazione {app}…",
|
||||
"app_start_backup": "Raccogliendo file da salvare nel backup per '{app}'…",
|
||||
"app_start_restore": "Ripristino dell'applicazione '{app}'…",
|
||||
"app_upgrade_several_apps": "Le seguenti app saranno aggiornate : {apps}",
|
||||
"ask_new_domain": "Nuovo dominio",
|
||||
"ask_new_path": "Nuovo percorso",
|
||||
|
@ -233,7 +233,7 @@
|
|||
"password_too_simple_4": "La password deve essere lunga almeno 12 caratteri e contenere numeri, maiuscole e minuscole",
|
||||
"users_available": "Utenti disponibili:",
|
||||
"yunohost_ca_creation_success": "L'autorità di certificazione locale è stata creata.",
|
||||
"app_action_cannot_be_ran_because_required_services_down": "Questa app richiede alcuni servizi che attualmente non sono attivi. Prima di continuare, dovresti provare a riavviare i seguenti servizi (e possibilmente capire perchè questi non siano attivi) : {services}",
|
||||
"app_action_cannot_be_ran_because_required_services_down": "I seguenti servizi dovrebbero essere in funzione per completare questa azione: {services}. Prova a riavviarli per proseguire (e possibilmente cercare di capire come ma non funzionano più).",
|
||||
"backup_output_symlink_dir_broken": "Hai un collegamento errato alla tua cartella di archiviazione '{path:s}'. Potresti avere delle impostazioni particolari per salvare i tuoi dati su un altro spazio, in questo caso probabilmente ti sei scordato di rimontare o collegare il tuo hard disk o la chiavetta usb.",
|
||||
"certmanager_conflicting_nginx_file": "Impossibile preparare il dominio per il controllo ACME: il file di configurazione nginx {filepath:s} è in conflitto e dovrebbe essere prima rimosso",
|
||||
"certmanager_couldnt_fetch_intermediate_cert": "Tempo scaduto durante il tentativo di recupero di un certificato intermedio da Let's Encrypt. Installazione/rinnovo non riuscito - per favore riprova più tardi.",
|
||||
|
@ -264,7 +264,7 @@
|
|||
"global_settings_reset_success": "Successo. Le tue impostazioni precedenti sono state salvate in {path:s}",
|
||||
"global_settings_setting_example_bool": "Esempio di opzione booleana",
|
||||
"global_settings_setting_example_enum": "Esempio di opzione enum",
|
||||
"already_up_to_date": "Niente da fare! Tutto è già aggiornato!",
|
||||
"already_up_to_date": "Niente da fare. Tutto è già aggiornato.",
|
||||
"global_settings_setting_example_int": "Esempio di opzione int",
|
||||
"global_settings_setting_example_string": "Esempio di opzione string",
|
||||
"global_settings_setting_security_nginx_compatibility": "Bilanciamento tra compatibilità e sicurezza per il server web nginx. Riguarda gli algoritmi di cifratura (e altri aspetti legati alla sicurezza)",
|
||||
|
@ -335,5 +335,10 @@
|
|||
"migration_0003_not_jessie": "La distribuzione attuale non è Jessie!",
|
||||
"migration_0003_system_not_fully_up_to_date": "Il tuo sistema non è completamente aggiornato. Per favore prima esegui un aggiornamento normale prima di migrare a stretch.",
|
||||
"this_action_broke_dpkg": "Questa azione ha danneggiato dpkg/apt (i gestori di pacchetti del sistema)… Puoi provare a risolvere questo problema connettendoti via SSH ed eseguendo `sudo dpkg --configure -a`.",
|
||||
"app_action_broke_system": "Questa azione sembra avere roto servizi importanti: {services}"
|
||||
"app_action_broke_system": "Questa azione sembra avere rotto questi servizi importanti: {services}",
|
||||
"app_remove_after_failed_install": "Rimozione dell'applicazione a causa del fallimento dell'installazione…",
|
||||
"app_install_script_failed": "Si è verificato un errore nello script di installazione dell'applicazione",
|
||||
"app_install_failed": "Impossibile installare {app}:{error}",
|
||||
"app_full_domain_unavailable": "Spiacente, questa app deve essere installata su un proprio dominio, ma altre applicazioni sono state installate sul dominio '{domain}'. Dovresti invece usare un sotto-dominio dedicato per questa app.",
|
||||
"app_upgrade_script_failed": "È stato trovato un errore nello script di aggiornamento dell'applicazione"
|
||||
}
|
|
@ -1 +1,3 @@
|
|||
{}
|
||||
{
|
||||
"password_too_simple_1": "पासवर्ड कम्तिमा characters अक्षर लामो हुनु आवश्यक छ"
|
||||
}
|
||||
|
|
|
@ -472,7 +472,7 @@
|
|||
"migrations_not_pending_cant_skip": "Aquestas migracions son pas en espèra, las podètz pas doncas ignorar : {ids}",
|
||||
"app_action_broke_system": "Aquesta accion sembla aver copat de servicis importants : {services}",
|
||||
"diagnosis_display_tip_web": "Podètz anar a la seccion Diagnostic (dins l’ecran d’acuèlh) per veire los problèmas trobats.",
|
||||
"diagnosis_ip_no_ipv6": "Lo servidor a pas d’adreça IPv5 activa.",
|
||||
"diagnosis_ip_no_ipv6": "Lo servidor a pas d’adreça IPv6 activa.",
|
||||
"diagnosis_ip_not_connected_at_all": "Lo servidor sembla pas connectat a Internet ?!",
|
||||
"diagnosis_security_all_good": "Cap de vulnerabilitat de seguretat critica pas trobada.",
|
||||
"diagnosis_description_regenconf": "Configuracion sistèma",
|
||||
|
@ -537,7 +537,7 @@
|
|||
"group_cannot_be_deleted": "Lo grop « {group} » pòt pas èsser suprimit manualament.",
|
||||
"diagnosis_found_warnings": "Trobat {warnings} element(s) que se poirián melhorar per {category}.",
|
||||
"diagnosis_dns_missing_record": "Segon la configuracion DNS recomandada, vos calriá ajustar un enregistrament DNS\ntipe: {type}\nnom: {name}\nvalor: {value}",
|
||||
"diagnosis_dns_discrepancy": "Segon la configuracion DNS recomandada, la valor per l’enregistrament DNS\ntipe: {type}\nnom: {name}\ndeuriá èsser: {current}\nallòc de: {value}",
|
||||
"diagnosis_dns_discrepancy": "La configuracion DNS seguenta sembla pas la configuracion recomandada : <br>Tipe : <code>{type}</code><br>Nom : <code>{name}</code><br>Valors actualas :<code> {current]</code><br>Valor esperada : <code>{value}</code>",
|
||||
"diagnosis_regenconf_manually_modified_debian_details": "Es pas problematic, mas car téner d’agacher...",
|
||||
"diagnosis_ports_could_not_diagnose": "Impossible de diagnosticar se los pòrts son accessibles de l’exterior.",
|
||||
"diagnosis_ports_could_not_diagnose_details": "Error : {error}",
|
||||
|
@ -571,5 +571,16 @@
|
|||
"global_settings_setting_pop3_enabled": "Activar lo protocòl POP3 pel servidor de corrièr",
|
||||
"diagnosis_diskusage_ok": "Lo lòc d’emmagazinatge {mountpoint} (sul periferic {device}) a encara {free} ({free_percent}%) de liure !",
|
||||
"diagnosis_swap_none": "Lo sistèma a pas cap de memòria d’escambi. Auriatz de considerar d’ajustar almens {recommended} d’escambi per evitar las situacions ont lo sistèma manca de memòria.",
|
||||
"diagnosis_swap_notsomuch": "Lo sistèma a solament {total} de memòria d’escambi. Auriatz de considerar d’ajustar almens {recommended} d’escambi per evitar las situacions ont lo sistèma manca de memòria."
|
||||
"diagnosis_swap_notsomuch": "Lo sistèma a solament {total} de memòria d’escambi. Auriatz de considerar d’ajustar almens {recommended} d’escambi per evitar las situacions ont lo sistèma manca de memòria.",
|
||||
"diagnosis_description_web": "Web",
|
||||
"diagnosis_ip_global": "IP Global : <code>{global}</code>",
|
||||
"diagnosis_ip_local": "IP locala : <code>{local}</code>",
|
||||
"diagnosis_mail_ehlo_could_not_diagnose_details": "Error : {error}",
|
||||
"diagnosis_mail_queue_unavailable_details": "Error : {error}",
|
||||
"diagnosis_basesystem_hardware": "L’arquitectura del servidor es {virt} {arch}",
|
||||
"diagnosis_basesystem_hardware_board": "Lo modèl de carta del servidor es {model}",
|
||||
"backup_archive_corrupted": "Sembla que l’archiu de la salvagarda « {archive} » es corromput : {error}",
|
||||
"diagnosis_domain_expires_in": "{domain} expiraà d’aquí {days} jorns.",
|
||||
"migration_0015_cleaning_up": "Netejatge de la memòria cache e dels paquets pas mai necessaris…",
|
||||
"restore_already_installed_apps": "Restauracion impossibla de las aplicacions seguentas que son ja installadas : {apps}"
|
||||
}
|
||||
|
|
10
pytest.ini
|
@ -2,3 +2,13 @@
|
|||
addopts = -s -v
|
||||
norecursedirs = dist doc build .tox .eggs
|
||||
testpaths = tests/
|
||||
markers =
|
||||
with_system_archive_from_2p4
|
||||
with_backup_recommended_app_installed
|
||||
clean_opt_dir
|
||||
with_wordpress_archive_from_2p4
|
||||
with_legacy_app_installed
|
||||
with_backup_recommended_app_installed_with_ynh_restore
|
||||
with_permission_app_installed
|
||||
filterwarnings =
|
||||
ignore::urllib3.exceptions.InsecureRequestWarning
|
|
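The markers declared above register the custom pytest marks used by the test suite so they do not trigger unknown-mark warnings. A minimal, hypothetical usage sketch (test name and body invented, assuming standard pytest marker behavior):

    import pytest

    @pytest.mark.with_wordpress_archive_from_2p4
    def test_restore_wordpress_from_old_archive():
        # The matching setup code is expected to have fetched the 2.4-era
        # WordPress backup archive before this test runs.
        ...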
@ -35,14 +35,13 @@ import subprocess
|
|||
import glob
|
||||
import urllib
|
||||
from collections import OrderedDict
|
||||
from datetime import datetime
|
||||
|
||||
from moulinette import msignals, m18n, msettings
|
||||
from moulinette.utils.log import getActionLogger
|
||||
from moulinette.utils.network import download_json
|
||||
from moulinette.utils.filesystem import read_file, read_json, read_toml, read_yaml, write_to_file, write_to_json, write_to_yaml, chmod, chown, mkdir
|
||||
|
||||
from yunohost.service import service_log, service_status, _run_service_command
|
||||
from yunohost.service import service_status, _run_service_command
|
||||
from yunohost.utils import packages
|
||||
from yunohost.utils.error import YunohostError
|
||||
from yunohost.log import is_unit_operation, OperationLogger
|
||||
|
@ -172,7 +171,7 @@ def app_info(app, full=False):
|
|||
ret["manifest"] = local_manifest
|
||||
ret['settings'] = settings
|
||||
|
||||
absolute_app_name = app if "__" not in app else app[:app.index('__')] # idk this is the name of the app even for multiinstance apps (so wordpress__2 -> wordpress)
|
||||
absolute_app_name, _ = _parse_app_instance_name(app)
|
||||
ret["from_catalog"] = _load_apps_catalog()["apps"].get(absolute_app_name, {})
|
||||
ret['upgradable'] = _app_upgradable(ret)
|
||||
ret['supports_change_url'] = os.path.exists(os.path.join(APPS_SETTING_PATH, app, "scripts", "change_url"))
|
||||
|
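For readers unfamiliar with the multi-instance naming scheme relied on by the new _parse_app_instance_name(app) call above: an instance id like "wordpress__2" carries the base app name before the double underscore. A rough illustrative sketch of that convention (not the actual helper, which is defined elsewhere in app.py):

    def parse_app_instance_name_sketch(app_instance_name):
        # "wordpress"    -> ("wordpress", 1)
        # "wordpress__2" -> ("wordpress", 2)
        if "__" in app_instance_name:
            name, number = app_instance_name.split("__", 1)
            return name, int(number)
        return app_instance_name, 1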
@ -415,6 +414,7 @@ def app_upgrade(app=[], url=None, file=None):
|
|||
"""
|
||||
from yunohost.hook import hook_add, hook_remove, hook_exec, hook_callback
|
||||
from yunohost.permission import permission_sync_to_user, user_permission_list
|
||||
from yunohost.regenconf import manually_modified_files
|
||||
|
||||
apps = app
|
||||
# If no app is specified, upgrade all apps
|
||||
|
@ -478,16 +478,19 @@ def app_upgrade(app=[], url=None, file=None):
|
|||
env_dict["YNH_APP_INSTANCE_NUMBER"] = str(app_instance_nb)
|
||||
env_dict["YNH_APP_LABEL"] = user_permission_list(full=True, ignore_system_perms=True, full_path=False)['permissions'][app_id+".main"]['label']
|
||||
|
||||
# Start register change on system
|
||||
related_to = [('app', app_instance_name)]
|
||||
operation_logger = OperationLogger('app_upgrade', related_to, env=env_dict)
|
||||
operation_logger.start()
|
||||
# We'll check that the app didn't brutally edit some system configuration
|
||||
manually_modified_files_before_install = manually_modified_files()
|
||||
|
||||
# Attempt to patch legacy helpers ...
|
||||
_patch_legacy_helpers(extracted_app_folder)
|
||||
|
||||
# Apply dirty patch to make php5 apps compatible with php7
|
||||
_patch_php5(extracted_app_folder)
|
||||
_patch_legacy_php_versions(extracted_app_folder)
|
||||
|
||||
# Start register change on system
|
||||
related_to = [('app', app_instance_name)]
|
||||
operation_logger = OperationLogger('app_upgrade', related_to, env=env_dict)
|
||||
operation_logger.start()
|
||||
|
||||
# Execute App upgrade script
|
||||
os.system('chown -hR admin: %s' % INSTALL_TMP)
|
||||
|
@ -514,7 +517,7 @@ def app_upgrade(app=[], url=None, file=None):
|
|||
# Something wrong happened in Yunohost's code (most probably hook_exec)
|
||||
except Exception:
|
||||
import traceback
|
||||
error = m18n.n('unexpected_error', error=u"\n" + traceback.format_exc())
|
||||
error = m18n.n('unexpected_error', error="\n" + traceback.format_exc())
|
||||
logger.error(m18n.n("app_install_failed", app=app_instance_name, error=error))
|
||||
failure_message_with_debug_instructions = operation_logger.error(error)
|
||||
finally:
|
||||
|
@ -528,6 +531,12 @@ def app_upgrade(app=[], url=None, file=None):
|
|||
logger.error(m18n.n("app_upgrade_failed", app=app_instance_name, error=str(e)))
|
||||
failure_message_with_debug_instructions = operation_logger.error(str(e))
|
||||
|
||||
# We'll check that the app didn't brutally edit some system configuration
|
||||
manually_modified_files_after_install = manually_modified_files()
|
||||
manually_modified_files_by_app = set(manually_modified_files_after_install) - set(manually_modified_files_before_install)
|
||||
if manually_modified_files_by_app:
|
||||
logger.error("Packagers /!\\ This app manually modified some system configuration files! This should not happen! If you need to do so, you should implement a proper conf_regen hook. Those configuration were affected:\n - " + '\n -'.join(manually_modified_files_by_app))
|
||||
|
||||
# If upgrade failed or broke the system,
|
||||
# raise an error and interrupt all other pending upgrades
|
||||
if upgrade_failed or broke_the_system:
|
||||
|
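To make the check above concrete, here is a tiny illustration of the set difference it computes (file paths invented for the example):

    manually_modified_files_before_install = ["/etc/ssh/sshd_config"]
    manually_modified_files_after_install = ["/etc/ssh/sshd_config", "/etc/nginx/nginx.conf"]
    manually_modified_files_by_app = set(manually_modified_files_after_install) \
        - set(manually_modified_files_before_install)
    # -> {"/etc/nginx/nginx.conf"}: the app touched a regen-conf-managed file it should not have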
@ -591,6 +600,7 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu
|
|||
from yunohost.hook import hook_add, hook_remove, hook_exec, hook_callback
|
||||
from yunohost.log import OperationLogger
|
||||
from yunohost.permission import user_permission_list, permission_create, permission_url, permission_delete, permission_sync_to_user, user_permission_update
|
||||
from yunohost.regenconf import manually_modified_files
|
||||
|
||||
# Fetch or extract sources
|
||||
if not os.path.exists(INSTALL_TMP):
|
||||
|
@ -679,12 +689,20 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu
|
|||
args_dict = {} if not args else \
|
||||
dict(urlparse.parse_qsl(args, keep_blank_values=True))
|
||||
args_odict = _parse_args_from_manifest(manifest, 'install', args=args_dict)
|
||||
args_list = [value[0] for value in args_odict.values()]
|
||||
args_list.append(app_instance_name)
|
||||
|
||||
# Validate domain / path availability for webapps
|
||||
_validate_and_normalize_webpath(manifest, args_odict, extracted_app_folder)
|
||||
|
||||
# build arg list
|
||||
args_list = [value[0] for value in args_odict.values()]
|
||||
args_list.append(app_instance_name)
|
||||
|
||||
# Attempt to patch legacy helpers ...
|
||||
_patch_legacy_helpers(extracted_app_folder)
|
||||
|
||||
# Apply dirty patch to make php5 apps compatible with php7
|
||||
_patch_legacy_php_versions(extracted_app_folder)
|
||||
|
||||
# Prepare env. var. to pass to script
|
||||
env_dict = _make_environment_dict(args_odict)
|
||||
env_dict["YNH_APP_ID"] = app_id
|
||||
|
@ -695,6 +713,9 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu
|
|||
# Start register change on system
|
||||
operation_logger.extra.update({'env': env_dict})
|
||||
|
||||
# We'll check that the app didn't brutally edit some system configuration
|
||||
manually_modified_files_before_install = manually_modified_files()
|
||||
|
||||
# Tell the operation_logger to redact all password-type args
|
||||
# Also redact the % escaped version of the password that might appear in
|
||||
# the 'args' section of metadata (relevant for password with non-alphanumeric char)
|
||||
|
@ -722,12 +743,6 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu
|
|||
}
|
||||
_set_app_settings(app_instance_name, app_settings)
|
||||
|
||||
# Attempt to patch legacy helpers ...
|
||||
_patch_legacy_helpers(extracted_app_folder)
|
||||
|
||||
# Apply dirty patch to make php5 apps compatible with php7
|
||||
_patch_php5(extracted_app_folder)
|
||||
|
||||
os.system('chown -R admin: ' + extracted_app_folder)
|
||||
|
||||
# Execute App install script
|
||||
|
@ -770,19 +785,25 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu
|
|||
# Something wrong happened in Yunohost's code (most probably hook_exec)
|
||||
except Exception as e:
|
||||
import traceback
|
||||
error = m18n.n('unexpected_error', error=u"\n" + traceback.format_exc())
|
||||
error = m18n.n('unexpected_error', error="\n" + traceback.format_exc())
|
||||
logger.error(m18n.n("app_install_failed", app=app_id, error=error))
|
||||
failure_message_with_debug_instructions = operation_logger.error(error)
|
||||
finally:
|
||||
# Whatever happened (install success or failure) we check if it broke the system
|
||||
# and warn the user about it
|
||||
try:
|
||||
broke_the_system = False
|
||||
_assert_system_is_sane_for_app(manifest, "post")
|
||||
except Exception as e:
|
||||
broke_the_system = True
|
||||
logger.error(m18n.n("app_install_failed", app=app_id, error=str(e)))
|
||||
failure_message_with_debug_instructions = operation_logger.error(str(e))
|
||||
# If success so far, validate that app didn't break important stuff
|
||||
if not install_failed:
|
||||
try:
|
||||
broke_the_system = False
|
||||
_assert_system_is_sane_for_app(manifest, "post")
|
||||
except Exception as e:
|
||||
broke_the_system = True
|
||||
logger.error(m18n.n("app_install_failed", app=app_id, error=str(e)))
|
||||
failure_message_with_debug_instructions = operation_logger.error(str(e))
|
||||
|
||||
# We'll check that the app didn't brutally edit some system configuration
|
||||
manually_modified_files_after_install = manually_modified_files()
|
||||
manually_modified_files_by_app = set(manually_modified_files_after_install) - set(manually_modified_files_before_install)
|
||||
if manually_modified_files_by_app:
|
||||
logger.error("Packagers /!\\ This app manually modified some system configuration files! This should not happen! If you need to do so, you should implement a proper conf_regen hook. Those configuration were affected:\n - " + '\n -'.join(manually_modified_files_by_app))
|
||||
|
||||
# If the install failed or broke the system, we remove it
|
||||
if install_failed or broke_the_system:
|
||||
|
@ -811,6 +832,7 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu
|
|||
os.path.join(extracted_app_folder, 'scripts/remove'),
|
||||
args=[app_instance_name], env=env_dict_remove
|
||||
)[0]
|
||||
|
||||
# Here again, calling hook_exec could fail miserably, or get
|
||||
# manually interrupted (by mistake or because script was stuck)
|
||||
# In that case we still want to proceed with the rest of the
|
||||
|
@ -818,7 +840,7 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu
|
|||
except (KeyboardInterrupt, EOFError, Exception):
|
||||
remove_retcode = -1
|
||||
import traceback
|
||||
logger.error(m18n.n('unexpected_error', error=u"\n" + traceback.format_exc()))
|
||||
logger.error(m18n.n('unexpected_error', error="\n" + traceback.format_exc()))
|
||||
|
||||
# Remove all permission in LDAP
|
||||
for permission_name in user_permission_list()["permissions"].keys():
|
||||
|
@ -878,16 +900,34 @@ def dump_app_log_extract_for_debugging(operation_logger):
|
|||
with open(operation_logger.log_path, "r") as f:
|
||||
lines = f.readlines()
|
||||
|
||||
filters = [
|
||||
r"set [+-]x$",
|
||||
r"set [+-]o xtrace$",
|
||||
r"local \w+$",
|
||||
r"local legacy_args=.*$",
|
||||
r".*Helper used in legacy mode.*",
|
||||
r"args_array=.*$",
|
||||
r"local -A args_array$",
|
||||
r"ynh_handle_getopts_args",
|
||||
r"ynh_script_progression"
|
||||
]
|
||||
|
||||
filters = [re.compile(f_) for f_ in filters]
|
||||
|
||||
lines_to_display = []
|
||||
for line in lines:
|
||||
|
||||
if not ": " in line.strip():
|
||||
if ": " not in line.strip():
|
||||
continue
|
||||
|
||||
# A line typically looks like
|
||||
# 2019-10-19 16:10:27,611: DEBUG - + mysql -u piwigo --password=********** -B piwigo
|
||||
# And we just want the part starting by "DEBUG - "
|
||||
line = line.strip().split(": ", 1)[1]
|
||||
|
||||
if any(filter_.search(line) for filter_ in filters):
|
||||
continue
|
||||
|
||||
lines_to_display.append(line)
|
||||
|
||||
if line.endswith("+ ynh_exit_properly") or " + ynh_die " in line:
|
||||
|
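As an aside, the filtering loop above boils down to: drop lines without a ': ' separator, keep only the part after the first ': ', then discard anything matching the noise patterns. A self-contained sketch with made-up log lines:

    import re

    noise = [re.compile(p) for p in (r"set [+-]x$", r"ynh_script_progression")]
    raw = [
        "2019-10-19 16:10:27,611: DEBUG - + set -x",
        "2019-10-19 16:10:27,612: DEBUG - + ynh_script_progression --message=Configuring",
        "2019-10-19 16:10:28,001: DEBUG - + mysql -u piwigo --password=********** -B piwigo",
    ]
    kept = []
    for line in raw:
        if ": " not in line.strip():
            continue
        line = line.strip().split(": ", 1)[1]
        if any(f.search(line) for f in noise):
            continue
        kept.append(line)
    # kept == ["DEBUG - + mysql -u piwigo --password=********** -B piwigo"]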
@ -931,7 +971,7 @@ def app_remove(operation_logger, app):
|
|||
|
||||
# Apply dirty patch to make php5 apps compatible with php7 (e.g. the remove
|
||||
# script might date back from jessie install)
|
||||
_patch_php5(app_setting_path)
|
||||
_patch_legacy_php_versions(app_setting_path)
|
||||
|
||||
manifest = _get_manifest_of_app(app_setting_path)
|
||||
|
||||
|
@ -960,7 +1000,7 @@ def app_remove(operation_logger, app):
|
|||
except (KeyboardInterrupt, EOFError, Exception):
|
||||
ret = -1
|
||||
import traceback
|
||||
logger.error(m18n.n('unexpected_error', error=u"\n" + traceback.format_exc()))
|
||||
logger.error(m18n.n('unexpected_error', error="\n" + traceback.format_exc()))
|
||||
|
||||
if ret == 0:
|
||||
logger.success(m18n.n('app_removed', app=app))
|
||||
|
@ -1290,9 +1330,9 @@ def app_ssowatconf():
|
|||
|
||||
uris = []
|
||||
if perm_info['url'] is not None:
|
||||
uris += [perm_info['url']]
|
||||
uris += [perm_info['url'].rstrip('/')]
|
||||
if perm_info['additional_urls'] != [None]:
|
||||
uris += perm_info['additional_urls']
|
||||
uris += [uri.rstrip('/') for uri in perm_info['additional_urls']]
|
||||
|
||||
permissions[perm_name] = {
|
||||
"users": perm_info['corresponding_users'],
|
||||
|
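The rstrip('/') normalization above keeps permission URIs in a single canonical form before they are written to the SSOwat configuration, so that 'example.tld/app/' and 'example.tld/app' do not end up as two different entries. A toy illustration (values invented):

    perm_info = {"url": "example.tld/app/", "additional_urls": ["example.tld/app/api/"]}
    uris = [perm_info["url"].rstrip("/")]
    uris += [uri.rstrip("/") for uri in perm_info["additional_urls"]]
    # uris == ["example.tld/app", "example.tld/app/api"]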
@ -1790,6 +1830,9 @@ def _get_app_settings(app_id):
|
|||
with open(os.path.join(
|
||||
APPS_SETTING_PATH, app_id, 'settings.yml')) as f:
|
||||
settings = yaml.load(f)
|
||||
# If label contains unicode char, this may later trigger issues when building strings...
|
||||
# FIXME: this should be propagated to read_yaml so that this fix applies everywhere I think...
|
||||
settings = {k:_encode_string(v) for k,v in settings.items()}
|
||||
if app_id == settings['id']:
|
||||
return settings
|
||||
except (IOError, TypeError, KeyError):
|
||||
|
@ -2101,12 +2144,14 @@ def _fetch_app_from_git(app):
|
|||
else:
|
||||
app_dict = _load_apps_catalog()["apps"]
|
||||
|
||||
if app not in app_dict:
|
||||
app_id, _ = _parse_app_instance_name(app)
|
||||
|
||||
if app_id not in app_dict:
|
||||
raise YunohostError('app_unknown')
|
||||
elif 'git' not in app_dict[app]:
|
||||
elif 'git' not in app_dict[app_id]:
|
||||
raise YunohostError('app_unsupported_remote_type')
|
||||
|
||||
app_info = app_dict[app]
|
||||
app_info = app_dict[app_id]
|
||||
app_info['manifest']['lastUpdate'] = app_info['lastUpdate']
|
||||
manifest = app_info['manifest']
|
||||
url = app_info['git']['url']
|
||||
|
@ -2245,6 +2290,11 @@ def _encode_string(value):
|
|||
|
||||
def _check_manifest_requirements(manifest, app_instance_name):
|
||||
"""Check if required packages are met from the manifest"""
|
||||
|
||||
packaging_format = int(manifest.get('packaging_format', 0))
|
||||
if packaging_format not in [0, 1]:
|
||||
raise YunohostError("app_packaging_format_not_supported")
|
||||
|
||||
requirements = manifest.get('requirements', dict())
|
||||
|
||||
if not requirements:
|
||||
|
@ -2255,6 +2305,7 @@ def _check_manifest_requirements(manifest, app_instance_name):
|
|||
# Iterate over requirements
|
||||
for pkgname, spec in requirements.items():
|
||||
if not packages.meets_version_specifier(pkgname, spec):
|
||||
version = packages.ynh_packages_version()[pkgname]["version"]
|
||||
raise YunohostError('app_requirements_unmeet',
|
||||
pkgname=pkgname, version=version,
|
||||
spec=spec, app=app_instance_name)
|
||||
|
@ -2306,126 +2357,134 @@ def _parse_args_for_action(action, args={}):
|
|||
return _parse_args_in_yunohost_format(args, action_args)
|
||||
|
||||
|
||||
def _parse_args_in_yunohost_format(args, action_args):
|
||||
"""Parse arguments store in either manifest.json or actions.json
|
||||
def _parse_args_in_yunohost_format(user_answers, argument_questions):
|
||||
"""Parse arguments store in either manifest.json or actions.json or from a
|
||||
config panel against the user answers when they are present.
|
||||
|
||||
Keyword arguments:
|
||||
user_answers -- a dictionary of arguments from the user (generally
|
||||
empty in CLI, filled from the admin interface)
|
||||
argument_questions -- the argument descriptions stored in yunohost
|
||||
format from actions.json/toml, manifest.json/toml
|
||||
or config_panel.json/toml
|
||||
"""
|
||||
from yunohost.domain import domain_list, _get_maindomain
|
||||
from yunohost.user import user_info, user_list
|
||||
from yunohost.user import user_list
|
||||
|
||||
args_dict = OrderedDict()
|
||||
parsed_answers_dict = OrderedDict()
|
||||
|
||||
for arg in action_args:
|
||||
arg_name = arg['name']
|
||||
arg_type = arg.get('type', 'string')
|
||||
arg_default = arg.get('default', None)
|
||||
arg_choices = arg.get('choices', [])
|
||||
arg_value = None
|
||||
for question in argument_questions:
|
||||
question_name = question['name']
|
||||
question_type = question.get('type', 'string')
|
||||
question_default = question.get('default', None)
|
||||
question_choices = question.get('choices', [])
|
||||
question_value = None
|
||||
|
||||
# Transpose default value for boolean type and set it to
|
||||
# false if not defined.
|
||||
if arg_type == 'boolean':
|
||||
arg_default = 1 if arg_default else 0
|
||||
if question_type == 'boolean':
|
||||
question_default = 1 if question_default else 0
|
||||
|
||||
# do not print for webadmin
|
||||
if arg_type == 'display_text' and msettings.get('interface') != 'api':
|
||||
print(_value_for_locale(arg['ask']))
|
||||
if question_type == 'display_text' and msettings.get('interface') != 'api':
|
||||
print(_value_for_locale(question['ask']))
|
||||
continue
|
||||
|
||||
# Attempt to retrieve argument value
|
||||
if arg_name in args:
|
||||
arg_value = args[arg_name]
|
||||
if question_name in user_answers:
|
||||
question_value = user_answers[question_name]
|
||||
else:
|
||||
if 'ask' in arg:
|
||||
if 'ask' in question:
|
||||
# Retrieve proper ask string
|
||||
ask_string = _value_for_locale(arg['ask'])
|
||||
text_for_user_input_in_cli = _value_for_locale(question['ask'])
|
||||
|
||||
# Append extra strings
|
||||
if arg_type == 'boolean':
|
||||
ask_string += ' [yes | no]'
|
||||
elif arg_choices:
|
||||
ask_string += ' [{0}]'.format(' | '.join(arg_choices))
|
||||
if question_type == 'boolean':
|
||||
text_for_user_input_in_cli += ' [yes | no]'
|
||||
elif question_choices:
|
||||
text_for_user_input_in_cli += ' [{0}]'.format(' | '.join(question_choices))
|
||||
|
||||
if arg_default is not None:
|
||||
if arg_type == 'boolean':
|
||||
ask_string += ' (default: {0})'.format("yes" if arg_default == 1 else "no")
|
||||
if question_default is not None:
|
||||
if question_type == 'boolean':
|
||||
text_for_user_input_in_cli += ' (default: {0})'.format("yes" if question_default == 1 else "no")
|
||||
else:
|
||||
ask_string += ' (default: {0})'.format(arg_default)
|
||||
text_for_user_input_in_cli += ' (default: {0})'.format(question_default)
|
||||
|
||||
# Check for a password argument
|
||||
is_password = True if arg_type == 'password' else False
|
||||
is_password = True if question_type == 'password' else False
|
||||
|
||||
if arg_type == 'domain':
|
||||
arg_default = _get_maindomain()
|
||||
ask_string += ' (default: {0})'.format(arg_default)
|
||||
if question_type == 'domain':
|
||||
question_default = _get_maindomain()
|
||||
text_for_user_input_in_cli += ' (default: {0})'.format(question_default)
|
||||
msignals.display(m18n.n('domains_available'))
|
||||
for domain in domain_list()['domains']:
|
||||
msignals.display("- {}".format(domain))
|
||||
|
||||
elif arg_type == 'user':
|
||||
elif question_type == 'user':
|
||||
msignals.display(m18n.n('users_available'))
|
||||
for user in user_list()['users'].keys():
|
||||
msignals.display("- {}".format(user))
|
||||
|
||||
elif arg_type == 'password':
|
||||
elif question_type == 'password':
|
||||
msignals.display(m18n.n('good_practices_about_user_password'))
|
||||
|
||||
try:
|
||||
input_string = msignals.prompt(ask_string, is_password)
|
||||
input_string = msignals.prompt(text_for_user_input_in_cli, is_password)
|
||||
except NotImplementedError:
|
||||
input_string = None
|
||||
if (input_string == '' or input_string is None) \
|
||||
and arg_default is not None:
|
||||
arg_value = arg_default
|
||||
and question_default is not None:
|
||||
question_value = question_default
|
||||
else:
|
||||
arg_value = input_string
|
||||
elif arg_default is not None:
|
||||
arg_value = arg_default
|
||||
question_value = input_string
|
||||
elif question_default is not None:
|
||||
question_value = question_default
|
||||
|
||||
# If the value is empty (none or '')
|
||||
# then check if arg is optional or not
|
||||
if arg_value is None or arg_value == '':
|
||||
if arg.get("optional", False):
|
||||
# then check if question is optional or not
|
||||
if question_value is None or question_value == '':
|
||||
if question.get("optional", False):
|
||||
# Argument is optional, keep an empty value
|
||||
# and that's all for this arg !
|
||||
args_dict[arg_name] = ('', arg_type)
|
||||
# and that's all for this question!
|
||||
parsed_answers_dict[question_name] = ('', question_type)
|
||||
continue
|
||||
else:
|
||||
# The argument is required !
|
||||
raise YunohostError('app_argument_required', name=arg_name)
|
||||
raise YunohostError('app_argument_required', name=question_name)
|
||||
|
||||
# Validate argument choice
|
||||
if arg_choices and arg_value not in arg_choices:
|
||||
raise YunohostError('app_argument_choice_invalid', name=arg_name, choices=', '.join(arg_choices))
|
||||
if question_choices and question_value not in question_choices:
|
||||
raise YunohostError('app_argument_choice_invalid', name=question_name, choices=', '.join(question_choices))
|
||||
|
||||
# Validate argument type
|
||||
if arg_type == 'domain':
|
||||
if arg_value not in domain_list()['domains']:
|
||||
raise YunohostError('app_argument_invalid', name=arg_name, error=m18n.n('domain_unknown'))
|
||||
elif arg_type == 'user':
|
||||
if not arg_value in user_list()["users"].keys():
|
||||
raise YunohostError('app_argument_invalid', name=arg_name, error=m18n.n('user_unknown', user=arg_value))
|
||||
elif arg_type == 'app':
|
||||
if not _is_installed(arg_value):
|
||||
raise YunohostError('app_argument_invalid', name=arg_name, error=m18n.n('app_unknown'))
|
||||
elif arg_type == 'boolean':
|
||||
if isinstance(arg_value, bool):
|
||||
arg_value = 1 if arg_value else 0
|
||||
if question_type == 'domain':
|
||||
if question_value not in domain_list()['domains']:
|
||||
raise YunohostError('app_argument_invalid', name=question_name, error=m18n.n('domain_unknown'))
|
||||
elif question_type == 'user':
|
||||
if question_value not in user_list()["users"].keys():
|
||||
raise YunohostError('app_argument_invalid', name=question_name, error=m18n.n('user_unknown', user=question_value))
|
||||
elif question_type == 'app':
|
||||
if not _is_installed(question_value):
|
||||
raise YunohostError('app_argument_invalid', name=question_name, error=m18n.n('app_unknown'))
|
||||
elif question_type == 'boolean':
|
||||
if isinstance(question_value, bool):
|
||||
question_value = 1 if question_value else 0
|
||||
else:
|
||||
if str(arg_value).lower() in ["1", "yes", "y"]:
|
||||
arg_value = 1
|
||||
elif str(arg_value).lower() in ["0", "no", "n"]:
|
||||
arg_value = 0
|
||||
if str(question_value).lower() in ["1", "yes", "y"]:
|
||||
question_value = 1
|
||||
elif str(question_value).lower() in ["0", "no", "n"]:
|
||||
question_value = 0
|
||||
else:
|
||||
raise YunohostError('app_argument_choice_invalid', name=arg_name, choices='yes, no, y, n, 1, 0')
|
||||
elif arg_type == 'password':
|
||||
raise YunohostError('app_argument_choice_invalid', name=question_name, choices='yes, no, y, n, 1, 0')
|
||||
elif question_type == 'password':
|
||||
forbidden_chars = "{}"
|
||||
if any(char in arg_value for char in forbidden_chars):
|
||||
if any(char in question_value for char in forbidden_chars):
|
||||
raise YunohostError('pattern_password_app', forbidden_chars=forbidden_chars)
|
||||
from yunohost.utils.password import assert_password_is_strong_enough
|
||||
assert_password_is_strong_enough('user', arg_value)
|
||||
args_dict[arg_name] = (arg_value, arg_type)
|
||||
assert_password_is_strong_enough('user', question_value)
|
||||
parsed_answers_dict[question_name] = (question_value, question_type)
|
||||
|
||||
return args_dict
|
||||
return parsed_answers_dict
|
||||
|
||||
|
||||
def _validate_and_normalize_webpath(manifest, args_dict, app_folder):
|
||||
|
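Stepping back from the rename above (args/action_args becoming user_answers/argument_questions), here is a minimal usage sketch of _parse_args_in_yunohost_format, with an invented question list that only exercises the string and boolean branches so no domain or user lookup is needed:

    argument_questions = [
        {"name": "path", "type": "string", "ask": {"en": "Installation path"}, "default": "/blog"},
        {"name": "is_public", "type": "boolean", "ask": {"en": "Public app?"}, "default": True},
    ]
    user_answers = {"path": "/blog", "is_public": "yes"}

    parsed = _parse_args_in_yunohost_format(user_answers, argument_questions)
    # -> OrderedDict([("path", ("/blog", "string")), ("is_public", (1, "boolean"))])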
@ -2568,12 +2627,6 @@ def _read_apps_catalog_list():
|
|||
Read the json corresponding to the list of apps catalogs
|
||||
"""
|
||||
|
||||
# Legacy code - can be removed after moving to buster (if the migration got merged before buster)
|
||||
if os.path.exists('/etc/yunohost/appslists.json'):
|
||||
from yunohost.tools import _get_migration_by_name
|
||||
migration = _get_migration_by_name("futureproof_apps_catalog_system")
|
||||
migration.run()
|
||||
|
||||
try:
|
||||
list_ = read_yaml(APPS_CATALOG_CONF)
|
||||
# Support the case where file exists but is empty
|
||||
|
@ -2710,21 +2763,6 @@ def is_true(arg):
|
|||
return True if arg else False
|
||||
|
||||
|
||||
def random_password(length=8):
|
||||
"""
|
||||
Generate a random string
|
||||
|
||||
Keyword arguments:
|
||||
length -- The string length to generate
|
||||
|
||||
"""
|
||||
import string
|
||||
import random
|
||||
|
||||
char_set = string.ascii_uppercase + string.digits + string.ascii_lowercase
|
||||
return ''.join([random.SystemRandom().choice(char_set) for x in range(length)])
|
||||
|
||||
|
||||
def unstable_apps():
|
||||
|
||||
output = []
|
||||
|
@ -2745,8 +2783,8 @@ def _assert_system_is_sane_for_app(manifest, when):
|
|||
|
||||
# Some apps use php-fpm or php5-fpm which is now php7.0-fpm
|
||||
def replace_alias(service):
|
||||
if service in ["php-fpm", "php5-fpm"]:
|
||||
return "php7.0-fpm"
|
||||
if service in ["php-fpm", "php5-fpm", "php7.0-fpm"]:
|
||||
return "php7.3-fpm"
|
||||
else:
|
||||
return service
|
||||
services = [replace_alias(s) for s in services]
|
||||
|
@ -2754,7 +2792,7 @@ def _assert_system_is_sane_for_app(manifest, when):
|
|||
# We only check those, mostly to ignore "custom" services
|
||||
# (added by apps) and because those are the most popular
|
||||
# services
|
||||
service_filter = ["nginx", "php7.0-fpm", "mysql", "postfix"]
|
||||
service_filter = ["nginx", "php7.3-fpm", "mysql", "postfix"]
|
||||
services = [str(s) for s in services if s in service_filter]
|
||||
|
||||
if "nginx" not in services:
|
||||
|
@ -2779,11 +2817,24 @@ def _assert_system_is_sane_for_app(manifest, when):
|
|||
raise YunohostError("this_action_broke_dpkg")
|
||||
|
||||
|
||||
def _patch_php5(app_folder):
|
||||
LEGACY_PHP_VERSION_REPLACEMENTS = [
|
||||
("/etc/php5", "/etc/php/7.3"),
|
||||
("/etc/php/7.0", "/etc/php/7.3"),
|
||||
("/var/run/php5-fpm", "/var/run/php/php7.3-fpm"),
|
||||
("/var/run/php/php7.0-fpm", "/var/run/php/php7.3-fpm"),
|
||||
("php5", "php7.3"),
|
||||
("php7.0", "php7.3"),
|
||||
('phpversion="${phpversion:-7.0}"', 'phpversion="${phpversion:-7.3}"'), # Many helpers like the composer ones use 7.0 by default ...
|
||||
('"$phpversion" == "7.0"', '$(bc <<< "$phpversion >= 7.3") -eq 1') # patch ynh_install_php to refuse installing/removing php <= 7.3
|
||||
]
|
||||
|
||||
|
||||
def _patch_legacy_php_versions(app_folder):
|
||||
|
||||
files_to_patch = []
|
||||
files_to_patch.extend(glob.glob("%s/conf/*" % app_folder))
|
||||
files_to_patch.extend(glob.glob("%s/scripts/*" % app_folder))
|
||||
files_to_patch.extend(glob.glob("%s/scripts/*/*" % app_folder))
|
||||
files_to_patch.extend(glob.glob("%s/scripts/.*" % app_folder))
|
||||
files_to_patch.append("%s/manifest.json" % app_folder)
|
||||
files_to_patch.append("%s/manifest.toml" % app_folder)
|
||||
|
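The LEGACY_PHP_VERSION_REPLACEMENTS table above centralizes every php5/php7.0 to php7.3 substitution (paths, sockets, service names, helper defaults), and the files_to_patch list covers the app's conf files, scripts and manifests. The hunk that follows turns this table into a single sed invocation; the same substitution step expressed in pure Python would be (a sketch, not the committed implementation):

LEGACY_REPLACEMENTS = [
    ("/etc/php5", "/etc/php/7.3"),
    ("/etc/php/7.0", "/etc/php/7.3"),
    ("/var/run/php5-fpm", "/var/run/php/php7.3-fpm"),
    ("/var/run/php/php7.0-fpm", "/var/run/php/php7.3-fpm"),
    ("php5", "php7.3"),
    ("php7.0", "php7.3"),
]

def patch_legacy_php_in_file(filename, replacements=LEGACY_REPLACEMENTS):
    # Apply every plain-string replacement pair to the file, in order
    with open(filename) as f:
        content = f.read()
    for pattern, replace in replacements:
        content = content.replace(pattern, replace)
    with open(filename, "w") as f:
        f.write(content)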
@ -2794,12 +2845,32 @@ def _patch_php5(app_folder):
|
|||
if not os.path.isfile(filename):
|
||||
continue
|
||||
|
||||
c = "sed -i -e 's@/etc/php5@/etc/php/7.0@g' " \
|
||||
"-e 's@/var/run/php5-fpm@/var/run/php/php7.0-fpm@g' " \
|
||||
"-e 's@php5@php7.0@g' " \
|
||||
"%s" % filename
|
||||
c = "sed -i " \
|
||||
+ "".join("-e 's@{pattern}@{replace}@g' ".format(pattern=p, replace=r) for p, r in LEGACY_PHP_VERSION_REPLACEMENTS) \
|
||||
+ "%s" % filename
|
||||
os.system(c)
|
||||
|
||||
|
||||
def _patch_legacy_php_versions_in_settings(app_folder):
|
||||
|
||||
settings = read_yaml(os.path.join(app_folder, 'settings.yml'))
|
||||
|
||||
if settings.get("fpm_config_dir") == "/etc/php/7.0/fpm":
|
||||
settings["fpm_config_dir"] = "/etc/php/7.3/fpm"
|
||||
if settings.get("fpm_service") == "php7.0-fpm":
|
||||
settings["fpm_service"] = "php7.3-fpm"
|
||||
if settings.get("phpversion") == "7.0":
|
||||
settings["phpversion"] = "7.3"
|
||||
|
||||
# We delete these checksums otherwise the file will appear as manually modified
|
||||
list_to_remove = ["checksum__etc_php_7.0_fpm_pool",
|
||||
"checksum__etc_nginx_conf.d"]
|
||||
settings = {k: v for k, v in settings.items()
|
||||
if not any(k.startswith(to_remove) for to_remove in list_to_remove)}
|
||||
|
||||
write_to_yaml(app_folder + '/settings.yml', settings)
|
||||
|
||||
|
||||
def _patch_legacy_helpers(app_folder):
|
||||
|
||||
files_to_patch = []
|
||||
|
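_patch_legacy_php_versions_in_settings above rewrites the php-related keys in an app's settings.yml and drops the now-stale fpm checksum entries so that regen-conf does not flag the regenerated files as manually modified. A condensed sketch of that key rewriting, using plain PyYAML instead of the moulinette read/write helpers:

import yaml

def patch_php_settings(settings_path):
    with open(settings_path) as f:
        settings = yaml.safe_load(f) or {}

    # Point the fpm-related settings at php 7.3
    if settings.get("fpm_config_dir") == "/etc/php/7.0/fpm":
        settings["fpm_config_dir"] = "/etc/php/7.3/fpm"
    if settings.get("fpm_service") == "php7.0-fpm":
        settings["fpm_service"] = "php7.3-fpm"
    if settings.get("phpversion") == "7.0":
        settings["phpversion"] = "7.3"

    # Drop stale checksums so the new files are not treated as manual edits
    stale_prefixes = ("checksum__etc_php_7.0_fpm_pool", "checksum__etc_nginx_conf.d")
    settings = {k: v for k, v in settings.items() if not k.startswith(stale_prefixes)}

    with open(settings_path, "w") as f:
        yaml.safe_dump(settings, f)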
@ -2811,29 +2882,46 @@ def _patch_legacy_helpers(app_folder):
|
|||
# sudo yunohost app initdb $db_user -p $db_pwd
|
||||
# by
|
||||
# ynh_mysql_setup_db --db_user=$db_user --db_name=$db_user --db_pwd=$db_pwd
|
||||
"yunohost app initdb": (
|
||||
r"(sudo )?yunohost app initdb \"?(\$\{?\w+\}?)\"?\s+-p\s\"?(\$\{?\w+\}?)\"?",
|
||||
r"ynh_mysql_setup_db --db_user=\2 --db_name=\2 --db_pwd=\3"),
|
||||
"yunohost app initdb": {
|
||||
"pattern": r"(sudo )?yunohost app initdb \"?(\$\{?\w+\}?)\"?\s+-p\s\"?(\$\{?\w+\}?)\"?",
|
||||
"replace": r"ynh_mysql_setup_db --db_user=\2 --db_name=\2 --db_pwd=\3",
|
||||
"important": True
|
||||
},
|
||||
# Replace
|
||||
# sudo yunohost app checkport whaterver
|
||||
# by
|
||||
# ynh_port_available whatever
|
||||
"yunohost app checkport": (
|
||||
r"(sudo )?yunohost app checkport",
|
||||
r"ynh_port_available"),
|
||||
"yunohost app checkport": {
|
||||
"pattern": r"(sudo )?yunohost app checkport",
|
||||
"replace": r"ynh_port_available",
|
||||
"important": True
|
||||
},
|
||||
# We can't migrate easily port-available
|
||||
# .. but at the time of writing this code, only two non-working apps are using it.
|
||||
"yunohost tools port-available": (None, None),
|
||||
"yunohost tools port-available": {"important":True},
|
||||
# Replace
|
||||
# yunohost app checkurl "${domain}${path_url}" -a "${app}"
|
||||
# by
|
||||
# ynh_webpath_register --app=${app} --domain=${domain} --path_url=${path_url}
|
||||
"yunohost app checkurl": (
|
||||
r"(sudo )?yunohost app checkurl \"?(\$\{?\w+\}?)\/?(\$\{?\w+\}?)\"?\s+-a\s\"?(\$\{?\w+\}?)\"?",
|
||||
r"ynh_webpath_register --app=\4 --domain=\2 --path_url=\3"),
|
||||
"yunohost app checkurl": {
|
||||
"pattern": r"(sudo )?yunohost app checkurl \"?(\$\{?\w+\}?)\/?(\$\{?\w+\}?)\"?\s+-a\s\"?(\$\{?\w+\}?)\"?",
|
||||
"replace": r"ynh_webpath_register --app=\4 --domain=\2 --path_url=\3",
|
||||
"important": True
|
||||
},
|
||||
# Remove
|
||||
# Automatic diagnosis data from YunoHost
|
||||
# __PRE_TAG1__$(yunohost tools diagnosis | ...)__PRE_TAG2__"
|
||||
#
|
||||
"yunohost tools diagnosis": {
|
||||
"pattern": r"(Automatic diagnosis data from YunoHost( *\n)*)? *(__\w+__)? *\$\(yunohost tools diagnosis.*\)(__\w+__)?",
|
||||
"replace": r"",
|
||||
"important": False
|
||||
}
|
||||
}
|
||||
|
||||
stuff_to_replace_compiled = {h: (re.compile(r[0]), r[1]) if r[0] else (None,None) for h, r in stuff_to_replace.items()}
|
||||
for helper, infos in stuff_to_replace.items():
|
||||
infos["pattern"] = re.compile(infos["pattern"]) if infos.get("pattern") else None
|
||||
infos["replace"] = infos.get("replace")
|
||||
|
||||
for filename in files_to_patch:
|
||||
|
||||
|
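The dict above switches each legacy-helper entry from a (pattern, replace) tuple to a {'pattern', 'replace', 'important'} mapping, so non-critical patterns such as the diagnosis cleanup can be patched without ever aborting the install. A reduced sketch of how such a rule table is applied to a file's content (rule names and error messages are illustrative):

import re

RULES = {
    "yunohost app checkport": {
        "pattern": r"(sudo )?yunohost app checkport",
        "replace": r"ynh_port_available",
        "important": True,
    },
    "yunohost tools diagnosis": {
        "pattern": r"\$\(yunohost tools diagnosis.*\)",
        "replace": r"",
        "important": False,
    },
}

def patch_content(content):
    must_warn = False
    for helper, infos in RULES.items():
        if helper in content and infos.get("pattern"):
            content = re.sub(infos["pattern"], infos["replace"], content)
            if infos["important"]:
                must_warn = True
        # An important helper that survived the substitution can't be migrated automatically
        if helper in content and infos["important"]:
            raise RuntimeError("deprecated helper %r can't be patched automatically" % helper)
    return content, must_warn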
@ -2843,18 +2931,20 @@ def _patch_legacy_helpers(app_folder):
|
|||
|
||||
content = read_file(filename)
|
||||
replaced_stuff = False
|
||||
show_warning = False
|
||||
|
||||
for helper, regexes in stuff_to_replace_compiled.items():
|
||||
pattern, replace = regexes
|
||||
for helper, infos in stuff_to_replace.items():
|
||||
# If helper is used, attempt to patch the file
|
||||
if helper in content and pattern != "":
|
||||
content = pattern.sub(replace, content)
|
||||
if helper in content and infos["pattern"]:
|
||||
content = infos["pattern"].sub(infos["replace"], content)
|
||||
replaced_stuff = True
|
||||
if infos["important"]:
|
||||
show_warning = True
|
||||
|
||||
# If the helper is *still* in the content, it means that we
|
||||
# couldn't patch the deprecated helper in the previous lines. In
|
||||
# that case, abort the install or whichever step is performed
|
||||
if helper in content:
|
||||
if helper in content and infos["important"]:
|
||||
raise YunohostError("This app is likely pretty old and uses deprecated / outdated helpers that can't be migrated easily. It can't be installed anymore.")
|
||||
|
||||
if replaced_stuff:
|
||||
|
@ -2870,5 +2960,7 @@ def _patch_legacy_helpers(app_folder):
|
|||
|
||||
# Actually write the new content in the file
|
||||
write_to_file(filename, content)
|
||||
|
||||
if show_warning:
|
||||
# And complain about those damn deprecated helpers
|
||||
logger.error("/!\ Packagers ! This app uses a very old deprecated helpers ... Yunohost automatically patched the helpers to use the new recommended practice, but please do consider fixing the upstream code right now ...")
|
||||
@ -43,7 +43,13 @@ from moulinette.utils.log import getActionLogger
|
|||
from moulinette.utils.filesystem import read_file, mkdir, write_to_yaml, read_yaml
|
||||
|
||||
from yunohost.app import (
|
||||
app_info, _is_installed, _parse_app_instance_name, _patch_php5, dump_app_log_extract_for_debugging, _patch_legacy_helpers
|
||||
app_info, _is_installed,
|
||||
_parse_app_instance_name,
|
||||
dump_app_log_extract_for_debugging,
|
||||
_patch_legacy_helpers,
|
||||
_patch_legacy_php_versions,
|
||||
_patch_legacy_php_versions_in_settings,
|
||||
LEGACY_PHP_VERSION_REPLACEMENTS
|
||||
)
|
||||
from yunohost.hook import (
|
||||
hook_list, hook_info, hook_callback, hook_exec, CUSTOM_HOOK_FOLDER
|
||||
|
@ -219,8 +225,8 @@ class BackupManager():
|
|||
backup_manager = BackupManager(name="mybackup", description="bkp things")
|
||||
|
||||
# Add backup method to apply
|
||||
backup_manager.add(BackupMethod.create('copy','/mnt/local_fs'))
|
||||
backup_manager.add(BackupMethod.create('tar','/mnt/remote_fs'))
|
||||
backup_manager.add(BackupMethod.create('copy', backup_manager, '/mnt/local_fs'))
|
||||
backup_manager.add(BackupMethod.create('tar', backup_manager, '/mnt/remote_fs'))
|
||||
|
||||
# Define targets to be backuped
|
||||
backup_manager.set_system_targets(["data"])
|
||||
|
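The docstring change above reflects the new factory signature: a BackupMethod is now bound to its manager when it is created, instead of receiving it as an argument to mount_and_backup() or mount(). A toy sketch of that dependency-injection pattern, with stand-in classes rather than the real yunohost ones:

class Method:
    # Sketch of the new pattern: the manager is injected once, in the constructor
    def __init__(self, manager, repo=None):
        self.manager = manager
        self.repo = repo

    def mount_and_backup(self):
        # No argument needed anymore: the files to back up come from self.manager
        return list(self.manager.paths_to_backup)

class FakeManager:
    paths_to_backup = ["/etc/ssowat/conf.json"]

method = Method(FakeManager())
assert method.mount_and_backup() == ["/etc/ssowat/conf.json"]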
@ -752,7 +758,7 @@ class BackupManager():
|
|||
|
||||
for method in self.methods:
|
||||
logger.debug(m18n.n('backup_applying_method_' + method.method_name))
|
||||
method.mount_and_backup(self)
|
||||
method.mount_and_backup()
|
||||
logger.debug(m18n.n('backup_method_' + method.method_name + '_finished'))
|
||||
|
||||
def _compute_backup_size(self):
|
||||
|
@ -851,7 +857,7 @@ class RestoreManager():
|
|||
self.info = backup_info(name, with_details=True)
|
||||
self.archive_path = self.info['path']
|
||||
self.name = name
|
||||
self.method = BackupMethod.create(method)
|
||||
self.method = BackupMethod.create(method, self)
|
||||
self.targets = BackupRestoreTargetsManager()
|
||||
|
||||
#
|
||||
|
@ -956,6 +962,9 @@ class RestoreManager():
|
|||
# These are the hooks on the current installation
|
||||
available_restore_system_hooks = hook_list("restore")["hooks"]
|
||||
|
||||
custom_restore_hook_folder = os.path.join(CUSTOM_HOOK_FOLDER, 'restore')
|
||||
filesystem.mkdir(custom_restore_hook_folder, 755, parents=True, force=True)
|
||||
|
||||
for system_part in target_list:
|
||||
# By default, we'll use the restore hooks on the current install
|
||||
# if available
|
||||
|
@ -967,24 +976,25 @@ class RestoreManager():
|
|||
continue
|
||||
|
||||
# Otherwise, attempt to find it (or them?) in the archive
|
||||
hook_paths = '{:s}/hooks/restore/*-{:s}'.format(self.work_dir, system_part)
|
||||
hook_paths = glob(hook_paths)
|
||||
|
||||
# If we didn't find it, we ain't gonna be able to restore it
|
||||
if len(hook_paths) == 0:
|
||||
if system_part not in self.info['system'] or\
|
||||
'paths' not in self.info['system'][system_part] or\
|
||||
len(self.info['system'][system_part]['paths']) == 0:
|
||||
logger.exception(m18n.n('restore_hook_unavailable', part=system_part))
|
||||
self.targets.set_result("system", system_part, "Skipped")
|
||||
continue
|
||||
|
||||
hook_paths = self.info['system'][system_part]['paths']
|
||||
hook_paths = [ 'hooks/restore/%s' % os.path.basename(p) for p in hook_paths ]
|
||||
|
||||
# Otherwise, add it from the archive to the system
|
||||
# FIXME: Refactor hook_add and use it instead
|
||||
custom_restore_hook_folder = os.path.join(CUSTOM_HOOK_FOLDER, 'restore')
|
||||
filesystem.mkdir(custom_restore_hook_folder, 755, True)
|
||||
for hook_path in hook_paths:
|
||||
logger.debug("Adding restoration script '%s' to the system "
|
||||
"from the backup archive '%s'", hook_path,
|
||||
self.archive_path)
|
||||
shutil.copy(hook_path, custom_restore_hook_folder)
|
||||
self.method.copy(hook_path, custom_restore_hook_folder)
|
||||
|
||||
def set_apps_targets(self, apps=[]):
|
||||
"""
|
||||
|
@ -1000,10 +1010,20 @@ class RestoreManager():
|
|||
logger.error(m18n.n('backup_archive_app_not_found',
|
||||
app=app))
|
||||
|
||||
self.targets.set_wanted("apps",
|
||||
apps,
|
||||
self.info['apps'].keys(),
|
||||
unknown_error)
|
||||
to_be_restored = self.targets.set_wanted("apps",
|
||||
apps,
|
||||
self.info['apps'].keys(),
|
||||
unknown_error)
|
||||
|
||||
# If all apps to restore are already installed, stop right here.
|
||||
# Otherwise, if at least one app can be restored, we keep going on
|
||||
# because those which can be restored will indeed be restored
|
||||
already_installed = [app for app in to_be_restored if _is_installed(app)]
|
||||
if already_installed != []:
|
||||
if already_installed == to_be_restored:
|
||||
raise YunohostError("restore_already_installed_apps", apps=', '.join(already_installed))
|
||||
else:
|
||||
logger.warning(m18n.n("restore_already_installed_apps", apps=', '.join(already_installed)))
|
||||
|
||||
#
|
||||
# Archive mounting #
|
||||
|
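The set_apps_targets hunk above only aborts the restore when every requested app is already installed; if some can still be restored it merely warns about the others and keeps going. The decision boils down to (a sketch with an assumed helper name):

def split_restorable_apps(to_be_restored, installed):
    # Fail only if *every* requested app is already installed; otherwise report and continue
    already_installed = [app for app in to_be_restored if app in installed]
    if already_installed and already_installed == to_be_restored:
        raise RuntimeError("already installed: %s" % ", ".join(already_installed))
    return [app for app in to_be_restored if app not in installed]

# Example: one app can still be restored, the other is just reported
print(split_restorable_apps(["wordpress", "nextcloud"], ["nextcloud"]))  # ['wordpress']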
@ -1044,7 +1064,7 @@ class RestoreManager():
|
|||
|
||||
filesystem.mkdir(self.work_dir, parents=True)
|
||||
|
||||
self.method.mount(self)
|
||||
self.method.mount()
|
||||
|
||||
self._read_info_files()
|
||||
|
||||
|
@ -1127,7 +1147,7 @@ class RestoreManager():
|
|||
self._postinstall_if_needed()
|
||||
|
||||
# Apply dirty patch to redirect php5 file on php7
|
||||
self._patch_backup_csv_file()
|
||||
self._patch_legacy_php_versions_in_csv_file()
|
||||
|
||||
self._restore_system()
|
||||
self._restore_apps()
|
||||
|
@ -1136,9 +1156,9 @@ class RestoreManager():
|
|||
finally:
|
||||
self.clean()
|
||||
|
||||
def _patch_backup_csv_file(self):
|
||||
def _patch_legacy_php_versions_in_csv_file(self):
|
||||
"""
|
||||
Apply dirty patch to redirect php5 file on php7
|
||||
Apply dirty patch to redirect php5 and php7.0 files to php7.3
|
||||
"""
|
||||
|
||||
backup_csv = os.path.join(self.work_dir, 'backup.csv')
|
||||
|
@ -1146,32 +1166,27 @@ class RestoreManager():
|
|||
if not os.path.isfile(backup_csv):
|
||||
return
|
||||
|
||||
contains_php5 = False
|
||||
replaced_something = False
|
||||
with open(backup_csv) as csvfile:
|
||||
reader = csv.DictReader(csvfile, fieldnames=['source', 'dest'])
|
||||
newlines = []
|
||||
for row in reader:
|
||||
if 'php5' in row['source']:
|
||||
contains_php5 = True
|
||||
row['source'] = row['source'].replace('/etc/php5', '/etc/php/7.0') \
|
||||
.replace('/var/run/php5-fpm', '/var/run/php/php7.0-fpm') \
|
||||
.replace('php5', 'php7')
|
||||
for pattern, replace in LEGACY_PHP_VERSION_REPLACEMENTS:
|
||||
if pattern in row['source']:
|
||||
replaced_something = True
|
||||
row['source'] = row['source'].replace(pattern, replace)
|
||||
|
||||
newlines.append(row)
|
||||
|
||||
if not contains_php5:
|
||||
if not replaced_something:
|
||||
return
|
||||
|
||||
try:
|
||||
with open(backup_csv, 'w') as csvfile:
|
||||
writer = csv.DictWriter(csvfile,
|
||||
fieldnames=['source', 'dest'],
|
||||
quoting=csv.QUOTE_ALL)
|
||||
for row in newlines:
|
||||
writer.writerow(row)
|
||||
except (IOError, OSError, csv.Error) as e:
|
||||
logger.warning(m18n.n('backup_php5_to_php7_migration_may_fail',
|
||||
error=str(e)))
|
||||
with open(backup_csv, 'w') as csvfile:
|
||||
writer = csv.DictWriter(csvfile,
|
||||
fieldnames=['source', 'dest'],
|
||||
quoting=csv.QUOTE_ALL)
|
||||
for row in newlines:
|
||||
writer.writerow(row)
|
||||
|
||||
def _restore_system(self):
|
||||
""" Restore user and system parts """
|
||||
|
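_patch_legacy_php_versions_in_csv_file above now reuses the LEGACY_PHP_VERSION_REPLACEMENTS pairs to rewrite the 'source' column of backup.csv, and only rewrites the file when something actually changed. The same idea as a self-contained snippet (the replacement list is truncated here for brevity):

import csv

REPLACEMENTS = [("/etc/php5", "/etc/php/7.3"), ("php5", "php7.3")]

def patch_backup_csv(path, replacements=REPLACEMENTS):
    # Rewrite the 'source' column, applying every legacy-php replacement pair
    with open(path) as f:
        rows = list(csv.DictReader(f, fieldnames=["source", "dest"]))
    changed = False
    for row in rows:
        for pattern, replace in replacements:
            if pattern in row["source"]:
                row["source"] = row["source"].replace(pattern, replace)
                changed = True
    if not changed:
        return
    with open(path, "w") as f:
        writer = csv.DictWriter(f, fieldnames=["source", "dest"], quoting=csv.QUOTE_ALL)
        writer.writerows(rows)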
@ -1230,12 +1245,11 @@ class RestoreManager():
|
|||
#
|
||||
# Legacy code
|
||||
if not "all_users" in user_group_list()["groups"].keys():
|
||||
from yunohost.tools import _get_migration_by_name
|
||||
setup_group_permission = _get_migration_by_name("setup_group_permission")
|
||||
from yunohost.utils.legacy import SetupGroupPermissions
|
||||
# Update LDAP schema restart slapd
|
||||
logger.info(m18n.n("migration_0011_update_LDAP_schema"))
|
||||
regen_conf(names=['slapd'], force=True)
|
||||
setup_group_permission.migrate_LDAP_db()
|
||||
SetupGroupPermissions.migrate_LDAP_db()
|
||||
|
||||
# Remove all permission for all app which is still in the LDAP
|
||||
for permission_name in user_permission_list(ignore_system_perms=True)["permissions"].keys():
|
||||
|
@ -1304,13 +1318,6 @@ class RestoreManager():
|
|||
else:
|
||||
shutil.copy2(s, d)
|
||||
|
||||
# Start register change on system
|
||||
related_to = [('app', app_instance_name)]
|
||||
operation_logger = OperationLogger('backup_restore_app', related_to)
|
||||
operation_logger.start()
|
||||
|
||||
logger.info(m18n.n("app_start_restore", app=app_instance_name))
|
||||
|
||||
# Check if the app is not already installed
|
||||
if _is_installed(app_instance_name):
|
||||
logger.error(m18n.n('restore_already_installed_app',
|
||||
|
@ -1318,6 +1325,13 @@ class RestoreManager():
|
|||
self.targets.set_result("apps", app_instance_name, "Error")
|
||||
return
|
||||
|
||||
# Start register change on system
|
||||
related_to = [('app', app_instance_name)]
|
||||
operation_logger = OperationLogger('backup_restore_app', related_to)
|
||||
operation_logger.start()
|
||||
|
||||
logger.info(m18n.n("app_start_restore", app=app_instance_name))
|
||||
|
||||
app_dir_in_archive = os.path.join(self.work_dir, 'apps', app_instance_name)
|
||||
app_backup_in_archive = os.path.join(app_dir_in_archive, 'backup')
|
||||
app_settings_in_archive = os.path.join(app_dir_in_archive, 'settings')
|
||||
|
@ -1327,7 +1341,8 @@ class RestoreManager():
|
|||
_patch_legacy_helpers(app_settings_in_archive)
|
||||
|
||||
# Apply dirty patch to make php5 apps compatible with php7
|
||||
_patch_php5(app_settings_in_archive)
|
||||
_patch_legacy_php_versions(app_settings_in_archive)
|
||||
_patch_legacy_php_versions_in_settings(app_settings_in_archive)
|
||||
|
||||
# Delete _common.sh file in backup
|
||||
common_file = os.path.join(app_backup_in_archive, '_common.sh')
|
||||
|
@ -1385,9 +1400,8 @@ class RestoreManager():
|
|||
else:
|
||||
# Otherwise, we need to migrate the legacy permissions of this
|
||||
# app (included in its settings.yml)
|
||||
from yunohost.tools import _get_migration_by_name
|
||||
setup_group_permission = _get_migration_by_name("setup_group_permission")
|
||||
setup_group_permission.migrate_app_permission(app=app_instance_name)
|
||||
from yunohost.utils.legacy import SetupGroupPermissions
|
||||
SetupGroupPermissions.migrate_app_permission(app=app_instance_name)
|
||||
|
||||
# Migrate old settings
|
||||
if app_setting(app_instance_name, 'skipped_uris') is not None or \
|
||||
|
@ -1517,19 +1531,19 @@ class BackupMethod(object):
|
|||
method_name
|
||||
|
||||
Public methods:
|
||||
mount_and_backup(self, backup_manager)
|
||||
mount(self, restore_manager)
|
||||
mount_and_backup(self)
|
||||
mount(self)
|
||||
create(cls, method, **kwargs)
|
||||
|
||||
Usage:
|
||||
method = BackupMethod.create("tar")
|
||||
method.mount_and_backup(backup_manager)
|
||||
method = BackupMethod.create("tar", backup_manager)
|
||||
method.mount_and_backup()
|
||||
#or
|
||||
method = BackupMethod.create("copy")
|
||||
method.mount(restore_manager)
|
||||
method = BackupMethod.create("copy", restore_manager)
|
||||
method.mount()
|
||||
"""
|
||||
|
||||
def __init__(self, repo=None):
|
||||
def __init__(self, manager, repo=None):
|
||||
"""
|
||||
BackupMethod constructors
|
||||
|
||||
|
@ -1542,6 +1556,7 @@ class BackupMethod(object):
|
|||
BackupRepository object. If None, the default repo is used :
|
||||
/home/yunohost.backup/archives/
|
||||
"""
|
||||
self.manager = manager
|
||||
self.repo = ARCHIVES_PATH if repo is None else repo
|
||||
|
||||
@property
|
||||
|
@ -1587,18 +1602,13 @@ class BackupMethod(object):
|
|||
"""
|
||||
return False
|
||||
|
||||
def mount_and_backup(self, backup_manager):
|
||||
def mount_and_backup(self):
|
||||
"""
|
||||
Run the backup on files listed by the BackupManager instance
|
||||
|
||||
This method shouldn't be overrided, prefer overriding self.backup() and
|
||||
self.clean()
|
||||
|
||||
Args:
|
||||
backup_manager -- (BackupManager) A backup manager instance that has
|
||||
already done the files collection step.
|
||||
"""
|
||||
self.manager = backup_manager
|
||||
if self.need_mount():
|
||||
self._organize_files()
|
||||
|
||||
|
@ -1607,17 +1617,13 @@ class BackupMethod(object):
|
|||
finally:
|
||||
self.clean()
|
||||
|
||||
def mount(self, restore_manager):
|
||||
def mount(self):
|
||||
"""
|
||||
Mount the archive from RestoreManager instance in the working directory
|
||||
|
||||
This method should be extended.
|
||||
|
||||
Args:
|
||||
restore_manager -- (RestoreManager) A restore manager instance
|
||||
contains an archive to restore.
|
||||
"""
|
||||
self.manager = restore_manager
|
||||
pass
|
||||
|
||||
def clean(self):
|
||||
"""
|
||||
|
@ -1762,7 +1768,7 @@ class BackupMethod(object):
|
|||
shutil.copy(path['source'], dest)
|
||||
|
||||
@classmethod
|
||||
def create(cls, method, *args):
|
||||
def create(cls, method, manager, *args):
|
||||
"""
|
||||
Factory method to create instance of BackupMethod
|
||||
|
||||
|
@ -1778,7 +1784,7 @@ class BackupMethod(object):
|
|||
if not isinstance(method, basestring):
|
||||
methods = []
|
||||
for m in method:
|
||||
methods.append(BackupMethod.create(m, *args))
|
||||
methods.append(BackupMethod.create(m, manager, *args))
|
||||
return methods
|
||||
|
||||
bm_class = {
|
||||
|
@ -1787,9 +1793,9 @@ class BackupMethod(object):
|
|||
'borg': BorgBackupMethod
|
||||
}
|
||||
if method in ["copy", "tar", "borg"]:
|
||||
return bm_class[method](*args)
|
||||
return bm_class[method](manager, *args)
|
||||
else:
|
||||
return CustomBackupMethod(method=method, *args)
|
||||
return CustomBackupMethod(manager, method=method, *args)
|
||||
|
||||
|
||||
class CopyBackupMethod(BackupMethod):
|
||||
|
@ -1799,8 +1805,8 @@ class CopyBackupMethod(BackupMethod):
|
|||
could be the inverse for restoring
|
||||
"""
|
||||
|
||||
def __init__(self, repo=None):
|
||||
super(CopyBackupMethod, self).__init__(repo)
|
||||
def __init__(self, manager, repo=None):
|
||||
super(CopyBackupMethod, self).__init__(manager, repo)
|
||||
|
||||
@property
|
||||
def method_name(self):
|
||||
|
@ -1854,6 +1860,9 @@ class CopyBackupMethod(BackupMethod):
|
|||
"&&", "umount", "-R", self.work_dir])
|
||||
raise YunohostError('backup_cant_mount_uncompress_archive')
|
||||
|
||||
def copy(self, file, target):
|
||||
shutil.copy(file, target)
|
||||
|
||||
|
||||
class TarBackupMethod(BackupMethod):
|
||||
|
||||
|
@ -1861,8 +1870,8 @@ class TarBackupMethod(BackupMethod):
|
|||
This class compress all files to backup in archive.
|
||||
"""
|
||||
|
||||
def __init__(self, repo=None):
|
||||
super(TarBackupMethod, self).__init__(repo)
|
||||
def __init__(self, manager, repo=None):
|
||||
super(TarBackupMethod, self).__init__(manager, repo)
|
||||
|
||||
@property
|
||||
def method_name(self):
|
||||
|
@ -1922,7 +1931,7 @@ class TarBackupMethod(BackupMethod):
|
|||
if not os.path.isfile(link):
|
||||
os.symlink(self._archive_file, link)
|
||||
|
||||
def mount(self, restore_manager):
|
||||
def mount(self):
|
||||
"""
|
||||
Mount the archive. We avoid copying it, to be able to restore on a system without
|
||||
too much space.
|
||||
|
@ -1932,9 +1941,10 @@ class TarBackupMethod(BackupMethod):
|
|||
backup_archive_corrupted -- Raised if the archive appears corrupted
|
||||
backup_archive_cant_retrieve_info_json -- If the info.json file can't be retrieved
|
||||
"""
|
||||
super(TarBackupMethod, self).mount(restore_manager)
|
||||
super(TarBackupMethod, self).mount()
|
||||
|
||||
# Check the archive can be open
|
||||
# Mount the tarball
|
||||
logger.debug(m18n.n("restore_extracting"))
|
||||
try:
|
||||
tar = tarfile.open(self._archive_file, "r:gz")
|
||||
except:
|
||||
|
@ -1947,15 +1957,7 @@ class TarBackupMethod(BackupMethod):
|
|||
except IOError as e:
|
||||
raise YunohostError("backup_archive_corrupted", archive=self._archive_file, error=str(e))
|
||||
|
||||
# FIXME : Is this really useful to close the archive just to
|
||||
# reopen it right after this with the same options ...?
|
||||
tar.close()
|
||||
|
||||
# Mount the tarball
|
||||
logger.debug(m18n.n("restore_extracting"))
|
||||
tar = tarfile.open(self._archive_file, "r:gz")
|
||||
|
||||
if "info.json" in files_in_archive:
|
||||
if "info.json" in tar.getnames():
|
||||
leading_dot = ""
|
||||
tar.extract('info.json', path=self.work_dir)
|
||||
elif "./info.json" in files_in_archive:
|
||||
|
@ -2010,7 +2012,15 @@ class TarBackupMethod(BackupMethod):
|
|||
]
|
||||
tar.extractall(members=subdir_and_files, path=self.work_dir)
|
||||
|
||||
# FIXME : Don't we want to close the tar archive here or at some point ?
|
||||
tar.close()
|
||||
|
||||
def copy(self, file, target):
|
||||
tar = tarfile.open(self._archive_file, "r:gz")
|
||||
file_to_extract = tar.getmember(file)
|
||||
# Remove the path
|
||||
file_to_extract.name = os.path.basename(file_to_extract.name)
|
||||
tar.extract(file_to_extract, path=target)
|
||||
tar.close()
|
||||
|
||||
|
||||
class BorgBackupMethod(BackupMethod):
|
||||
|
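TarBackupMethod.copy() above pulls a single member out of the archive and strips its directory component before extracting, which is how restore hooks are now copied out of a backup without unpacking the whole tarball. A self-contained version of that extraction trick for an arbitrary .tar.gz (paths are examples):

import os
import tarfile

def extract_single_file(archive_path, member_name, target_dir):
    # Extract one member, flattening its path so it lands directly in target_dir
    with tarfile.open(archive_path, "r:gz") as tar:
        member = tar.getmember(member_name)
        member.name = os.path.basename(member.name)
        tar.extract(member, path=target_dir)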
@ -2029,6 +2039,9 @@ class BorgBackupMethod(BackupMethod):
|
|||
def mount(self, mnt_path):
|
||||
raise YunohostError('backup_borg_not_implemented')
|
||||
|
||||
def copy(self, file, target):
|
||||
raise YunohostError('backup_borg_not_implemented')
|
||||
|
||||
|
||||
class CustomBackupMethod(BackupMethod):
|
||||
|
||||
|
@ -2038,8 +2051,8 @@ class CustomBackupMethod(BackupMethod):
|
|||
/etc/yunohost/hooks.d/backup_method/
|
||||
"""
|
||||
|
||||
def __init__(self, repo=None, method=None, **kwargs):
|
||||
super(CustomBackupMethod, self).__init__(repo)
|
||||
def __init__(self, manager, repo=None, method=None, **kwargs):
|
||||
super(CustomBackupMethod, self).__init__(manager, repo)
|
||||
self.args = kwargs
|
||||
self.method = method
|
||||
self._need_mount = None
|
||||
|
@ -2080,14 +2093,14 @@ class CustomBackupMethod(BackupMethod):
|
|||
if ret_failed:
|
||||
raise YunohostError('backup_custom_backup_error')
|
||||
|
||||
def mount(self, restore_manager):
|
||||
def mount(self):
|
||||
"""
|
||||
Launch a custom script to mount the custom archive
|
||||
|
||||
Exceptions:
|
||||
backup_custom_mount_error -- Raised if the custom script failed
|
||||
"""
|
||||
super(CustomBackupMethod, self).mount(restore_manager)
|
||||
super(CustomBackupMethod, self).mount()
|
||||
ret = hook_callback('backup_method', [self.method],
|
||||
args=self._get_args('mount'))
|
||||
|
||||
|
@ -2178,9 +2191,9 @@ def backup_create(name=None, description=None, methods=[],
|
|||
|
||||
# Add backup methods
|
||||
if output_directory:
|
||||
methods = BackupMethod.create(methods, output_directory)
|
||||
methods = BackupMethod.create(methods, backup_manager, output_directory)
|
||||
else:
|
||||
methods = BackupMethod.create(methods)
|
||||
methods = BackupMethod.create(methods, backup_manager)
|
||||
|
||||
for method in methods:
|
||||
backup_manager.add(method)
|
||||
|
@ -2288,34 +2301,25 @@ def backup_list(with_info=False, human_readable=False):
|
|||
human_readable -- Print sizes in human readable format
|
||||
|
||||
"""
|
||||
result = []
|
||||
# Get local archives sorted according to last modification time
|
||||
archives = sorted(glob("%s/*.tar.gz" % ARCHIVES_PATH), key=lambda x: os.path.getctime(x))
|
||||
# Extract only filename without the extension
|
||||
archives = [os.path.basename(f)[:-len(".tar.gz")] for f in archives]
|
||||
|
||||
try:
|
||||
# Retrieve local archives
|
||||
archives = os.listdir(ARCHIVES_PATH)
|
||||
except OSError:
|
||||
logger.debug("unable to iterate over local archives", exc_info=1)
|
||||
else:
|
||||
# Iterate over local archives
|
||||
for f in archives:
|
||||
try:
|
||||
name = f[:f.rindex('.tar.gz')]
|
||||
except ValueError:
|
||||
continue
|
||||
result.append(name)
|
||||
result.sort(key=lambda x: os.path.getctime(os.path.join(ARCHIVES_PATH, x + ".tar.gz")))
|
||||
|
||||
if result and with_info:
|
||||
if with_info:
|
||||
d = OrderedDict()
|
||||
for a in result:
|
||||
for archive in archives:
|
||||
try:
|
||||
d[a] = backup_info(a, human_readable=human_readable)
|
||||
d[archive] = backup_info(archive, human_readable=human_readable)
|
||||
except YunohostError as e:
|
||||
logger.warning(str(e))
|
||||
except Exception as e:
|
||||
import traceback
|
||||
logger.warning("Could not check infos for archive %s: %s" % (archive, '\n'+traceback.format_exc()))
|
||||
|
||||
result = d
|
||||
archives = d
|
||||
|
||||
return {'archives': result}
|
||||
return {'archives': archives}
|
||||
|
||||
|
||||
def backup_info(name, with_details=False, human_readable=False):
|
||||
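The backup_list rewrite above replaces the os.listdir loop with a glob over *.tar.gz sorted by creation time, and keeps iterating over the remaining archives even when one of them has unreadable metadata. A sketch of the listing part (ARCHIVES_PATH is the usual /home/yunohost.backup/archives directory mentioned earlier in the diff):

import os
from glob import glob

ARCHIVES_PATH = "/home/yunohost.backup/archives"

def list_local_archives(archives_path=ARCHIVES_PATH):
    # Sort archives by creation time and return their bare names, oldest first
    archives = sorted(glob("%s/*.tar.gz" % archives_path), key=os.path.getctime)
    return [os.path.basename(f)[:-len(".tar.gz")] for f in archives]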
@ -29,7 +29,6 @@ import pwd
|
|||
import grp
|
||||
import smtplib
|
||||
import subprocess
|
||||
import dns.resolver
|
||||
import glob
|
||||
|
||||
from datetime import datetime
|
||||
|
@ -42,6 +41,7 @@ from yunohost.vendor.acme_tiny.acme_tiny import get_crt as sign_certificate
|
|||
from yunohost.utils.error import YunohostError
|
||||
from yunohost.utils.network import get_public_ip
|
||||
|
||||
from yunohost.diagnosis import Diagnoser
|
||||
from yunohost.service import _run_service_command
|
||||
from yunohost.regenconf import regen_conf
|
||||
from yunohost.log import OperationLogger
|
||||
|
@ -68,18 +68,6 @@ PRODUCTION_CERTIFICATION_AUTHORITY = "https://acme-v02.api.letsencrypt.org"
|
|||
|
||||
INTERMEDIATE_CERTIFICATE_URL = "https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem"
|
||||
|
||||
DNS_RESOLVERS = [
|
||||
# FFDN DNS resolvers
|
||||
# See https://www.ffdn.org/wiki/doku.php?id=formations:dns
|
||||
"80.67.169.12", # FDN
|
||||
"80.67.169.40", #
|
||||
"89.234.141.66", # ARN
|
||||
"141.255.128.100", # Aquilenet
|
||||
"141.255.128.101",
|
||||
"89.234.186.18", # Grifon
|
||||
"80.67.188.188" # LDN
|
||||
]
|
||||
|
||||
#
|
||||
# Front-end stuff #
|
||||
#
|
||||
|
@ -115,10 +103,16 @@ def certificate_status(domain_list, full=False):
|
|||
if not full:
|
||||
del status["subject"]
|
||||
del status["CA_name"]
|
||||
del status["ACME_eligible"]
|
||||
status["CA_type"] = status["CA_type"]["verbose"]
|
||||
status["summary"] = status["summary"]["verbose"]
|
||||
|
||||
if full:
|
||||
try:
|
||||
_check_domain_is_ready_for_ACME(domain)
|
||||
status["ACME_eligible"] = True
|
||||
except:
|
||||
status["ACME_eligible"] = False
|
||||
|
||||
del status["domain"]
|
||||
certificates[domain] = status
|
||||
|
||||
|
@ -272,30 +266,36 @@ def _certificate_install_letsencrypt(domain_list, force=False, no_checks=False,
|
|||
# Actual install steps
|
||||
for domain in domain_list:
|
||||
|
||||
operation_logger = OperationLogger('letsencrypt_cert_install', [('domain', domain)],
|
||||
args={'force': force, 'no_checks': no_checks,
|
||||
'staging': staging})
|
||||
if not no_checks:
|
||||
try:
|
||||
_check_domain_is_ready_for_ACME(domain)
|
||||
except Exception as e:
|
||||
logger.error(e)
|
||||
continue
|
||||
|
||||
logger.info(
|
||||
"Now attempting install of certificate for domain %s!", domain)
|
||||
|
||||
operation_logger = OperationLogger('letsencrypt_cert_install', [('domain', domain)],
|
||||
args={'force': force, 'no_checks': no_checks,
|
||||
'staging': staging})
|
||||
operation_logger.start()
|
||||
|
||||
try:
|
||||
if not no_checks:
|
||||
_check_domain_is_ready_for_ACME(domain)
|
||||
|
||||
operation_logger.start()
|
||||
|
||||
_fetch_and_enable_new_certificate(domain, staging, no_checks=no_checks)
|
||||
except Exception as e:
|
||||
msg = "Certificate installation for %s failed !\nException: %s" % (domain, e)
|
||||
logger.error(msg)
|
||||
operation_logger.error(msg)
|
||||
if no_checks:
|
||||
logger.error("Please consider checking the 'DNS records' (basic) and 'Web' categories of the diagnosis to check for possible issues that may prevent installing a Let's Encrypt certificate on domain %s." % domain)
|
||||
else:
|
||||
_install_cron(no_checks=no_checks)
|
||||
|
||||
logger.success(
|
||||
m18n.n("certmanager_cert_install_success", domain=domain))
|
||||
|
||||
operation_logger.success()
|
||||
except Exception as e:
|
||||
_display_debug_information(domain)
|
||||
msg = "Certificate installation for %s failed !\nException: %s" % (domain, e)
|
||||
logger.error(msg)
|
||||
operation_logger.error(msg)
|
||||
|
||||
|
||||
def certificate_renew(domain_list, force=False, no_checks=False, email=False, staging=False):
|
||||
|
@ -366,32 +366,34 @@ def certificate_renew(domain_list, force=False, no_checks=False, email=False, st
|
|||
# Actual renew steps
|
||||
for domain in domain_list:
|
||||
|
||||
operation_logger = OperationLogger('letsencrypt_cert_renew', [('domain', domain)],
|
||||
args={'force': force, 'no_checks': no_checks,
|
||||
'staging': staging, 'email': email})
|
||||
if not no_checks:
|
||||
try:
|
||||
_check_domain_is_ready_for_ACME(domain)
|
||||
except Exception as e:
|
||||
logger.error(e)
|
||||
if email:
|
||||
logger.error("Sending email with details to root ...")
|
||||
_email_renewing_failed(domain, e)
|
||||
continue
|
||||
|
||||
logger.info(
|
||||
"Now attempting renewing of certificate for domain %s !", domain)
|
||||
|
||||
operation_logger = OperationLogger('letsencrypt_cert_renew', [('domain', domain)],
|
||||
args={'force': force, 'no_checks': no_checks,
|
||||
'staging': staging, 'email': email})
|
||||
operation_logger.start()
|
||||
|
||||
try:
|
||||
if not no_checks:
|
||||
_check_domain_is_ready_for_ACME(domain)
|
||||
|
||||
operation_logger.start()
|
||||
|
||||
_fetch_and_enable_new_certificate(domain, staging, no_checks=no_checks)
|
||||
|
||||
logger.success(
|
||||
m18n.n("certmanager_cert_renew_success", domain=domain))
|
||||
|
||||
operation_logger.success()
|
||||
|
||||
except Exception as e:
|
||||
import traceback
|
||||
from StringIO import StringIO
|
||||
stack = StringIO()
|
||||
traceback.print_exc(file=stack)
|
||||
msg = "Certificate renewing for %s failed !" % (domain)
|
||||
if no_checks:
|
||||
msg += "\nPlease consider checking the 'DNS records' (basic) and 'Web' categories of the diagnosis to check for possible issues that may prevent installing a Let's Encrypt certificate on domain %s." % domain
|
||||
logger.error(msg)
|
||||
operation_logger.error(msg)
|
||||
logger.error(stack.getvalue())
|
||||
|
@ -399,7 +401,11 @@ def certificate_renew(domain_list, force=False, no_checks=False, email=False, st
|
|||
|
||||
if email:
|
||||
logger.error("Sending email with details to root ...")
|
||||
_email_renewing_failed(domain, e, stack.getvalue())
|
||||
_email_renewing_failed(domain, msg + "\n" + e, stack.getvalue())
|
||||
else:
|
||||
logger.success(
|
||||
m18n.n("certmanager_cert_renew_success", domain=domain))
|
||||
operation_logger.success()
|
||||
|
||||
#
|
||||
# Back-end stuff #
|
||||
|
@ -431,7 +437,7 @@ def _install_cron(no_checks=False):
|
|||
_set_permissions(cron_job_file, "root", "root", 0o755)
|
||||
|
||||
|
||||
def _email_renewing_failed(domain, exception_message, stack):
|
||||
def _email_renewing_failed(domain, exception_message, stack=""):
|
||||
from_ = "certmanager@%s (Certificate Manager)" % domain
|
||||
to_ = "root"
|
||||
subject_ = "Certificate renewing attempt for %s failed!" % domain
|
||||
|
@ -526,7 +532,6 @@ def _fetch_and_enable_new_certificate(domain, staging=False, no_checks=False):
|
|||
raise YunohostError('certmanager_hit_rate_limit', domain=domain)
|
||||
else:
|
||||
logger.error(str(e))
|
||||
_display_debug_information(domain)
|
||||
raise YunohostError('certmanager_cert_signing_failed')
|
||||
|
||||
except Exception as e:
|
||||
|
@ -596,10 +601,10 @@ def _prepare_certificate_signing_request(domain, key_file, output_folder):
|
|||
# For "parent" domains, include xmpp-upload subdomain in subject alternate names
|
||||
if domain in domain_list(exclude_subdomains=True)["domains"]:
|
||||
subdomain = "xmpp-upload." + domain
|
||||
try:
|
||||
_dns_ip_match_public_ip(get_public_ip(), subdomain)
|
||||
xmpp_records = Diagnoser.get_cached_report("dnsrecords", item={"domain": domain, "category": "xmpp"}).get("data") or {}
|
||||
if xmpp_records.get("CNAME:xmpp-upload") == "OK":
|
||||
csr.add_extensions([crypto.X509Extension("subjectAltName", False, "DNS:" + subdomain)])
|
||||
except YunohostError:
|
||||
else:
|
||||
logger.warning(m18n.n('certmanager_warning_subdomain_dns_record', subdomain=subdomain, domain=domain))
|
||||
|
||||
# Set the key
|
||||
|
@ -700,12 +705,6 @@ def _get_status(domain):
|
|||
"verbose": "Unknown?",
|
||||
}
|
||||
|
||||
try:
|
||||
_check_domain_is_ready_for_ACME(domain)
|
||||
ACME_eligible = True
|
||||
except:
|
||||
ACME_eligible = False
|
||||
|
||||
return {
|
||||
"domain": domain,
|
||||
"subject": cert_subject,
|
||||
|
@ -713,7 +712,6 @@ def _get_status(domain):
|
|||
"CA_type": CA_type,
|
||||
"validity": days_remaining,
|
||||
"summary": status_summary,
|
||||
"ACME_eligible": ACME_eligible
|
||||
}
|
||||
|
||||
#
|
||||
|
@ -790,70 +788,22 @@ def _backup_current_cert(domain):
|
|||
|
||||
|
||||
def _check_domain_is_ready_for_ACME(domain):
|
||||
public_ip = get_public_ip()
|
||||
|
||||
dnsrecords = Diagnoser.get_cached_report("dnsrecords", item={"domain": domain, "category": "basic"}, warn_if_no_cache=False) or {}
|
||||
httpreachable = Diagnoser.get_cached_report("web", item={"domain": domain}, warn_if_no_cache=False) or {}
|
||||
|
||||
if not dnsrecords or not httpreachable:
|
||||
raise YunohostError('certmanager_domain_not_diagnosed_yet', domain=domain)
|
||||
|
||||
# Check if IP from DNS matches public IP
|
||||
if not _dns_ip_match_public_ip(public_ip, domain):
|
||||
if not dnsrecords.get("status") in ["SUCCESS", "WARNING"]: # Warning is for missing IPv6 record which ain't critical for ACME
|
||||
raise YunohostError('certmanager_domain_dns_ip_differs_from_public_ip', domain=domain)
|
||||
|
||||
# Check if domain seems to be accessible through HTTP?
|
||||
if not _domain_is_accessible_through_HTTP(public_ip, domain):
|
||||
if not httpreachable.get("status") == "SUCCESS":
|
||||
raise YunohostError('certmanager_domain_http_not_working', domain=domain)
|
||||
|
||||
|
||||
def _get_dns_ip(domain):
|
||||
try:
|
||||
resolver = dns.resolver.Resolver()
|
||||
resolver.nameservers = DNS_RESOLVERS
|
||||
answers = resolver.query(domain, "A")
|
||||
except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN):
|
||||
raise YunohostError('certmanager_error_no_A_record', domain=domain)
|
||||
|
||||
return str(answers[0])
|
||||
|
||||
|
||||
def _dns_ip_match_public_ip(public_ip, domain):
|
||||
return _get_dns_ip(domain) == public_ip
|
||||
|
||||
|
||||
def _domain_is_accessible_through_HTTP(ip, domain):
|
||||
import requests # lazy loading this module for performance reasons
|
||||
try:
|
||||
requests.head("http://" + ip, headers={"Host": domain}, timeout=10)
|
||||
except requests.exceptions.Timeout as e:
|
||||
logger.warning(m18n.n('certmanager_http_check_timeout', domain=domain, ip=ip))
|
||||
return False
|
||||
except Exception as e:
|
||||
logger.debug("Couldn't reach domain '%s' by requesting this ip '%s' because: %s" % (domain, ip, e))
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def _get_local_dns_ip(domain):
|
||||
try:
|
||||
resolver = dns.resolver.Resolver()
|
||||
answers = resolver.query(domain, "A")
|
||||
except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN):
|
||||
logger.warning("Failed to resolved domain '%s' locally", domain)
|
||||
return None
|
||||
|
||||
return str(answers[0])
|
||||
|
||||
|
||||
def _display_debug_information(domain):
|
||||
dns_ip = _get_dns_ip(domain)
|
||||
public_ip = get_public_ip()
|
||||
local_dns_ip = _get_local_dns_ip(domain)
|
||||
|
||||
logger.warning("""\
|
||||
Debug information:
|
||||
- domain ip from DNS %s
|
||||
- domain ip from local DNS %s
|
||||
- public ip of the server %s
|
||||
""", dns_ip, local_dns_ip, public_ip)
|
||||
|
||||
|
||||
# FIXME / TODO : ideally this should not be needed. There should be a proper
|
||||
# mechanism to regularly check the value of the public IP and trigger
|
||||
# corresponding hooks (e.g. dyndns update and dnsmasq regen-conf)
|
||||
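_check_domain_is_ready_for_ACME above no longer does its own DNS resolution and HTTP probe; it now reads the cached 'dnsrecords' and 'web' diagnosis reports and only accepts SUCCESS (or WARNING for DNS, since a missing IPv6 record is not blocking for ACME). A compact sketch of that decision with the report fetching stubbed out (function name and messages are illustrative):

def domain_is_ready_for_acme(dnsrecords_report, web_report):
    # Both reports must exist, i.e. the diagnosis already ran for this domain
    if not dnsrecords_report or not web_report:
        raise RuntimeError("domain not diagnosed yet")
    # WARNING is tolerated for DNS: a missing IPv6 record does not block ACME
    if dnsrecords_report.get("status") not in ["SUCCESS", "WARNING"]:
        raise RuntimeError("DNS records differ from the public IP")
    if web_report.get("status") != "SUCCESS":
        raise RuntimeError("domain is not reachable over HTTP")
    return True

# Example
assert domain_is_ready_for_acme({"status": "WARNING"}, {"status": "SUCCESS"})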
@ -1,15 +0,0 @@
|
|||
import subprocess
|
||||
import glob
|
||||
from yunohost.tools import Migration
|
||||
from moulinette.utils.filesystem import chown
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"Change certificates group permissions from 'metronome' to 'ssl-cert'"
|
||||
|
||||
all_certificate_files = glob.glob("/etc/yunohost/certs/*/*.pem")
|
||||
|
||||
def run(self):
|
||||
for filename in self.all_certificate_files:
|
||||
chown(filename, uid="root", gid="ssl-cert")
|
|
@ -1,86 +0,0 @@
|
|||
import glob
|
||||
import os
|
||||
import requests
|
||||
import base64
|
||||
import time
|
||||
import json
|
||||
|
||||
from moulinette import m18n
|
||||
from yunohost.utils.error import YunohostError
|
||||
from moulinette.utils.log import getActionLogger
|
||||
|
||||
from yunohost.tools import Migration
|
||||
from yunohost.dyndns import _guess_current_dyndns_domain
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"Migrate Dyndns stuff from MD5 TSIG to SHA512 TSIG"
|
||||
|
||||
def run(self, dyn_host="dyndns.yunohost.org", domain=None, private_key_path=None):
|
||||
|
||||
if domain is None or private_key_path is None:
|
||||
try:
|
||||
(domain, private_key_path) = _guess_current_dyndns_domain(dyn_host)
|
||||
assert "+157" in private_key_path
|
||||
except (YunohostError, AssertionError):
|
||||
logger.info(m18n.n("migrate_tsig_not_needed"))
|
||||
return
|
||||
|
||||
logger.info(m18n.n('migrate_tsig_start', domain=domain))
|
||||
public_key_path = private_key_path.rsplit(".private", 1)[0] + ".key"
|
||||
public_key_md5 = open(public_key_path).read().strip().split(' ')[-1]
|
||||
|
||||
os.system('cd /etc/yunohost/dyndns && '
|
||||
'dnssec-keygen -a hmac-sha512 -b 512 -r /dev/urandom -n USER %s' % domain)
|
||||
os.system('chmod 600 /etc/yunohost/dyndns/*.key /etc/yunohost/dyndns/*.private')
|
||||
|
||||
# +165 means that this file store a hmac-sha512 key
|
||||
new_key_path = glob.glob('/etc/yunohost/dyndns/*+165*.key')[0]
|
||||
public_key_sha512 = open(new_key_path).read().strip().split(' ', 6)[-1]
|
||||
|
||||
try:
|
||||
r = requests.put('https://%s/migrate_key_to_sha512/' % (dyn_host),
|
||||
data={
|
||||
'public_key_md5': base64.b64encode(public_key_md5),
|
||||
'public_key_sha512': base64.b64encode(public_key_sha512),
|
||||
}, timeout=30)
|
||||
except requests.ConnectionError:
|
||||
raise YunohostError('no_internet_connection')
|
||||
|
||||
if r.status_code != 201:
|
||||
try:
|
||||
error = json.loads(r.text)['error']
|
||||
except Exception:
|
||||
# failed to decode json
|
||||
error = r.text
|
||||
|
||||
import traceback
|
||||
from StringIO import StringIO
|
||||
stack = StringIO()
|
||||
traceback.print_stack(file=stack)
|
||||
logger.error(stack.getvalue())
|
||||
|
||||
# Migration didn't succeed, so we rollback and raise an exception
|
||||
os.system("mv /etc/yunohost/dyndns/*+165* /tmp")
|
||||
|
||||
raise YunohostError('migrate_tsig_failed', domain=domain,
|
||||
error_code=str(r.status_code), error=error)
|
||||
|
||||
# remove old certificates
|
||||
os.system("mv /etc/yunohost/dyndns/*+157* /tmp")
|
||||
|
||||
# sleep to wait for dyndns cache invalidation
|
||||
logger.info(m18n.n('migrate_tsig_wait'))
|
||||
time.sleep(60)
|
||||
logger.info(m18n.n('migrate_tsig_wait_2'))
|
||||
time.sleep(60)
|
||||
logger.info(m18n.n('migrate_tsig_wait_3'))
|
||||
time.sleep(30)
|
||||
logger.info(m18n.n('migrate_tsig_wait_4'))
|
||||
time.sleep(30)
|
||||
|
||||
logger.info(m18n.n('migrate_tsig_end'))
|
||||
return
|
|
@ -1,379 +0,0 @@
|
|||
import glob
|
||||
import os
|
||||
from shutil import copy2
|
||||
|
||||
from moulinette import m18n, msettings
|
||||
from yunohost.utils.error import YunohostError
|
||||
from moulinette.utils.log import getActionLogger
|
||||
from moulinette.utils.process import check_output, call_async_output
|
||||
from moulinette.utils.filesystem import read_file
|
||||
|
||||
from yunohost.tools import Migration
|
||||
from yunohost.app import unstable_apps
|
||||
from yunohost.service import _run_service_command
|
||||
from yunohost.regenconf import (manually_modified_files,
|
||||
manually_modified_files_compared_to_debian_default)
|
||||
from yunohost.utils.filesystem import free_space_in_directory
|
||||
from yunohost.utils.packages import get_installed_version
|
||||
from yunohost.utils.network import get_network_interfaces
|
||||
from yunohost.firewall import firewall_allow, firewall_disallow
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
YUNOHOST_PACKAGES = ["yunohost", "yunohost-admin", "moulinette", "ssowat"]
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"Upgrade the system to Debian Stretch and Yunohost 3.0"
|
||||
|
||||
mode = "manual"
|
||||
|
||||
def run(self):
|
||||
|
||||
self.logfile = "/var/log/yunohost/{}.log".format(self.name)
|
||||
|
||||
self.check_assertions()
|
||||
|
||||
logger.info(m18n.n("migration_0003_start", logfile=self.logfile))
|
||||
|
||||
# Preparing the upgrade
|
||||
self.restore_original_nginx_conf_if_needed()
|
||||
|
||||
logger.info(m18n.n("migration_0003_patching_sources_list"))
|
||||
self.patch_apt_sources_list()
|
||||
self.backup_files_to_keep()
|
||||
self.apt_update()
|
||||
apps_packages = self.get_apps_equivs_packages()
|
||||
self.unhold(["metronome"])
|
||||
self.hold(YUNOHOST_PACKAGES + apps_packages + ["fail2ban"])
|
||||
|
||||
# Main dist-upgrade
|
||||
logger.info(m18n.n("migration_0003_main_upgrade"))
|
||||
_run_service_command("stop", "mysql")
|
||||
self.apt_dist_upgrade(conf_flags=["old", "miss", "def"])
|
||||
_run_service_command("start", "mysql")
|
||||
if self.debian_major_version() == 8:
|
||||
raise YunohostError("migration_0003_still_on_jessie_after_main_upgrade", log=self.logfile)
|
||||
|
||||
# Specific upgrade for fail2ban...
|
||||
logger.info(m18n.n("migration_0003_fail2ban_upgrade"))
|
||||
self.unhold(["fail2ban"])
|
||||
# Don't move this if folder already exists. If it does, we probably are
|
||||
# running this script a 2nd, 3rd, ... time but /etc/fail2ban will
|
||||
# be re-created only for the first dist-upgrade of fail2ban
|
||||
if not os.path.exists("/etc/fail2ban.old"):
|
||||
os.system("mv /etc/fail2ban /etc/fail2ban.old")
|
||||
self.apt_dist_upgrade(conf_flags=["new", "miss", "def"])
|
||||
_run_service_command("restart", "fail2ban")
|
||||
|
||||
self.disable_predicable_interface_names()
|
||||
|
||||
# Clean the mess
|
||||
os.system("apt autoremove --assume-yes")
|
||||
os.system("apt clean --assume-yes")
|
||||
|
||||
# We moved to port 587 for SMTP
|
||||
# https://busylog.net/smtp-tls-ssl-25-465-587/
|
||||
firewall_allow("Both", 587)
|
||||
firewall_disallow("Both", 465)
|
||||
|
||||
# Upgrade yunohost packages
|
||||
logger.info(m18n.n("migration_0003_yunohost_upgrade"))
|
||||
self.restore_files_to_keep()
|
||||
self.unhold(YUNOHOST_PACKAGES + apps_packages)
|
||||
self.upgrade_yunohost_packages()
|
||||
|
||||
def debian_major_version(self):
|
||||
# The python module "platform" and lsb_release are not reliable because
|
||||
# on some setup, they still return Release=8 even after upgrading to
|
||||
# stretch ... (Apparently this is related to OVH overriding some stuff
|
||||
# with /etc/lsb-release for instance -_-)
|
||||
# Instead, we rely on /etc/os-release which should be the raw info from
|
||||
# the distribution...
|
||||
return int(check_output("grep VERSION_ID /etc/os-release | head -n 1 | tr '\"' ' ' | cut -d ' ' -f2"))
|
||||
|
||||
def yunohost_major_version(self):
|
||||
return int(get_installed_version("yunohost").split('.')[0])
|
||||
|
||||
def check_assertions(self):
|
||||
|
||||
# Be on jessie (8.x) and yunohost 2.x
|
||||
# NB : we do both check to cover situations where the upgrade crashed
|
||||
# in the middle and debian version could be >= 9.x but yunohost package
|
||||
# would still be in 2.x...
|
||||
if not self.debian_major_version() == 8 \
|
||||
and not self.yunohost_major_version() == 2:
|
||||
raise YunohostError("migration_0003_not_jessie")
|
||||
|
||||
# Have > 1 Go free space on /var/ ?
|
||||
if free_space_in_directory("/var/") / (1024**3) < 1.0:
|
||||
raise YunohostError("There is not enough free space in /var/ to run the migration. You need at least 1GB free space")
|
||||
|
||||
# Check system is up to date
|
||||
# (but we don't if 'stretch' is already in the sources.list ...
|
||||
# which means maybe a previous upgrade crashed and we're re-running it)
|
||||
if " stretch " not in read_file("/etc/apt/sources.list"):
|
||||
self.apt_update()
|
||||
apt_list_upgradable = check_output("apt list --upgradable -a")
|
||||
if "upgradable" in apt_list_upgradable:
|
||||
raise YunohostError("migration_0003_system_not_fully_up_to_date")
|
||||
|
||||
@property
|
||||
def disclaimer(self):
|
||||
|
||||
# Avoid having a super long disclaimer + uncessary check if we ain't
|
||||
# on jessie / yunohost 2.x anymore
|
||||
# NB : we do both check to cover situations where the upgrade crashed
|
||||
# in the middle and debian version could be >= 9.x but yunohost package
|
||||
# would still be in 2.x...
|
||||
if not self.debian_major_version() == 8 \
|
||||
and not self.yunohost_major_version() == 2:
|
||||
return None
|
||||
|
||||
# Get list of problematic apps ? I.e. not official or community+working
|
||||
problematic_apps = unstable_apps()
|
||||
problematic_apps = "".join(["\n - " + app for app in problematic_apps])
|
||||
|
||||
# Manually modified files ? (c.f. yunohost service regen-conf)
|
||||
modified_files = manually_modified_files()
|
||||
# We also have a specific check for nginx.conf which some people
|
||||
# modified and needs to be upgraded...
|
||||
if "/etc/nginx/nginx.conf" in manually_modified_files_compared_to_debian_default():
|
||||
modified_files.append("/etc/nginx/nginx.conf")
|
||||
modified_files = "".join(["\n - " + f for f in modified_files])
|
||||
|
||||
message = m18n.n("migration_0003_general_warning")
|
||||
|
||||
if problematic_apps:
|
||||
message += "\n\n" + m18n.n("migration_0003_problematic_apps_warning", problematic_apps=problematic_apps)
|
||||
|
||||
if modified_files:
|
||||
message += "\n\n" + m18n.n("migration_0003_modified_files", manually_modified_files=modified_files)
|
||||
|
||||
return message
|
||||
|
||||
def patch_apt_sources_list(self):
|
||||
|
||||
sources_list = glob.glob("/etc/apt/sources.list.d/*.list")
|
||||
sources_list.append("/etc/apt/sources.list")
|
||||
|
||||
# This :
|
||||
# - replace single 'jessie' occurence by 'stretch'
|
||||
# - comments lines containing "backports"
|
||||
# - replace 'jessie/updates' by 'strech/updates' (or same with a -)
|
||||
# - switch yunohost's repo to forge
|
||||
for f in sources_list:
|
||||
command = "sed -i -e 's@ jessie @ stretch @g' " \
|
||||
"-e '/backports/ s@^#*@#@' " \
|
||||
"-e 's@ jessie/updates @ stretch/updates @g' " \
|
||||
"-e 's@ jessie-updates @ stretch-updates @g' " \
|
||||
"-e 's@repo.yunohost@forge.yunohost@g' " \
|
||||
"{}".format(f)
|
||||
os.system(command)
|
||||
|
||||
def get_apps_equivs_packages(self):
|
||||
|
||||
command = "dpkg --get-selections" \
|
||||
" | grep -v deinstall" \
|
||||
" | awk '{print $1}'" \
|
||||
" | { grep 'ynh-deps$' || true; }"
|
||||
|
||||
output = check_output(command).strip()
|
||||
|
||||
return output.split('\n') if output else []
|
||||
|
||||
def hold(self, packages):
|
||||
for package in packages:
|
||||
os.system("apt-mark hold {}".format(package))
|
||||
|
||||
def unhold(self, packages):
|
||||
for package in packages:
|
||||
os.system("apt-mark unhold {}".format(package))
|
||||
|
||||
def apt_update(self):
|
||||
|
||||
command = "apt-get update"
|
||||
logger.debug("Running apt command :\n{}".format(command))
|
||||
command += " 2>&1 | tee -a {}".format(self.logfile)
|
||||
|
||||
os.system(command)
|
||||
|
||||
def upgrade_yunohost_packages(self):
|
||||
|
||||
#
|
||||
# Here we use a dirty hack to run a command after the current
|
||||
# "yunohost tools migrations migrate", because the upgrade of
|
||||
# yunohost will also trigger another "yunohost tools migrations migrate"
|
||||
# (also the upgrade of the package, if executed from the webadmin, is
|
||||
# likely to kill/restart the api which is in turn likely to kill this
|
||||
# command before it ends...)
|
||||
#
|
||||
|
||||
MOULINETTE_LOCK = "/var/run/moulinette_yunohost.lock"
|
||||
|
||||
upgrade_command = ""
|
||||
upgrade_command += " DEBIAN_FRONTEND=noninteractive"
|
||||
upgrade_command += " APT_LISTCHANGES_FRONTEND=none"
|
||||
upgrade_command += " apt-get install"
|
||||
upgrade_command += " --assume-yes "
|
||||
upgrade_command += " ".join(YUNOHOST_PACKAGES)
|
||||
# We also install php-zip and php7.0-acpu to fix an issue with
|
||||
# nextcloud and kanboard that need it when on stretch.
|
||||
upgrade_command += " php-zip php7.0-apcu"
|
||||
upgrade_command += " 2>&1 | tee -a {}".format(self.logfile)
|
||||
|
||||
wait_until_end_of_yunohost_command = "(while [ -f {} ]; do sleep 2; done)".format(MOULINETTE_LOCK)
|
||||
|
||||
command = "({} && {}; echo 'Migration complete!') &".format(wait_until_end_of_yunohost_command,
|
||||
upgrade_command)
|
||||
|
||||
logger.debug("Running command :\n{}".format(command))
|
||||
|
||||
os.system(command)
|
||||
|
||||
def apt_dist_upgrade(self, conf_flags):
|
||||
|
||||
# Make apt-get happy
|
||||
os.system("echo 'libc6 libraries/restart-without-asking boolean true' | debconf-set-selections")
|
||||
# Don't send an email to root about the postgresql migration. It should be handled automatically after.
|
||||
os.system("echo 'postgresql-common postgresql-common/obsolete-major seen true' | debconf-set-selections")
|
||||
|
||||
command = ""
|
||||
command += " DEBIAN_FRONTEND=noninteractive"
|
||||
command += " APT_LISTCHANGES_FRONTEND=none"
|
||||
command += " apt-get"
|
||||
command += " --fix-broken --show-upgraded --assume-yes"
|
||||
for conf_flag in conf_flags:
|
||||
command += ' -o Dpkg::Options::="--force-conf{}"'.format(conf_flag)
|
||||
command += " dist-upgrade"
|
||||
|
||||
logger.debug("Running apt command :\n{}".format(command))
|
||||
|
||||
command += " 2>&1 | tee -a {}".format(self.logfile)
|
||||
|
||||
is_api = msettings.get('interface') == 'api'
|
||||
if is_api:
|
||||
callbacks = (
|
||||
lambda l: logger.info(l.rstrip()),
|
||||
lambda l: logger.warning(l.rstrip()),
|
||||
)
|
||||
call_async_output(command, callbacks, shell=True)
|
||||
else:
|
||||
# We do this when running from the cli to have the output of the
|
||||
# command showing in the terminal, since 'info' channel is only
|
||||
# enabled if the user explicitly add --verbose ...
|
||||
os.system(command)
|
||||
|
||||
# Those are files that should be kept and restored before the final switch
|
||||
# to yunohost 3.x... They end up being modified by the various dist-upgrades
|
||||
# (or need to be taken out momentarily), which then blocks the regen-conf
|
||||
# as they are flagged as "manually modified"...
|
||||
files_to_keep = [
|
||||
"/etc/mysql/my.cnf",
|
||||
"/etc/nslcd.conf",
|
||||
"/etc/postfix/master.cf",
|
||||
"/etc/fail2ban/filter.d/yunohost.conf"
|
||||
]
|
||||
|
||||
def backup_files_to_keep(self):
|
||||
|
||||
logger.debug("Backuping specific files to keep ...")
|
||||
|
||||
# Create tmp directory if it does not exists
|
||||
tmp_dir = os.path.join("/tmp/", self.name)
|
||||
if not os.path.exists(tmp_dir):
|
||||
os.mkdir(tmp_dir, 0o700)
|
||||
|
||||
for f in self.files_to_keep:
|
||||
dest_file = f.strip('/').replace("/", "_")
|
||||
|
||||
# If the file is already there, we might be re-running the migration
|
||||
# because it previously crashed. Hence we keep the existing file.
|
||||
if os.path.exists(os.path.join(tmp_dir, dest_file)):
|
||||
continue
|
||||
|
||||
copy2(f, os.path.join(tmp_dir, dest_file))
|
||||
|
||||
def restore_files_to_keep(self):
|
||||
|
||||
logger.debug("Restoring specific files to keep ...")
|
||||
|
||||
tmp_dir = os.path.join("/tmp/", self.name)
|
||||
|
||||
for f in self.files_to_keep:
|
||||
dest_file = f.strip('/').replace("/", "_")
|
||||
copy2(os.path.join(tmp_dir, dest_file), f)
|
||||
|
||||
# On some setups, /etc/nginx/nginx.conf got edited. But this file needs
|
||||
# to be upgraded because of the way the new module system works for nginx.
|
||||
# (in particular, having the line that include the modules at the top)
|
||||
#
|
||||
# So here, if it got edited, we force the restore of the original conf
|
||||
# *before* starting the actual upgrade...
|
||||
#
|
||||
# An alternative strategy that was attempted was to hold the nginx-common
|
||||
# package and have a specific upgrade for it like for fail2ban, but that
|
||||
# leads to apt complaining about not being able to upgrade for shitty
|
||||
# reasons >.>
|
||||
def restore_original_nginx_conf_if_needed(self):
|
||||
if "/etc/nginx/nginx.conf" not in manually_modified_files_compared_to_debian_default():
|
||||
return
|
||||
|
||||
if not os.path.exists("/etc/nginx/nginx.conf"):
|
||||
return
|
||||
|
||||
# If stretch is in the sources.list, we already started migrating on
|
||||
# stretch so we don't re-do this
|
||||
if " stretch " in read_file("/etc/apt/sources.list"):
|
||||
return
|
||||
|
||||
backup_dest = "/home/yunohost.conf/backup/nginx.conf.bkp_before_stretch"
|
||||
|
||||
logger.warning(m18n.n("migration_0003_restoring_origin_nginx_conf",
|
||||
backup_dest=backup_dest))
|
||||
|
||||
os.system("mv /etc/nginx/nginx.conf %s" % backup_dest)
|
||||
|
||||
command = ""
|
||||
command += " DEBIAN_FRONTEND=noninteractive"
|
||||
command += " APT_LISTCHANGES_FRONTEND=none"
|
||||
command += " apt-get"
|
||||
command += " --fix-broken --show-upgraded --assume-yes"
|
||||
command += ' -o Dpkg::Options::="--force-confmiss"'
|
||||
command += " install --reinstall"
|
||||
command += " nginx-common"
|
||||
|
||||
logger.debug("Running apt command :\n{}".format(command))
|
||||
|
||||
command += " 2>&1 | tee -a {}".format(self.logfile)
|
||||
|
||||
is_api = msettings.get('interface') == 'api'
|
||||
if is_api:
|
||||
callbacks = (
|
||||
lambda l: logger.info(l.rstrip()),
|
||||
lambda l: logger.warning(l.rstrip()),
|
||||
)
|
||||
call_async_output(command, callbacks, shell=True)
|
||||
else:
|
||||
# We do this when running from the cli to have the output of the
|
||||
# command showing in the terminal, since 'info' channel is only
|
||||
# enabled if the user explicitly add --verbose ...
|
||||
os.system(command)
|
||||
|
||||
def disable_predicable_interface_names(self):
|
||||
|
||||
# Try to see if currently used interface names are predictable ones or not...
|
||||
# If we ain't using "eth0" or "wlan0", assume we are using predictable interface
|
||||
# names and therefore they shouldn't be disabled
|
||||
network_interfaces = get_network_interfaces().keys()
|
||||
if "eth0" not in network_interfaces and "wlan0" not in network_interfaces:
|
||||
return
|
||||
|
||||
interfaces_config = read_file("/etc/network/interfaces")
|
||||
if "eth0" not in interfaces_config and "wlan0" not in interfaces_config:
|
||||
return
|
||||
|
||||
# Disable predictable interface names
|
||||
# c.f. https://unix.stackexchange.com/a/338730
|
||||
os.system("ln -s /dev/null /etc/systemd/network/99-default.link")
|
|
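As a side note, get_network_interfaces() comes from yunohost's network helpers; a rough equivalent of the name check above, reading sysfs directly (Linux-specific, shown only for illustration):

import os

def current_interface_names():
    # Kernel network interface names, e.g. {"lo", "eth0"} or {"lo", "ens3"}.
    try:
        return set(os.listdir("/sys/class/net"))
    except OSError:
        return set()

# If neither "eth0" nor "wlan0" shows up here, the machine is presumably
# already using predictable interface names and nothing needs disabling.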
@@ -1,99 +0,0 @@
|
|||
import os
|
||||
import glob
|
||||
from shutil import copy2
|
||||
|
||||
from moulinette.utils.log import getActionLogger
|
||||
|
||||
from yunohost.tools import Migration
|
||||
from yunohost.service import _run_service_command
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
PHP5_POOLS = "/etc/php5/fpm/pool.d"
|
||||
PHP7_POOLS = "/etc/php/7.0/fpm/pool.d"
|
||||
|
||||
PHP5_SOCKETS_PREFIX = "/var/run/php5-fpm"
|
||||
PHP7_SOCKETS_PREFIX = "/run/php/php7.0-fpm"
|
||||
|
||||
MIGRATION_COMMENT = "; YunoHost note : this file was automatically moved from {}".format(PHP5_POOLS)
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"Migrate php5-fpm 'pool' conf files to php7 stuff"
|
||||
|
||||
dependencies = ["migrate_to_stretch"]
|
||||
|
||||
def run(self):
|
||||
# Get list of php5 pool files
|
||||
php5_pool_files = glob.glob("{}/*.conf".format(PHP5_POOLS))
|
||||
|
||||
# Keep only basenames
|
||||
php5_pool_files = [os.path.basename(f) for f in php5_pool_files]
|
||||
|
||||
# Ignore the "www.conf" (default stuff, probably don't want to touch it ?)
|
||||
php5_pool_files = [f for f in php5_pool_files if f != "www.conf"]
|
||||
|
||||
for f in php5_pool_files:
|
||||
|
||||
# Copy the files to the php7 pool
|
||||
src = "{}/{}".format(PHP5_POOLS, f)
|
||||
dest = "{}/{}".format(PHP7_POOLS, f)
|
||||
copy2(src, dest)
|
||||
|
||||
# Replace the socket prefix if it's found
|
||||
c = "sed -i -e 's@{}@{}@g' {}".format(PHP5_SOCKETS_PREFIX, PHP7_SOCKETS_PREFIX, dest)
|
||||
os.system(c)
|
||||
|
||||
# Also add a comment that it was automatically moved from php5
|
||||
# (for human traceability and backward migration)
|
||||
c = "sed -i '1i {}' {}".format(MIGRATION_COMMENT, dest)
|
||||
os.system(c)
|
||||
|
||||
# Some old comments starting with '#' instead of ';' are not
|
||||
# compatible in php7
|
||||
c = "sed -i 's/^#/;#/g' {}".format(dest)
|
||||
os.system(c)
|
||||
|
||||
# Reload/restart the php pools
|
||||
_run_service_command("restart", "php7.0-fpm")
|
||||
_run_service_command("enable", "php7.0-fpm")
|
||||
os.system("systemctl stop php5-fpm")
|
||||
os.system("systemctl disable php5-fpm")
|
||||
os.system("rm /etc/logrotate.d/php5-fpm") # We remove this otherwise the logrotate cron will be unhappy
|
||||
|
||||
# Get list of nginx conf file
|
||||
nginx_conf_files = glob.glob("/etc/nginx/conf.d/*.d/*.conf")
|
||||
for f in nginx_conf_files:
|
||||
# Replace the socket prefix if it's found
|
||||
c = "sed -i -e 's@{}@{}@g' {}".format(PHP5_SOCKETS_PREFIX, PHP7_SOCKETS_PREFIX, f)
|
||||
os.system(c)
|
||||
|
||||
# Reload nginx
|
||||
_run_service_command("reload", "nginx")
|
||||
|
||||
def backward(self):
|
||||
|
||||
# Get list of php7 pool files
|
||||
php7_pool_files = glob.glob("{}/*.conf".format(PHP7_POOLS))
|
||||
|
||||
# Keep only files which have the migration comment
|
||||
php7_pool_files = [f for f in php7_pool_files if open(f).readline().strip() == MIGRATION_COMMENT]
|
||||
|
||||
# Delete those files
|
||||
for f in php7_pool_files:
|
||||
os.remove(f)
|
||||
|
||||
# Reload/restart the php pools
|
||||
_run_service_command("stop", "php7.0-fpm")
|
||||
os.system("systemctl start php5-fpm")
|
||||
|
||||
# Get list of nginx conf file
|
||||
nginx_conf_files = glob.glob("/etc/nginx/conf.d/*.d/*.conf")
|
||||
for f in nginx_conf_files:
|
||||
# Replace the socket prefix if it's found
|
||||
c = "sed -i -e 's@{}@{}@g' {}".format(PHP7_SOCKETS_PREFIX, PHP5_SOCKETS_PREFIX, f)
|
||||
os.system(c)
|
||||
|
||||
# Reload nginx
|
||||
_run_service_command("reload", "nginx")
|
|
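The sed invocations above rewrite the php-fpm socket prefix and prepend the migration comment. For illustration, the same substitution can be done without shelling out (a sketch, not what the migration actually runs):

def replace_in_file(path, old, new):
    # Pure-Python equivalent of: sed -i -e 's@old@new@g' path
    with open(path) as f:
        content = f.read()
    if old in content:
        with open(path, 'w') as f:
            f.write(content.replace(old, new))

# e.g. replace_in_file(dest, "/var/run/php5-fpm", "/run/php/php7.0-fpm")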
@@ -1,41 +0,0 @@
|
|||
import subprocess
|
||||
|
||||
from moulinette import m18n
|
||||
from yunohost.utils.error import YunohostError
|
||||
from moulinette.utils.log import getActionLogger
|
||||
|
||||
from yunohost.tools import Migration
|
||||
from yunohost.utils.filesystem import free_space_in_directory, space_used_by_directory
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"Migrate DBs from Postgresql 9.4 to 9.6 after migrating to Stretch"
|
||||
|
||||
dependencies = ["migrate_to_stretch"]
|
||||
|
||||
def run(self):
|
||||
|
||||
if not self.package_is_installed("postgresql-9.4"):
|
||||
logger.warning(m18n.n("migration_0005_postgresql_94_not_installed"))
|
||||
return
|
||||
|
||||
if not self.package_is_installed("postgresql-9.6"):
|
||||
raise YunohostError("migration_0005_postgresql_96_not_installed")
|
||||
|
||||
if free_space_in_directory("/var/lib/postgresql") <= space_used_by_directory("/var/lib/postgresql/9.4"):
|
||||
raise YunohostError("migration_0005_not_enough_space", path="/var/lib/postgresql/")
|
||||
|
||||
subprocess.check_call("service postgresql stop", shell=True)
|
||||
subprocess.check_call("pg_dropcluster --stop 9.6 main", shell=True)
|
||||
subprocess.check_call("pg_upgradecluster -m upgrade 9.4 main", shell=True)
|
||||
subprocess.check_call("pg_dropcluster --stop 9.4 main", shell=True)
|
||||
subprocess.check_call("service postgresql start", shell=True)
|
||||
|
||||
def package_is_installed(self, package_name):
|
||||
|
||||
p = subprocess.Popen("dpkg --list | grep '^ii ' | grep -q -w {}".format(package_name), shell=True)
|
||||
p.communicate()
|
||||
return p.returncode == 0
|
|
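package_is_installed() above shells out to a dpkg --list | grep pipeline. A slightly more robust variant using dpkg-query, shown only as an alternative sketch:

import subprocess

def package_is_installed(name):
    try:
        out = subprocess.check_output(
            ["dpkg-query", "-W", "-f=${Status}", name],
            stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        return False
    return b"install ok installed" in out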
@@ -1,78 +0,0 @@
|
|||
import spwd
|
||||
import crypt
|
||||
import random
|
||||
import string
|
||||
import subprocess
|
||||
|
||||
from moulinette import m18n
|
||||
from yunohost.utils.error import YunohostError
|
||||
from moulinette.utils.log import getActionLogger
|
||||
from moulinette.utils.process import run_commands, check_output
|
||||
from moulinette.utils.filesystem import append_to_file
|
||||
from moulinette.authenticators.ldap import Authenticator
|
||||
from yunohost.tools import Migration
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
SMALL_PWD_LIST = ["yunohost", "olinuxino", "olinux", "raspberry", "admin", "root", "test", "rpi"]
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"Synchronize admin and root passwords"
|
||||
|
||||
def run(self):
|
||||
|
||||
new_hash = self._get_admin_hash()
|
||||
self._replace_root_hash(new_hash)
|
||||
|
||||
logger.info(m18n.n("root_password_replaced_by_admin_password"))
|
||||
|
||||
@property
|
||||
def mode(self):
|
||||
|
||||
# If the root password is still a "default" value,
|
||||
# then this is an emergency and migration shall
|
||||
# be applied automatically
|
||||
#
|
||||
# Otherwise, as playing with root password is touchy,
|
||||
# we set this as a manual migration.
|
||||
return "auto" if self._is_root_pwd_listed(SMALL_PWD_LIST) else "manual"
|
||||
|
||||
@property
|
||||
def disclaimer(self):
|
||||
if self._is_root_pwd_listed(SMALL_PWD_LIST):
|
||||
return None
|
||||
|
||||
return m18n.n("migration_0006_disclaimer")
|
||||
|
||||
def _get_admin_hash(self):
|
||||
"""
|
||||
Fetch the admin hash from the LDAP db using slapcat
|
||||
"""
|
||||
admin_hash = check_output("slapcat \
|
||||
| grep 'dn: cn=admin,dc=yunohost,dc=org' -A20 \
|
||||
| grep userPassword -A2 \
|
||||
| tr -d '\n ' \
|
||||
| tr ':' ' ' \
|
||||
| awk '{print $2}' \
|
||||
| base64 -d \
|
||||
| sed 's/{CRYPT}//g'")
|
||||
return admin_hash
|
||||
|
||||
def _replace_root_hash(self, new_hash):
|
||||
hash_root = spwd.getspnam("root").sp_pwd
|
||||
|
||||
with open('/etc/shadow', 'r') as before_file:
|
||||
before = before_file.read()
|
||||
|
||||
with open('/etc/shadow', 'w') as after_file:
|
||||
after_file.write(before.replace("root:" + hash_root,
|
||||
"root:" + new_hash))
|
||||
|
||||
def _is_root_pwd_listed(self, pwd_list):
|
||||
hash_root = spwd.getspnam("root").sp_pwd
|
||||
|
||||
for password in pwd_list:
|
||||
if hash_root == crypt.crypt(password, hash_root):
|
||||
return True
|
||||
return False
|
|
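_is_root_pwd_listed() relies on how crypt(3) works: passing the existing hash as the salt makes crypt reuse the same algorithm and salt, so an identical output means the candidate password matches. A minimal sketch of that check for a single candidate (must run as root to read the shadow entry):

import crypt
import spwd

def root_password_is(candidate):
    hash_root = spwd.getspnam("root").sp_pwd
    return crypt.crypt(candidate, hash_root) == hash_root

# e.g. root_password_is("yunohost") would be True on a never-reconfigured image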
@@ -1,70 +0,0 @@
|
|||
import os
|
||||
import re
|
||||
|
||||
from shutil import copyfile
|
||||
|
||||
from moulinette.utils.log import getActionLogger
|
||||
from moulinette.utils.filesystem import mkdir, rm
|
||||
|
||||
from yunohost.tools import Migration
|
||||
from yunohost.service import _run_service_command
|
||||
from yunohost.regenconf import regen_conf
|
||||
from yunohost.settings import settings_set
|
||||
from yunohost.utils.error import YunohostError
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
SSHD_CONF = '/etc/ssh/sshd_config'
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"""
|
||||
This is the first step of a couple of migrations that ensure SSH conf is
|
||||
managed by YunoHost (even if the "from_script" flag is present, which was
|
||||
previously preventing it from being managed by YunoHost)
|
||||
|
||||
The goal of this first (automatic) migration is to make sure that the
|
||||
sshd_config is managed by the regen-conf mechanism.
|
||||
|
||||
If the from_script flag exists, then we keep the current SSH conf such that it
|
||||
will appear as "manually modified" to the regenconf.
|
||||
|
||||
In step 2 (manual), the admin will be able to choose whether or not to actually
|
||||
use the recommended configuration, with an appropriate disclaimer.
|
||||
"""
|
||||
|
||||
def run(self):
|
||||
|
||||
# Check if deprecated DSA Host Key is in config
|
||||
dsa_rgx = r'^[ \t]*HostKey[ \t]+/etc/ssh/ssh_host_dsa_key[ \t]*(?:#.*)?$'
|
||||
dsa = False
|
||||
for line in open(SSHD_CONF):
|
||||
if re.match(dsa_rgx, line) is not None:
|
||||
dsa = True
|
||||
break
|
||||
if dsa:
|
||||
settings_set("service.ssh.allow_deprecated_dsa_hostkey", True)
|
||||
|
||||
# Here, we make it so that /etc/ssh/sshd_config is managed
|
||||
# by the regen conf (in particular in the case where the
|
||||
# from_script flag is present - in which case it was *not*
|
||||
# managed by the regenconf)
|
||||
# But because we can't be sure the user wants to use the
|
||||
# recommended conf, we backup then restore the /etc/ssh/sshd_config
|
||||
# right after the regenconf, such that it will appear as
|
||||
# "manually modified".
|
||||
if os.path.exists('/etc/yunohost/from_script'):
|
||||
rm('/etc/yunohost/from_script')
|
||||
copyfile(SSHD_CONF, '/etc/ssh/sshd_config.bkp')
|
||||
regen_conf(names=['ssh'], force=True)
|
||||
copyfile('/etc/ssh/sshd_config.bkp', SSHD_CONF)
|
||||
|
||||
# Restart ssh and rollback if it failed
|
||||
if not _run_service_command('restart', 'ssh'):
|
||||
# We don't rollback completely but it should be enough
|
||||
copyfile('/etc/ssh/sshd_config.bkp', SSHD_CONF)
|
||||
if not _run_service_command('restart', 'ssh'):
|
||||
raise YunohostError("migration_0007_cannot_restart")
|
||||
else:
|
||||
raise YunohostError("migration_0007_cancelled")
|
|
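The DSA detection above boils down to scanning sshd_config for an uncommented HostKey line pointing at the DSA key. As a standalone sketch using the same regex:

import re

DSA_RGX = r'^[ \t]*HostKey[ \t]+/etc/ssh/ssh_host_dsa_key[ \t]*(?:#.*)?$'

def uses_deprecated_dsa_hostkey(path="/etc/ssh/sshd_config"):
    with open(path) as f:
        return any(re.match(DSA_RGX, line) for line in f)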
@@ -1,105 +0,0 @@
|
|||
import os
|
||||
import re
|
||||
|
||||
from moulinette import m18n
|
||||
from moulinette.utils.log import getActionLogger
|
||||
from moulinette.utils.filesystem import chown
|
||||
|
||||
from yunohost.tools import Migration
|
||||
from yunohost.regenconf import _get_conf_hashes, _calculate_hash
|
||||
from yunohost.regenconf import regen_conf
|
||||
from yunohost.settings import settings_set, settings_get
|
||||
from yunohost.utils.error import YunohostError
|
||||
from yunohost.backup import ARCHIVES_PATH
|
||||
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
SSHD_CONF = '/etc/ssh/sshd_config'
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"""
|
||||
In this second step, the admin is asked if it's okay to use
|
||||
the recommended SSH configuration - which also implies
|
||||
disabling deprecated DSA key.
|
||||
|
||||
This has important implications in the way the user may connect
|
||||
to its server (key change, and a spooky warning might be given
|
||||
by SSH later)
|
||||
|
||||
A disclaimer explaining the various things to be aware of is
|
||||
shown - and the user may also choose to skip this migration.
|
||||
"""
|
||||
|
||||
dependencies = ["ssh_conf_managed_by_yunohost_step1"]
|
||||
|
||||
def run(self):
|
||||
settings_set("service.ssh.allow_deprecated_dsa_hostkey", False)
|
||||
regen_conf(names=['ssh'], force=True)
|
||||
|
||||
# Update local archives folder permissions, so that
|
||||
# admin can scp archives out of the server
|
||||
if os.path.isdir(ARCHIVES_PATH):
|
||||
chown(ARCHIVES_PATH, uid="admin", gid="root")
|
||||
|
||||
@property
|
||||
def mode(self):
|
||||
|
||||
# If the conf is already up to date
|
||||
# and no DSA key is used, then we're good to go
|
||||
# and the migration can be done automatically
|
||||
# (basically nothing shall change)
|
||||
ynh_hash = _get_conf_hashes('ssh').get(SSHD_CONF, None)
|
||||
current_hash = _calculate_hash(SSHD_CONF)
|
||||
dsa = settings_get("service.ssh.allow_deprecated_dsa_hostkey")
|
||||
if ynh_hash == current_hash and not dsa:
|
||||
return "auto"
|
||||
|
||||
return "manual"
|
||||
|
||||
@property
|
||||
def disclaimer(self):
|
||||
|
||||
if self.mode == "auto":
|
||||
return None
|
||||
|
||||
# Detect key things to be aware of before enabling the
|
||||
# recommended configuration
|
||||
dsa_key_enabled = False
|
||||
ports = []
|
||||
root_login = []
|
||||
port_rgx = r'^[ \t]*Port[ \t]+(\d+)[ \t]*(?:#.*)?$'
|
||||
root_rgx = r'^[ \t]*PermitRootLogin[ \t]([^# \t]*)[ \t]*(?:#.*)?$'
|
||||
dsa_rgx = r'^[ \t]*HostKey[ \t]+/etc/ssh/ssh_host_dsa_key[ \t]*(?:#.*)?$'
|
||||
for line in open(SSHD_CONF):
|
||||
|
||||
ports = ports + re.findall(port_rgx, line)
|
||||
|
||||
root_login = root_login + re.findall(root_rgx, line)
|
||||
|
||||
if not dsa_key_enabled and re.match(dsa_rgx, line) is not None:
|
||||
dsa_key_enabled = True
|
||||
|
||||
custom_port = ports != ['22'] and ports != []
|
||||
root_login_enabled = root_login and root_login[-1] != 'no'
|
||||
|
||||
# Build message
|
||||
message = m18n.n("migration_0008_general_disclaimer")
|
||||
|
||||
if custom_port:
|
||||
message += "\n\n" + m18n.n("migration_0008_port")
|
||||
|
||||
if root_login_enabled:
|
||||
message += "\n\n" + m18n.n("migration_0008_root")
|
||||
|
||||
if dsa_key_enabled:
|
||||
message += "\n\n" + m18n.n("migration_0008_dsa")
|
||||
|
||||
if custom_port or root_login_enabled or dsa_key_enabled:
|
||||
message += "\n\n" + m18n.n("migration_0008_warning")
|
||||
else:
|
||||
message += "\n\n" + m18n.n("migration_0008_no_warning")
|
||||
|
||||
return message
|
|
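For reference, the disclaimer above is derived from a few facts about the current sshd_config. A compact sketch of the same extraction (same regexes as above, simplified return value):

import re

PORT_RGX = r'^[ \t]*Port[ \t]+(\d+)[ \t]*(?:#.*)?$'
ROOT_RGX = r'^[ \t]*PermitRootLogin[ \t]([^# \t]*)[ \t]*(?:#.*)?$'

def sshd_facts(path="/etc/ssh/sshd_config"):
    ports, root_login = [], []
    with open(path) as f:
        for line in f:
            ports += re.findall(PORT_RGX, line)
            root_login += re.findall(ROOT_RGX, line)
    return {
        "custom_port": ports not in ([], ['22']),
        "root_login_enabled": bool(root_login) and root_login[-1] != 'no',
    }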
@@ -1,39 +0,0 @@
|
|||
import os
|
||||
|
||||
from moulinette import m18n
|
||||
from moulinette.utils.log import getActionLogger
|
||||
|
||||
from moulinette.utils.filesystem import read_file
|
||||
from yunohost.service import _get_services, _save_services
|
||||
from yunohost.regenconf import _update_conf_hashes, REGEN_CONF_FILE
|
||||
|
||||
from yunohost.tools import Migration
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
"""
|
||||
Decouple the regen conf mechanism from the concept of services
|
||||
"""
|
||||
|
||||
def run(self):
|
||||
|
||||
if "conffiles" not in read_file("/etc/yunohost/services.yml") \
|
||||
or os.path.exists(REGEN_CONF_FILE):
|
||||
logger.warning(m18n.n("migration_0009_not_needed"))
|
||||
return
|
||||
|
||||
# For all services
|
||||
services = _get_services()
|
||||
for service, infos in services.items():
|
||||
# If there are some conffiles (file hashes)
|
||||
if "conffiles" in infos.keys():
|
||||
# Save them using the new regen conf thingy
|
||||
_update_conf_hashes(service, infos["conffiles"])
|
||||
# And delete the old conffile key from the service infos
|
||||
del services[service]["conffiles"]
|
||||
|
||||
# (Actually save the modification of services)
|
||||
_save_services(services)
|
||||
|
|
@@ -1,13 +0,0 @@
|
|||
from moulinette.utils.log import getActionLogger
|
||||
from yunohost.tools import Migration
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"Migrate from official.json to apps.json (outdated, replaced by migration 13)"
|
||||
|
||||
def run(self):
|
||||
logger.info("This migration is oudated and doesn't do anything anymore. The migration 13 will handle this instead.")
|
||||
pass
|
|
@@ -1,181 +0,0 @@
|
|||
import time
|
||||
import os
|
||||
|
||||
from moulinette import m18n
|
||||
from yunohost.utils.error import YunohostError
|
||||
from moulinette.utils.log import getActionLogger
|
||||
from moulinette.utils.filesystem import read_yaml
|
||||
|
||||
from yunohost.tools import Migration
|
||||
from yunohost.user import user_list, user_group_create, user_group_update
|
||||
from yunohost.app import app_setting, _installed_apps
|
||||
from yunohost.regenconf import regen_conf, BACKUP_CONF_DIR
|
||||
from yunohost.permission import permission_create, user_permission_update, permission_sync_to_user
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
###################################################
|
||||
# Tools used also for restoration
|
||||
###################################################
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
"""
|
||||
Update the LDAP DB to be able to store the permission
|
||||
Create a group for each yunohost user
|
||||
Migrate app permission from apps setting to LDAP
|
||||
"""
|
||||
|
||||
required = True
|
||||
|
||||
def remove_if_exists(self, target):
|
||||
|
||||
from yunohost.utils.ldap import _get_ldap_interface
|
||||
ldap = _get_ldap_interface()
|
||||
|
||||
try:
|
||||
objects = ldap.search(target + ",dc=yunohost,dc=org")
|
||||
# ldap search will raise an exception if no corresponding object is found >.> ...
|
||||
except Exception as e:
|
||||
logger.debug("%s does not exist, no need to delete it" % target)
|
||||
return
|
||||
|
||||
objects.reverse()
|
||||
for o in objects:
|
||||
for dn in o["dn"]:
|
||||
dn = dn.replace(",dc=yunohost,dc=org", "")
|
||||
logger.debug("Deleting old object %s ..." % dn)
|
||||
try:
|
||||
ldap.remove(dn)
|
||||
except Exception as e:
|
||||
raise YunohostError("migration_0011_failed_to_remove_stale_object", dn=dn, error=e)
|
||||
|
||||
def migrate_LDAP_db(self):
|
||||
|
||||
logger.info(m18n.n("migration_0011_update_LDAP_database"))
|
||||
|
||||
from yunohost.utils.ldap import _get_ldap_interface
|
||||
ldap = _get_ldap_interface()
|
||||
|
||||
ldap_map = read_yaml('/usr/share/yunohost/yunohost-config/moulinette/ldap_scheme.yml')
|
||||
|
||||
try:
|
||||
self.remove_if_exists("ou=permission")
|
||||
self.remove_if_exists('ou=groups')
|
||||
|
||||
attr_dict = ldap_map['parents']['ou=permission']
|
||||
ldap.add('ou=permission', attr_dict)
|
||||
|
||||
attr_dict = ldap_map['parents']['ou=groups']
|
||||
ldap.add('ou=groups', attr_dict)
|
||||
|
||||
attr_dict = ldap_map['children']['cn=all_users,ou=groups']
|
||||
ldap.add('cn=all_users,ou=groups', attr_dict)
|
||||
|
||||
attr_dict = ldap_map['children']['cn=visitors,ou=groups']
|
||||
ldap.add('cn=visitors,ou=groups', attr_dict)
|
||||
|
||||
for rdn, attr_dict in ldap_map['depends_children'].items():
|
||||
ldap.add(rdn, attr_dict)
|
||||
except Exception as e:
|
||||
raise YunohostError("migration_0011_LDAP_update_failed", error=e)
|
||||
|
||||
logger.info(m18n.n("migration_0011_create_group"))
|
||||
|
||||
# Create a group for each yunohost user
|
||||
user_list = ldap.search('ou=users,dc=yunohost,dc=org',
|
||||
'(&(objectclass=person)(!(uid=root))(!(uid=nobody)))',
|
||||
['uid', 'uidNumber'])
|
||||
for user_info in user_list:
|
||||
username = user_info['uid'][0]
|
||||
ldap.update('uid=%s,ou=users' % username,
|
||||
{'objectClass': ['mailAccount', 'inetOrgPerson', 'posixAccount', 'userPermissionYnh']})
|
||||
user_group_create(username, gid=user_info['uidNumber'][0], primary_group=True, sync_perm=False)
|
||||
user_group_update(groupname='all_users', add=username, force=True, sync_perm=False)
|
||||
|
||||
def migrate_app_permission(self, app=None):
|
||||
logger.info(m18n.n("migration_0011_migrate_permission"))
|
||||
|
||||
apps = _installed_apps()
|
||||
|
||||
if app:
|
||||
if app not in apps:
|
||||
logger.error("Can't migrate permission for app %s because it ain't installed..." % app)
|
||||
apps = []
|
||||
else:
|
||||
apps = [app]
|
||||
|
||||
for app in apps:
|
||||
permission = app_setting(app, 'allowed_users')
|
||||
path = app_setting(app, 'path')
|
||||
domain = app_setting(app, 'domain')
|
||||
|
||||
url = "/" if domain and path else None
|
||||
if permission:
|
||||
known_users = user_list()["users"].keys()
|
||||
allowed = [user for user in permission.split(',') if user in known_users]
|
||||
else:
|
||||
allowed = ["all_users"]
|
||||
permission_create(app+".main", url=url, allowed=allowed, protected=False, sync_perm=False)
|
||||
|
||||
app_setting(app, 'allowed_users', delete=True)
|
||||
|
||||
# Migrate classic public app still using the legacy unprotected_uris
|
||||
if app_setting(app, "unprotected_uris") == "/" or app_setting(app, "skipped_uris") == "/":
|
||||
user_permission_update(app+".main", add="visitors", sync_perm=False)
|
||||
|
||||
permission_sync_to_user()
|
||||
|
||||
def run(self):
|
||||
|
||||
# FIXME : what do we really want to do here ...
|
||||
# Imho we should just force-regen the conf in all case, and maybe
|
||||
# just display a warning if we detect that the conf was manually modified
|
||||
|
||||
# Check if the migration can be processed
|
||||
ldap_regen_conf_status = regen_conf(names=['slapd'], dry_run=True)
|
||||
# By this we check if they have been customized
|
||||
if ldap_regen_conf_status and ldap_regen_conf_status['slapd']['pending']:
|
||||
logger.warning(m18n.n("migration_0011_slapd_config_will_be_overwritten", conf_backup_folder=BACKUP_CONF_DIR))
|
||||
|
||||
# Backup LDAP and the apps settings before to do the migration
|
||||
logger.info(m18n.n("migration_0011_backup_before_migration"))
|
||||
try:
|
||||
backup_folder = "/home/yunohost.backup/premigration/" + time.strftime('%Y%m%d-%H%M%S', time.gmtime())
|
||||
os.makedirs(backup_folder, 0o750)
|
||||
os.system("systemctl stop slapd")
|
||||
os.system("cp -r --preserve /etc/ldap %s/ldap_config" % backup_folder)
|
||||
os.system("cp -r --preserve /var/lib/ldap %s/ldap_db" % backup_folder)
|
||||
os.system("cp -r --preserve /etc/yunohost/apps %s/apps_settings" % backup_folder)
|
||||
except Exception as e:
|
||||
raise YunohostError("migration_0011_can_not_backup_before_migration", error=e)
|
||||
finally:
|
||||
os.system("systemctl start slapd")
|
||||
|
||||
try:
|
||||
# Update LDAP schema restart slapd
|
||||
logger.info(m18n.n("migration_0011_update_LDAP_schema"))
|
||||
regen_conf(names=['slapd'], force=True)
|
||||
|
||||
# Update LDAP database
|
||||
self.migrate_LDAP_db()
|
||||
|
||||
# Migrate permission
|
||||
self.migrate_app_permission()
|
||||
|
||||
permission_sync_to_user()
|
||||
except Exception as e:
|
||||
logger.warn(m18n.n("migration_0011_migration_failed_trying_to_rollback"))
|
||||
os.system("systemctl stop slapd")
|
||||
os.system("rm -r /etc/ldap/slapd.d") # To be sure that we don't keep some part of the old config
|
||||
os.system("cp -r --preserve %s/ldap_config/. /etc/ldap/" % backup_folder)
|
||||
os.system("cp -r --preserve %s/ldap_db/. /var/lib/ldap/" % backup_folder)
|
||||
os.system("cp -r --preserve %s/apps_settings/. /etc/yunohost/apps/" % backup_folder)
|
||||
os.system("systemctl start slapd")
|
||||
os.system("rm -r " + backup_folder)
|
||||
logger.info(m18n.n("migration_0011_rollback_success"))
|
||||
raise
|
||||
else:
|
||||
os.system("rm -r " + backup_folder)
|
||||
|
||||
logger.info(m18n.n("migration_0011_done"))
|
|
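The permission part of this migration mostly converts the legacy comma-separated 'allowed_users' app setting into a permission allowed-list. In isolation, that conversion looks roughly like this (hypothetical helper, for illustration only):

def legacy_allowed_users_to_allowed(setting, known_users):
    # None/empty  -> everyone (the "all_users" group)
    # "a,b,ghost" -> only the names that still exist as users
    if not setting:
        return ["all_users"]
    return [u for u in setting.split(',') if u in known_users]

# legacy_allowed_users_to_allowed("alice,bob,ghost", {"alice", "bob"})
#   == ["alice", "bob"]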
@@ -1,16 +0,0 @@
|
|||
import glob
|
||||
import re
|
||||
from yunohost.tools import Migration
|
||||
from moulinette.utils.filesystem import read_file, write_to_file
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"Force authentication in md5 for local connexions"
|
||||
|
||||
all_hba_files = glob.glob("/etc/postgresql/*/*/pg_hba.conf")
|
||||
|
||||
def run(self):
|
||||
for filename in self.all_hba_files:
|
||||
pg_hba_in = read_file(filename)
|
||||
write_to_file(filename, re.sub(r"local(\s*)all(\s*)all(\s*)password", "local\\1all\\2all\\3md5", pg_hba_in))
|
|
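To see what the substitution above does to a pg_hba.conf line, here is a small worked example (not extra migration code):

import re

line = "local   all   all   password"
fixed = re.sub(r"local(\s*)all(\s*)all(\s*)password",
               "local\\1all\\2all\\3md5", line)
# fixed == "local   all   all   md5"  (whitespace preserved by the capture groups)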
@@ -1,51 +0,0 @@
|
|||
|
||||
import os
|
||||
import shutil
|
||||
|
||||
from moulinette.utils.log import getActionLogger
|
||||
from moulinette.utils.filesystem import read_json
|
||||
|
||||
from yunohost.tools import Migration
|
||||
from yunohost.app import (_initialize_apps_catalog_system,
|
||||
_update_apps_catalog,
|
||||
APPS_CATALOG_CACHE,
|
||||
APPS_CATALOG_CONF)
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
LEGACY_APPS_CATALOG_CONF = '/etc/yunohost/appslists.json'
|
||||
LEGACY_APPS_CATALOG_CONF_BACKUP = LEGACY_APPS_CATALOG_CONF + ".old"
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"Migrate to the new future-proof apps catalog system"
|
||||
|
||||
def run(self):
|
||||
|
||||
if not os.path.exists(LEGACY_APPS_CATALOG_CONF):
|
||||
logger.info("No need to do anything")
|
||||
|
||||
# Destroy old legacy cache
|
||||
if os.path.exists(APPS_CATALOG_CACHE):
|
||||
shutil.rmtree(APPS_CATALOG_CACHE)
|
||||
|
||||
# and legacy cron
|
||||
if os.path.exists("/etc/cron.daily/yunohost-fetch-appslists"):
|
||||
os.remove("/etc/cron.daily/yunohost-fetch-appslists")
|
||||
|
||||
# Backup the legacy file
|
||||
try:
|
||||
legacy_catalogs = read_json(LEGACY_APPS_CATALOG_CONF)
|
||||
# If there's only one catalog, we assume it's just the old official catalog
|
||||
# Otherwise, warn the (power-?)users that they should migrate their old catalogs manually
|
||||
if len(legacy_catalogs) > 1:
|
||||
logger.warning("It looks like you had additional apps_catalog in the configuration file %s! YunoHost now uses %s instead, but it won't migrate your custom apps_catalog. You should do this manually. The old file has been backuped in %s." % (LEGACY_APPS_CATALOG_CONF, APPS_CATALOG_CONF, LEGACY_APPS_CATALOG_CONF_BACKUP))
|
||||
except Exception as e:
|
||||
logger.warning("Unable to parse the legacy conf %s (error : %s) ... migrating anyway" % (LEGACY_APPS_CATALOG_CONF, str(e)))
|
||||
|
||||
if os.path.exists(LEGACY_APPS_CATALOG_CONF):
|
||||
os.rename(LEGACY_APPS_CATALOG_CONF, LEGACY_APPS_CATALOG_CONF_BACKUP)
|
||||
|
||||
_initialize_apps_catalog_system()
|
||||
_update_apps_catalog()
|
|
@@ -1,31 +0,0 @@
|
|||
import os
|
||||
|
||||
from moulinette.utils.log import getActionLogger
|
||||
from moulinette.utils.filesystem import read_json
|
||||
|
||||
from yunohost.tools import Migration
|
||||
from yunohost.app import app_setting, APPS_SETTING_PATH
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"""Remove legacy app status.json files"""
|
||||
|
||||
def run(self):
|
||||
|
||||
apps = os.listdir(APPS_SETTING_PATH)
|
||||
|
||||
for app in apps:
|
||||
status_file = os.path.join(APPS_SETTING_PATH, app, "status.json")
|
||||
if not os.path.exists(status_file):
|
||||
continue
|
||||
|
||||
try:
|
||||
status = read_json(status_file)
|
||||
current_revision = status.get("remote", {}).get("revision", "?")
|
||||
app_setting(app, 'current_revision', current_revision)
|
||||
except Exception as e:
|
||||
logger.warning("Could not migrate status.json from app %s: %s", (app, str(e)))
|
||||
else:
|
||||
os.system("rm %s" % status_file)
|
244
src/yunohost/data_migrations/0015_migrate_to_buster.py
Normal file
|
@@ -0,0 +1,244 @@
|
|||
|
||||
import glob
|
||||
import os
|
||||
|
||||
from moulinette import m18n
|
||||
from yunohost.utils.error import YunohostError
|
||||
from moulinette.utils.log import getActionLogger
|
||||
from moulinette.utils.process import check_output, call_async_output
|
||||
from moulinette.utils.filesystem import read_file
|
||||
|
||||
from yunohost.tools import Migration, tools_update, tools_upgrade
|
||||
from yunohost.app import unstable_apps
|
||||
from yunohost.regenconf import manually_modified_files
|
||||
from yunohost.utils.filesystem import free_space_in_directory
|
||||
from yunohost.utils.packages import get_ynh_package_version, _list_upgradable_apt_packages
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"Upgrade the system to Debian Buster and Yunohost 4.x"
|
||||
|
||||
mode = "manual"
|
||||
|
||||
def run(self):
|
||||
|
||||
self.check_assertions()
|
||||
|
||||
logger.info(m18n.n("migration_0015_start"))
|
||||
|
||||
#
|
||||
# Make sure certificates do not use weak signature hash algorithms (md5, sha1)
|
||||
# otherwise nginx will later refuse to start which results in a
|
||||
# catastrophic situation
|
||||
#
|
||||
self.validate_and_upgrade_cert_if_necessary()
|
||||
|
||||
#
|
||||
# Patch sources.list
|
||||
#
|
||||
logger.info(m18n.n("migration_0015_patching_sources_list"))
|
||||
self.patch_apt_sources_list()
|
||||
tools_update(system=True)
|
||||
|
||||
# Tell libc6 it's okay to restart system stuff during the upgrade
|
||||
os.system("echo 'libc6 libraries/restart-without-asking boolean true' | debconf-set-selections")
|
||||
|
||||
# Don't send an email to root about the postgresql migration. It should be handled automatically after.
|
||||
os.system("echo 'postgresql-common postgresql-common/obsolete-major seen true' | debconf-set-selections")
|
||||
|
||||
#
|
||||
# Specific packages upgrades
|
||||
#
|
||||
logger.info(m18n.n("migration_0015_specific_upgrade"))
|
||||
|
||||
# Update unscd independently, was 0.53-1+yunohost on stretch (custom build of ours) but now it's 0.53-1+b1 on vanilla buster,
|
||||
# which for apt appears as a lower version (hence the --allow-downgrades and the hardcoded version number)
|
||||
unscd_version = check_output('dpkg -s unscd | grep "^Version: " | cut -d " " -f 2')
|
||||
if "yunohost" in unscd_version:
|
||||
new_version = check_output("LC_ALL=C apt policy unscd 2>/dev/null | grep -v '\\*\\*\\*' | grep http -B1 | head -n 1 | awk '{print $1}'").strip()
|
||||
if new_version:
|
||||
self.apt_install('unscd=%s --allow-downgrades' % new_version)
|
||||
else:
|
||||
logger.warning("Could not identify which version of unscd to install")
|
||||
|
||||
# Upgrade libpam-modules independently, small issue related to willing to overwrite a file previously provided by Yunohost
|
||||
libpammodules_version = check_output('dpkg -s libpam-modules | grep "^Version: " | cut -d " " -f 2')
|
||||
if not libpammodules_version.startswith("1.3"):
|
||||
self.apt_install('libpam-modules -o Dpkg::Options::="--force-overwrite"')
|
||||
|
||||
#
|
||||
# Main upgrade
|
||||
#
|
||||
logger.info(m18n.n("migration_0015_main_upgrade"))
|
||||
|
||||
apps_packages = self.get_apps_equivs_packages()
|
||||
self.hold(apps_packages)
|
||||
tools_upgrade(system=True, allow_yunohost_upgrade=False)
|
||||
|
||||
if self.debian_major_version() == 9:
|
||||
raise YunohostError("migration_0015_still_on_stretch_after_main_upgrade")
|
||||
|
||||
# Clean the mess
|
||||
logger.info(m18n.n("migration_0015_cleaning_up"))
|
||||
os.system("apt autoremove --assume-yes")
|
||||
os.system("apt clean --assume-yes")
|
||||
|
||||
#
|
||||
# Yunohost upgrade
|
||||
#
|
||||
logger.info(m18n.n("migration_0015_yunohost_upgrade"))
|
||||
self.unhold(apps_packages)
|
||||
tools_upgrade(system=True)
|
||||
|
||||
def debian_major_version(self):
|
||||
# The python module "platform" and lsb_release are not reliable because
|
||||
# on some setup, they may still return Release=9 even after upgrading to
|
||||
# buster ... (Apparently this is related to OVH overriding some stuff
|
||||
# with /etc/lsb-release for instance -_-)
|
||||
# Instead, we rely on /etc/os-release which should be the raw info from
|
||||
# the distribution...
|
||||
return int(check_output("grep VERSION_ID /etc/os-release | head -n 1 | tr '\"' ' ' | cut -d ' ' -f2"))
|
||||
|
||||
def yunohost_major_version(self):
|
||||
return int(get_ynh_package_version("yunohost")["version"].split('.')[0])
|
||||
|
||||
def check_assertions(self):
|
||||
|
||||
# Be on stretch (9.x) and yunohost 3.x
|
||||
# NB : we do both check to cover situations where the upgrade crashed
|
||||
# in the middle and debian version could be > 9.x but yunohost package
|
||||
# would still be in 3.x...
|
||||
if not self.debian_major_version() == 9 \
|
||||
and not self.yunohost_major_version() == 3:
|
||||
raise YunohostError("migration_0015_not_stretch")
|
||||
|
||||
# Have > 1 Go free space on /var/ ?
|
||||
if free_space_in_directory("/var/") / (1024**3) < 1.0:
|
||||
raise YunohostError("migration_0015_not_enough_free_space")
|
||||
|
||||
# Check system is up to date
|
||||
# (but we don't if 'buster' is already in the sources.list ...
|
||||
# which means maybe a previous upgrade crashed and we're re-running it)
|
||||
if " buster " not in read_file("/etc/apt/sources.list"):
|
||||
tools_update(system=True)
|
||||
upgradable_system_packages = list(_list_upgradable_apt_packages())
|
||||
if upgradable_system_packages:
|
||||
raise YunohostError("migration_0015_system_not_fully_up_to_date")
|
||||
|
||||
@property
|
||||
def disclaimer(self):
|
||||
|
||||
# Avoid having a super long disclaimer + unnecessary check if we ain't
|
||||
# on stretch / yunohost 3.x anymore
|
||||
# NB : we do both check to cover situations where the upgrade crashed
|
||||
# in the middle and debian version could be >= 10.x but yunohost package
|
||||
# would still be in 3.x...
|
||||
if not self.debian_major_version() == 9 \
|
||||
and not self.yunohost_major_version() == 3:
|
||||
return None
|
||||
|
||||
# Get list of problematic apps ? I.e. not official or community+working
|
||||
problematic_apps = unstable_apps()
|
||||
problematic_apps = "".join(["\n - " + app for app in problematic_apps])
|
||||
|
||||
# Manually modified files ? (c.f. yunohost service regen-conf)
|
||||
modified_files = manually_modified_files()
|
||||
modified_files = "".join(["\n - " + f for f in modified_files])
|
||||
|
||||
message = m18n.n("migration_0015_general_warning")
|
||||
|
||||
message = "N.B.: This migration has been tested by the community over the last few months but has only been declared stable recently. If your server hosts critical services and if you are not too confident with debugging possible issues, we recommend you to wait a little bit more while we gather more feedback and polish things up. If on the other hand you are relatively confident with debugging small issues that may arise, you are encouraged to run this migration ;)! You can read about remaining known issues and feedback from the community here: https://forum.yunohost.org/t/12195\n\n" + message
|
||||
|
||||
if problematic_apps:
|
||||
message += "\n\n" + m18n.n("migration_0015_problematic_apps_warning", problematic_apps=problematic_apps)
|
||||
|
||||
if modified_files:
|
||||
message += "\n\n" + m18n.n("migration_0015_modified_files", manually_modified_files=modified_files)
|
||||
|
||||
return message
|
||||
|
||||
def patch_apt_sources_list(self):
|
||||
|
||||
sources_list = glob.glob("/etc/apt/sources.list.d/*.list")
|
||||
sources_list.append("/etc/apt/sources.list")
|
||||
|
||||
# This :
|
||||
# - replace single 'stretch' occurrence by 'buster'
|
||||
# - comment out lines containing "backports"
|
||||
# - replace 'stretch/updates' by 'buster/updates' (and same with 'stretch-')
|
||||
for f in sources_list:
|
||||
command = "sed -i -e 's@ stretch @ buster @g' " \
|
||||
"-e '/backports/ s@^#*@#@' " \
|
||||
"-e 's@ stretch/updates @ buster/updates @g' " \
|
||||
"-e 's@ stretch-@ buster-@g' " \
|
||||
"{}".format(f)
|
||||
os.system(command)
|
||||
|
||||
def get_apps_equivs_packages(self):
|
||||
|
||||
command = "dpkg --get-selections" \
|
||||
" | grep -v deinstall" \
|
||||
" | awk '{print $1}'" \
|
||||
" | { grep 'ynh-deps$' || true; }"
|
||||
|
||||
output = check_output(command).strip()
|
||||
|
||||
return output.split('\n') if output else []
|
||||
|
||||
def hold(self, packages):
|
||||
for package in packages:
|
||||
os.system("apt-mark hold {}".format(package))
|
||||
|
||||
def unhold(self, packages):
|
||||
for package in packages:
|
||||
os.system("apt-mark unhold {}".format(package))
|
||||
|
||||
def apt_install(self, cmd):
|
||||
|
||||
def is_relevant(l):
|
||||
return "Reading database ..." not in l.rstrip()
|
||||
|
||||
callbacks = (
|
||||
lambda l: logger.info("+ " + l.rstrip() + "\r") if is_relevant(l) else logger.debug(l.rstrip() + "\r"),
|
||||
lambda l: logger.warning(l.rstrip()),
|
||||
)
|
||||
|
||||
cmd = "LC_ALL=C DEBIAN_FRONTEND=noninteractive APT_LISTCHANGES_FRONTEND=none apt install --quiet -o=Dpkg::Use-Pty=0 --fix-broken --assume-yes " + cmd
|
||||
|
||||
logger.debug("Running: %s" % cmd)
|
||||
|
||||
call_async_output(cmd, callbacks, shell=True)
|
||||
|
||||
def validate_and_upgrade_cert_if_necessary(self):
|
||||
|
||||
active_certs = set(check_output("grep -roh '/.*crt.pem' /etc/nginx/").strip().split("\n"))
|
||||
|
||||
cmd = "LC_ALL=C openssl x509 -in %s -text -noout | grep -i 'Signature Algorithm:' | awk '{print $3}' | uniq"
|
||||
|
||||
default_crt = '/etc/yunohost/certs/yunohost.org/crt.pem'
|
||||
default_key = '/etc/yunohost/certs/yunohost.org/key.pem'
|
||||
default_signature = check_output(cmd % default_crt).strip() if default_crt in active_certs else None
|
||||
if default_signature is not None and (default_signature.startswith("md5") or default_signature.startswith("sha1")):
|
||||
logger.warning("%s is using a pretty old certificate incompatible with newer versions of nginx ... attempting to regenerate a fresh one" % default_crt)
|
||||
|
||||
os.system("mv %s %s.old" % (default_crt, default_crt))
|
||||
os.system("mv %s %s.old" % (default_key, default_key))
|
||||
ret = os.system("/usr/share/yunohost/hooks/conf_regen/02-ssl init")
|
||||
|
||||
if ret != 0 or not os.path.exists(default_crt):
|
||||
logger.error("Upgrading the certificate failed ... reverting")
|
||||
os.system("mv %s.old %s" % (default_crt, default_crt))
|
||||
os.system("mv %s.old %s" % (default_key, default_key))
|
||||
|
||||
signatures = {cert: check_output(cmd % cert).strip() for cert in active_certs}
|
||||
|
||||
def cert_is_weak(cert):
|
||||
sig = signatures[cert]
|
||||
return sig.startswith("md5") or sig.startswith("sha1")
|
||||
|
||||
weak_certs = [cert for cert in signatures.keys() if cert_is_weak(cert)]
|
||||
if weak_certs:
|
||||
raise YunohostError("migration_0015_weak_certs", certs=", ".join(weak_certs))
|
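validate_and_upgrade_cert_if_necessary() shells out to openssl to read the signature algorithm of each certificate nginx references. The same check written out as a small sketch, using subprocess instead of the grep/awk pipeline (illustrative only):

import subprocess

def signature_algorithm(cert_path):
    # Extract the "Signature Algorithm" line from the certificate text dump.
    text = subprocess.check_output(
        ["openssl", "x509", "-in", cert_path, "-text", "-noout"]).decode()
    for line in text.splitlines():
        if "Signature Algorithm:" in line:
            return line.split(":", 1)[1].strip().lower()
    return ""

def cert_is_weak(cert_path):
    algo = signature_algorithm(cert_path)
    return algo.startswith("md5") or algo.startswith("sha1")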
Some files were not shown because too many files have changed in this diff.