Mirror of https://github.com/YunoHost/yunohost.git (synced 2024-09-03 20:06:10 +02:00)
Merge branch 'dev' into permission_protection
Commit e6da35abd1: 87 changed files with 1982 additions and 2169 deletions
.github/PULL_REQUEST_TEMPLATE.md (vendored): 7 changed lines
@@ -13,10 +13,3 @@
## How to test
...
## Validation
- [ ] Principle agreement 0/2 :
- [ ] Quick review 0/1 :
- [ ] Simple test 0/1 :
- [ ] Deep review 0/1 :
@@ -38,7 +38,7 @@ build-ssowat:
variables:
PACKAGE: "ssowat"
script:
- git clone $YNH_SOURCE/$PACKAGE -b $CI_COMMIT_REF_NAME $YNH_BUILD_DIR/$PACKAGE || git clone $YNH_SOURCE/$PACKAGE $YNH_BUILD_DIR/$PACKAGE
- git clone $YNH_SOURCE/$PACKAGE -b $CI_COMMIT_REF_NAME $YNH_BUILD_DIR/$PACKAGE --depth 1 || git clone $YNH_SOURCE/$PACKAGE $YNH_BUILD_DIR/$PACKAGE --depth 1
- DEBIAN_FRONTEND=noninteractive apt --assume-yes -o Dpkg::Options::="--force-confold" build-dep $(pwd)/$YNH_BUILD_DIR/$PACKAGE
- *build_script

@@ -47,6 +47,6 @@ build-moulinette:
variables:
PACKAGE: "moulinette"
script:
- git clone $YNH_SOURCE/$PACKAGE -b $CI_COMMIT_REF_NAME $YNH_BUILD_DIR/$PACKAGE || git clone $YNH_SOURCE/$PACKAGE $YNH_BUILD_DIR/$PACKAGE
- git clone $YNH_SOURCE/$PACKAGE -b $CI_COMMIT_REF_NAME $YNH_BUILD_DIR/$PACKAGE --depth 1 || git clone $YNH_SOURCE/$PACKAGE $YNH_BUILD_DIR/$PACKAGE --depth 1
- DEBIAN_FRONTEND=noninteractive apt --assume-yes -o Dpkg::Options::="--force-confold" build-dep $(pwd)/$YNH_BUILD_DIR/$PACKAGE
- *build_script
@@ -16,7 +16,7 @@ upgrade:
extends: .install-stage
image: "after-install"
script:
- apt update
- apt-get update -o Acquire::Retries=3
- DEBIAN_FRONTEND=noninteractive SUDO_FORCE_REMOVE=yes apt --assume-yes -o Dpkg::Options::="--force-confold" --allow-downgrades install ./$YNH_BUILD_DIR/*.deb

@@ -24,6 +24,6 @@ install-postinstall:
extends: .install-stage
image: "before-install"
script:
- apt update
- apt-get update -o Acquire::Retries=3
- DEBIAN_FRONTEND=noninteractive SUDO_FORCE_REMOVE=yes apt --assume-yes -o Dpkg::Options::="--force-confold" --allow-downgrades install ./$YNH_BUILD_DIR/*.deb
- yunohost tools postinstall -d domain.tld -p the_password --ignore-dyndns
@@ -1,24 +1,43 @@
########################################
# LINTER
########################################
# later we must fix lint and format-check jobs and remove "allow_failure"

lint:
lint27:
stage: lint
image: "before-install"
needs: []
allow_failure: true
script:
- tox -e lint
- tox -e py27-lint

invalidcode:
lint37:
stage: lint
image: "before-install"
needs: []
allow_failure: true
script:
- tox -e py37-lint

invalidcode27:
stage: lint
image: "before-install"
needs: []
script:
- tox -e invalidcode
- tox -e py27-invalidcode

# Disabled, waiting for buster
#format-check:
#  extends: .lint-stage
#  script:
#    - black --check --diff
invalidcode37:
stage: lint
image: "before-install"
allow_failure: true
needs: []
script:
- tox -e py37-invalidcode

format-check:
stage: lint
image: "before-install"
needs: []
allow_failure: true
script:
- tox -e py37-black
@@ -1,5 +1,5 @@
.install_debs: &install_debs
- apt update
- apt-get update -o Acquire::Retries=3
- DEBIAN_FRONTEND=noninteractive SUDO_FORCE_REMOVE=yes apt --assume-yes -o Dpkg::Options::="--force-confold" --allow-downgrades install ./$YNH_BUILD_DIR/*.deb

.test-stage:

@@ -36,7 +36,7 @@ full-tests:
- *install_debs
- yunohost tools postinstall -d domain.tld -p the_password --ignore-dyndns
script:
- pytest --cov=yunohost tests/ src/yunohost/tests/ --junitxml=report.xml
- python -m pytest --cov=yunohost tests/ src/yunohost/tests/ --junitxml=report.xml
needs:
- job: build-yunohost
artifacts: true
@@ -51,70 +51,70 @@ full-tests:
root-tests:
extends: .test-stage
script:
- py.test tests
- python -m pytest tests

test-apps:
extends: .test-stage
script:
- cd src/yunohost
- py.test tests/test_apps.py
- python -m pytest tests/test_apps.py

test-appscatalog:
extends: .test-stage
script:
- cd src/yunohost
- py.test tests/test_appscatalog.py
- python -m pytest tests/test_appscatalog.py

test-appurl:
extends: .test-stage
script:
- cd src/yunohost
- py.test tests/test_appurl.py
- python -m pytest tests/test_appurl.py

test-apps-arguments-parsing:
extends: .test-stage
script:
- cd src/yunohost
- py.test tests/test_apps_arguments_parsing.py
- python -m pytest tests/test_apps_arguments_parsing.py

test-backuprestore:
extends: .test-stage
script:
- cd src/yunohost
- py.test tests/test_backuprestore.py
- python -m pytest tests/test_backuprestore.py

test-changeurl:
extends: .test-stage
script:
- cd src/yunohost
- py.test tests/test_changeurl.py
- python -m pytest tests/test_changeurl.py

test-permission:
extends: .test-stage
script:
- cd src/yunohost
- py.test tests/test_permission.py
- python -m pytest tests/test_permission.py

test-settings:
extends: .test-stage
script:
- cd src/yunohost
- py.test tests/test_settings.py
- python -m pytest tests/test_settings.py

test-user-group:
extends: .test-stage
script:
- cd src/yunohost
- py.test tests/test_user-group.py
- python -m pytest tests/test_user-group.py

test-regenconf:
extends: .test-stage
script:
- cd src/yunohost
- py.test tests/test_regenconf.py
- python -m pytest tests/test_regenconf.py

test-service:
extends: .test-stage
script:
- cd src/yunohost
- py.test tests/test_service.py
- python -m pytest tests/test_service.py
.travis.yml: 12 changed lines

@@ -2,12 +2,18 @@ language: python
matrix:
allow_failures:
- env: TOXENV=lint
- env: TOXENV=py27-lint
- env: TOXENV=py37-lint
- env: TOXENV=py37-invalidcode
include:
- python: 2.7
env: TOXENV=py27
env: TOXENV=py27-lint
- python: 2.7
env: TOXENV=lint
env: TOXENV=py27-invalidcode
- python: 3.7
env: TOXENV=py37-lint
- python: 3.7
env: TOXENV=py37-invalidcode

install:
- pip install tox
@@ -100,7 +100,7 @@ ynh_package_version() {
# Requires YunoHost version 2.4.0.3 or higher.
ynh_apt() {
ynh_wait_dpkg_free
LC_ALL=C DEBIAN_FRONTEND=noninteractive apt-get --assume-yes --quiet -o=Dpkg::Use-Pty=0 $@
LC_ALL=C DEBIAN_FRONTEND=noninteractive apt-get --assume-yes --quiet -o=Acquire::Retries=3 -o=Dpkg::Use-Pty=0 $@
}

# Update package index files
@@ -260,11 +260,11 @@ ynh_install_app_dependencies () {
# And we have packages from sury installed (7.0.33-10+weirdshiftafter instead of 7.0.33-0 on debian)
if dpkg --list | grep "php7.0" | grep --quiet --invert-match "7.0.33-0+deb9"
then
# And sury ain't already installed
if ! grep --line-number --recursive --quiet "sury" /etc/apt/sources.list*
# And sury ain't already in sources.lists
if ! grep --recursive --quiet "^ *deb.*sury" /etc/apt/sources.list*
then
# Re-add sury
ynh_install_extra_repo --repo="https://packages.sury.org/php/ $(ynh_get_debian_release) main" --key="https://packages.sury.org/php/apt.gpg" --name=extra_php_version
ynh_install_extra_repo --repo="https://packages.sury.org/php/ $(ynh_get_debian_release) main" --key="https://packages.sury.org/php/apt.gpg" --name=extra_php_version --priority=600

# Pin this sury repository to prevent sury of doing shit
for package_to_not_upgrade in "php" "php-fpm" "php-mysql" "php-xml" "php-zip" "php-mbstring" "php-ldap" "php-gd" "php-curl" "php-bz2" "php-json" "php-sqlite3" "php-intl" "openssl" "libssl1.1" "libssl-dev"
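Note on the new --priority=600: roughly, it translates into an APT preferences stanza like the sketch below. The actual file is written by ynh_install_extra_repo, so the path and exact wording here are assumptions, not part of this diff; the sketch only illustrates standard apt_preferences syntax.

    # /etc/apt/preferences.d/extra_php_version   (hypothetical path)
    # 600 is above the default 500, so packages explicitly pulled from sury keep
    # tracking sury, while the per-package loop above adds stricter pins so that
    # core packages (php, openssl, ...) are not silently switched to sury builds.
    Package: *
    Pin: origin "packages.sury.org"
    Pin-Priority: 600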
@@ -467,8 +467,8 @@ ynh_remove_extra_repo () {
ynh_secure_remove "/etc/apt/sources.list.d/$name.list"
ynh_secure_remove "/etc/apt/preferences.d/$name"
ynh_secure_remove "/etc/apt/trusted.gpg.d/$name.gpg"
ynh_secure_remove "/etc/apt/trusted.gpg.d/$name.asc"
ynh_secure_remove "/etc/apt/trusted.gpg.d/$name.gpg" > /dev/null
ynh_secure_remove "/etc/apt/trusted.gpg.d/$name.asc" > /dev/null

# Update the list of package to exclude the old repo
ynh_package_update
@@ -80,10 +80,10 @@ ynh_handle_getopts_args () {
arguments[arg]="${arguments[arg]//--${args_array[$option_flag]}-/--${args_array[$option_flag]}\\TOBEREMOVED\\-}"
# And replace long option (value of the option_flag) by the short option, the option_flag itself
# (e.g. for [u]=user, --user will be -u)
# Replace long option with =
arguments[arg]="${arguments[arg]//--${args_array[$option_flag]}/-${option_flag} }"
# And long option without =
arguments[arg]="${arguments[arg]//--${args_array[$option_flag]%=}/-${option_flag}}"
# Replace long option with = (match the beginning of the argument)
arguments[arg]="$(echo "${arguments[arg]}" | sed "s/^--${args_array[$option_flag]}/-${option_flag} /")"
# And long option without = (match the whole line)
arguments[arg]="$(echo "${arguments[arg]}" | sed "s/^--${args_array[$option_flag]%=}$/-${option_flag} /")"
done
done
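Why the switch from bash substitution to anchored sed: the old ${arg//--option/-o } form replaces every occurrence of the long option, including occurrences inside the argument's value, while the sed expressions only rewrite a token at the start of the argument (or the whole line for the no-= case). A standalone illustration, not the helper itself:

    arg='--user=--user-in-a-weird-value'
    # Old approach: global substitution also rewrites the value part
    echo "${arg//--user/-u }"            # -> "-u =-u -in-a-weird-value"
    # New approach: only the leading option token is rewritten
    echo "$arg" | sed "s/^--user/-u /"   # -> "-u =--user-in-a-weird-value"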
@@ -44,8 +44,13 @@ ynh_mysql_execute_as_root() {
ynh_handle_getopts_args "$@"
database="${database:-}"

if [ -n "$database" ]
then
database="--database=$database"
fi

ynh_mysql_connect_as --user="root" --password="$(cat $MYSQL_ROOT_PWD_FILE)" \
--database="$database" <<< "$sql"
$database <<< "$sql"
}

# Execute a command from a file as root user

@@ -65,8 +70,14 @@ ynh_mysql_execute_file_as_root() {
ynh_handle_getopts_args "$@"
database="${database:-}"

if [ -n "$database" ]
then
database="--database=$database"
fi

ynh_mysql_connect_as --user="root" --password="$(cat $MYSQL_ROOT_PWD_FILE)" \
--database="$database" < "$file"
$database < "$file"
}

# Create a database and grant optionnaly privilegies to a user
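The pattern above avoids passing an empty --database= when no database was requested: the flag is only built when $database is non-empty, and it is then expanded unquoted so an empty value produces no argument at all. A minimal standalone sketch of the same pattern (names illustrative, using the mysql client directly instead of the helper):

    database="${database:-}"
    if [ -n "$database" ]; then
        database="--database=$database"
    fi
    # $database intentionally unquoted: empty -> no extra argument
    mysql --user=root --password="$(cat /etc/yunohost/mysql)" $database <<< "SELECT 1;"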
@@ -1,5 +1,6 @@
#!/bin/bash

n_version=6.5.1
n_install_dir="/opt/node_n"
node_version_path="$n_install_dir/n/versions/node"
# N_PREFIX is the directory of n, it needs to be loaded as a environment variable.

@@ -16,8 +17,8 @@ ynh_install_n () {
ynh_print_info --message="Installation of N - Node.js version management"
# Build an app.src for n
mkdir --parents "../conf"
echo "SOURCE_URL=https://github.com/tj/n/archive/v4.1.0.tar.gz
SOURCE_SUM=3983fa3f00d4bf85ba8e21f1a590f6e28938093abe0bb950aeea52b1717471fc" > "../conf/n.src"
echo "SOURCE_URL=https://github.com/tj/n/archive/v${n_version}.tar.gz
SOURCE_SUM=5833f15893b9951a9ed59487e87b6c181d96b83a525846255872c4f92f0d25dd" > "../conf/n.src"
# Download and extract n
ynh_setup_source --dest_dir="$n_install_dir/git" --source_id=n
# Install n

@@ -125,7 +126,10 @@ ynh_install_nodejs () {
test -x /usr/bin/npm && mv /usr/bin/npm /usr/bin/npm_n

# If n is not previously setup, install it
if ! test $(n --version > /dev/null 2>&1)
if ! $n_install_dir/bin/n --version > /dev/null 2>&1
then
ynh_install_n
elif dpkg --compare-versions "$($n_install_dir/bin/n --version)" lt $n_version
then
ynh_install_n
fi
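The old guard never actually checked anything useful: with the output redirected, the command substitution is always empty, so "test" with no argument is false regardless of whether n is installed. The new form calls the n binary by its full path and additionally reinstalls when the installed version is older than $n_version. A standalone sketch of that guard, assuming the same paths as above:

    n_install_dir="/opt/node_n"
    n_version=6.5.1
    if ! "$n_install_dir/bin/n" --version >/dev/null 2>&1; then
        echo "n is not installed yet"        # the helper would call ynh_install_n here
    elif dpkg --compare-versions "$("$n_install_dir/bin/n" --version)" lt "$n_version"; then
        echo "n is older than $n_version"    # the helper would call ynh_install_n to upgrade
    fi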
@@ -1,37 +1,37 @@
#!/bin/bash

readonly YNH_DEFAULT_PHP_VERSION=7.0
# Declare the actual php version to use.
# A packager willing to use another version of php can override the variable into its _common.sh.
readonly YNH_DEFAULT_PHP_VERSION=7.3
# Declare the actual PHP version to use.
# A packager willing to use another version of PHP can override the variable into its _common.sh.
YNH_PHP_VERSION=${YNH_PHP_VERSION:-$YNH_DEFAULT_PHP_VERSION}

# Create a dedicated php-fpm config
# Create a dedicated PHP-FPM config
#
# usage 1: ynh_add_fpm_config [--phpversion=7.X] [--use_template] [--package=packages] [--dedicated_service]
# | arg: -v, --phpversion= - Version of php to use.
# | arg: -v, --phpversion= - Version of PHP to use.
# | arg: -t, --use_template - Use this helper in template mode.
# | arg: -p, --package= - Additionnal php packages to install
# | arg: -d, --dedicated_service - Use a dedicated php-fpm service instead of the common one.
# | arg: -p, --package= - Additionnal PHP packages to install
# | arg: -d, --dedicated_service - Use a dedicated PHP-FPM service instead of the common one.
#
# -----------------------------------------------------------------------------
#
# usage 2: ynh_add_fpm_config [--phpversion=7.X] --usage=usage --footprint=footprint [--package=packages] [--dedicated_service]
# | arg: -v, --phpversion= - Version of php to use.
# | arg: -v, --phpversion= - Version of PHP to use.
# | arg: -f, --footprint= - Memory footprint of the service (low/medium/high).
# low - Less than 20Mb of ram by pool.
# medium - Between 20Mb and 40Mb of ram by pool.
# high - More than 40Mb of ram by pool.
# Or specify exactly the footprint, the load of the service as Mb by pool instead of having a standard value.
# low - Less than 20 MB of RAM by pool.
# medium - Between 20 MB and 40 MB of RAM by pool.
# high - More than 40 MB of RAM by pool.
# Or specify exactly the footprint, the load of the service as MB by pool instead of having a standard value.
# To have this value, use the following command and stress the service.
# watch -n0.5 ps -o user,cmd,%cpu,rss -u APP
#
# | arg: -u, --usage= - Expected usage of the service (low/medium/high).
# low - Personal usage, behind the sso.
# low - Personal usage, behind the SSO.
# medium - Low usage, few people or/and publicly accessible.
# high - High usage, frequently visited website.
#
# | arg: -p, --package= - Additionnal php packages to install for a specific version of php
# | arg: -d, --dedicated_service - Use a dedicated php-fpm service instead of the common one.
# | arg: -p, --package= - Additionnal PHP packages to install for a specific version of PHP
# | arg: -d, --dedicated_service - Use a dedicated PHP-FPM service instead of the common one.
#
#
# The footprint of the service will be used to defined the maximum footprint we can allow, which is half the maximum RAM.

@@ -85,7 +85,7 @@ ynh_add_fpm_config () {
# Set the default PHP-FPM version by default
phpversion="${phpversion:-$YNH_PHP_VERSION}"

# If the requested php version is not the default version for YunoHost
# If the requested PHP version is not the default version for YunoHost
if [ "$phpversion" != "$YNH_DEFAULT_PHP_VERSION" ]
then
# If the argument --package is used, add the packages to ynh_install_php to install them from sury

@@ -95,7 +95,7 @@ ynh_add_fpm_config () {
else
local additionnal_packages=""
fi
# Install this specific version of php.
# Install this specific version of PHP.
ynh_install_php --phpversion="$phpversion" "$additionnal_packages"
elif [ -n "$package" ]
then

@@ -118,7 +118,7 @@ ynh_add_fpm_config () {
fpm_service="php5-fpm"
fi

# Create the directory for fpm pools
# Create the directory for FPM pools
mkdir --parents "$fpm_config_dir/pool.d"

ynh_app_setting_set --app=$app --key=fpm_config_dir --value="$fpm_config_dir"

@@ -127,7 +127,7 @@ ynh_add_fpm_config () {
ynh_app_setting_set --app=$app --key=phpversion --value=$phpversion
finalphpconf="$fpm_config_dir/pool.d/$app.conf"

# Migrate from mutual php service to dedicated one.
# Migrate from mutual PHP service to dedicated one.
if [ $dedicated_service -eq 1 ]
then
local old_fpm_config_dir="/etc/php/$phpversion/fpm"

@@ -137,9 +137,9 @@ ynh_add_fpm_config () {
ynh_print_info --message="Migrate to a dedicated php-fpm service for $app."
# Create a backup of the old file before migration
ynh_backup_if_checksum_is_different --file="$old_fpm_config_dir/pool.d/$app.conf"
# Remove the old php config file
# Remove the old PHP config file
ynh_secure_remove --file="$old_fpm_config_dir/pool.d/$app.conf"
# Reload php to release the socket and allow the dedicated service to use it
# Reload PHP to release the socket and allow the dedicated service to use it
ynh_systemd_action --service_name=php${phpversion}-fpm --action=reload
fi
fi

@@ -148,21 +148,27 @@ ynh_add_fpm_config () {

if [ $use_template -eq 1 ]
then
# Usage 1, use the template in ../conf/php-fpm.conf
cp ../conf/php-fpm.conf "$finalphpconf"
# Usage 1, use the template in conf/php-fpm.conf
local phpfpm_path="../conf/php-fpm.conf"
if [ ! -e "$phpfpm_path" ]; then
phpfpm_path="../settings/conf/php-fpm.conf" # Into the restore script, the PHP-FPM template is not at the same place
fi
# Make sure now that the template indeed exists
[ -e "$phpfpm_path" ] || ynh_die --message="Unable to find template to configure PHP-FPM."
cp "$phpfpm_path" "$finalphpconf"
ynh_replace_string --match_string="__NAMETOCHANGE__" --replace_string="$app" --target_file="$finalphpconf"
ynh_replace_string --match_string="__FINALPATH__" --replace_string="$final_path" --target_file="$finalphpconf"
ynh_replace_string --match_string="__USER__" --replace_string="$app" --target_file="$finalphpconf"
ynh_replace_string --match_string="__PHPVERSION__" --replace_string="$phpversion" --target_file="$finalphpconf"

else
# Usage 2, generate a php-fpm config file with ynh_get_scalable_phpfpm
# Usage 2, generate a PHP-FPM config file with ynh_get_scalable_phpfpm

# Store settings
ynh_app_setting_set --app=$app --key=fpm_footprint --value=$footprint
ynh_app_setting_set --app=$app --key=fpm_usage --value=$usage

# Define the values to use for the configuration of php.
# Define the values to use for the configuration of PHP.
ynh_get_scalable_phpfpm --usage=$usage --footprint=$footprint

# Copy the default file

@@ -175,7 +181,7 @@ ynh_add_fpm_config () {
ynh_replace_string --match_string="^group = .*" --replace_string="group = $app" --target_file="$finalphpconf"
ynh_replace_string --match_string=".*chdir = .*" --replace_string="chdir = $final_path" --target_file="$finalphpconf"

# Configure fpm children
# Configure FPM children
ynh_replace_string --match_string=".*pm = .*" --replace_string="pm = $php_pm" --target_file="$finalphpconf"
ynh_replace_string --match_string=".*pm.max_children = .*" --replace_string="pm.max_children = $php_max_children" --target_file="$finalphpconf"
ynh_replace_string --match_string=".*pm.max_requests = .*" --replace_string="pm.max_requests = 500" --target_file="$finalphpconf"

@@ -232,7 +238,7 @@ ynh_add_fpm_config () {
ynh_replace_string --match_string="^[; ]*syslog.ident *=.*" --replace_string="syslog.ident = php-fpm-$app" --target_file="$globalphpconf"
ynh_replace_string --match_string="^[; ]*include *=.*" --replace_string="include = $finalphpconf" --target_file="$globalphpconf"

# Create a config for a dedicated php-fpm service for the app
# Create a config for a dedicated PHP-FPM service for the app
echo "[Unit]
Description=PHP $phpversion FastCGI Process Manager for $app
After=network.target

@@ -247,7 +253,7 @@ ExecReload=/bin/kill -USR2 \$MAINPID
WantedBy=multi-user.target
" > ../conf/$fpm_service

# Create this dedicated php-fpm service
# Create this dedicated PHP-FPM service
ynh_add_systemd_config --service=$fpm_service --template=$fpm_service
# Integrate the service in YunoHost admin panel
yunohost service add $fpm_service --log /var/log/php/fpm-php.$app.log --log_type file --description "Php-fpm dedicated to $app"

@@ -256,12 +262,12 @@ WantedBy=multi-user.target
# Restart the service, as this service is either stopped or only for this app
ynh_systemd_action --service_name=$fpm_service --action=restart
else
# Reload php, to not impact other parts of the system using php
# Reload PHP, to not impact other parts of the system using PHP
ynh_systemd_action --service_name=$fpm_service --action=reload
fi
}

# Remove the dedicated php-fpm config
# Remove the dedicated PHP-FPM config
#
# usage: ynh_remove_fpm_config
#

@@ -271,13 +277,13 @@ ynh_remove_fpm_config () {
local fpm_service=$(ynh_app_setting_get --app=$app --key=fpm_service)
local dedicated_service=$(ynh_app_setting_get --app=$app --key=fpm_dedicated_service)
dedicated_service=${dedicated_service:-0}
# Get the version of php used by this app
# Get the version of PHP used by this app
local phpversion=$(ynh_app_setting_get $app phpversion)

# Assume default PHP-FPM version by default
phpversion="${phpversion:-$YNH_DEFAULT_PHP_VERSION}"

# Assume default php files if not set
# Assume default PHP files if not set
if [ -z "$fpm_config_dir" ]
then
fpm_config_dir="/etc/php/$YNH_DEFAULT_PHP_VERSION/fpm"

@@ -286,11 +292,11 @@ ynh_remove_fpm_config () {

if [ $dedicated_service -eq 1 ]
then
# Remove the dedicated service php-fpm service for the app
# Remove the dedicated service PHP-FPM service for the app
ynh_remove_systemd_config --service=$fpm_service
# Remove the global php-fpm conf
# Remove the global PHP-FPM conf
ynh_secure_remove --file="$fpm_config_dir/php-fpm-$app.conf"
# Remove the service from the list of services known by Yunohost
# Remove the service from the list of services known by YunoHost
yunohost service remove $fpm_service
elif ynh_package_is_installed --package="php${phpversion}-fpm"; then
ynh_systemd_action --service_name=$fpm_service --action=reload

@@ -302,21 +308,21 @@ ynh_remove_fpm_config () {
ynh_secure_remove --file="$fpm_config_dir/conf.d/20-$app.ini"
fi

# If the php version used is not the default version for YunoHost
# If the PHP version used is not the default version for YunoHost
if [ "$phpversion" != "$YNH_DEFAULT_PHP_VERSION" ]
then
# Remove this specific version of php
# Remove this specific version of PHP
ynh_remove_php
fi
}

# Install another version of php.
# Install another version of PHP.
#
# [internal]
#
# usage: ynh_install_php --phpversion=phpversion [--package=packages]
# | arg: -v, --phpversion= - Version of php to install.
# | arg: -p, --package= - Additionnal php packages to install
# | arg: -v, --phpversion= - Version of PHP to install.
# | arg: -p, --package= - Additionnal PHP packages to install
#
# Requires YunoHost version 3.8.1 or higher.
ynh_install_php () {

@@ -343,19 +349,19 @@ ynh_install_php () {
# Do not add twice the same line
if ! grep --quiet "$YNH_APP_INSTANCE_NAME:" "/etc/php/ynh_app_version"
then
# Store the ID of this app and the version of php requested for it
# Store the ID of this app and the version of PHP requested for it
echo "$YNH_APP_INSTANCE_NAME:$phpversion" | tee --append "/etc/php/ynh_app_version"
fi

# Add an extra repository for those packages
ynh_install_extra_repo --repo="https://packages.sury.org/php/ $(ynh_get_debian_release) main" --key="https://packages.sury.org/php/apt.gpg" --priority=995 --name=extra_php_version
ynh_install_extra_repo --repo="https://packages.sury.org/php/ $(ynh_get_debian_release) main" --key="https://packages.sury.org/php/apt.gpg" --priority=995 --name=extra_php_version --priority=600

# Install requested dependencies from this extra repository.
# Install php-fpm first, otherwise php will install apache as a dependency.
# Install PHP-FPM first, otherwise PHP will install apache as a dependency.
ynh_add_app_dependencies --package="php${phpversion}-fpm"
ynh_add_app_dependencies --package="php$phpversion php${phpversion}-common $package"

# Set the default php version back as the default version for php-cli.
# Set the default PHP version back as the default version for php-cli.
update-alternatives --set php /usr/bin/php$YNH_DEFAULT_PHP_VERSION

# Pin this extra repository after packages are installed to prevent sury of doing shit

@@ -368,7 +374,7 @@ ynh_install_php () {
yunohost service add php${phpversion}-fpm --log "/var/log/php${phpversion}-fpm.log"
}

# Remove the specific version of php used by the app.
# Remove the specific version of PHP used by the app.
#
# [internal]
#

@@ -376,7 +382,7 @@ ynh_install_php () {
#
# Requires YunoHost version 3.8.1 or higher.
ynh_remove_php () {
# Get the version of php used by this app
# Get the version of PHP used by this app
local phpversion=$(ynh_app_setting_get $app phpversion)

if [ "$phpversion" == "$YNH_DEFAULT_PHP_VERSION" ] || [ -z "$phpversion" ]

@@ -394,7 +400,7 @@ ynh_remove_php () {
# Remove the line for this app
sed --in-place "/$YNH_APP_INSTANCE_NAME:$phpversion/d" "/etc/php/ynh_app_version"

# If no other app uses this version of php, remove it.
# If no other app uses this version of PHP, remove it.
if ! grep --quiet "$phpversion" "/etc/php/ynh_app_version"
then
# Remove the service from the admin panel

@@ -402,26 +408,26 @@ ynh_remove_php () {
yunohost service remove php${phpversion}-fpm
fi

# Purge php dependencies for this version.
# Purge PHP dependencies for this version.
ynh_package_autopurge "php$phpversion php${phpversion}-fpm php${phpversion}-common"
fi
}

# Define the values to configure php-fpm
# Define the values to configure PHP-FPM
#
# [internal]
#
# usage: ynh_get_scalable_phpfpm --usage=usage --footprint=footprint [--print]
# | arg: -f, --footprint= - Memory footprint of the service (low/medium/high).
# low - Less than 20Mb of ram by pool.
# medium - Between 20Mb and 40Mb of ram by pool.
# high - More than 40Mb of ram by pool.
# Or specify exactly the footprint, the load of the service as Mb by pool instead of having a standard value.
# low - Less than 20 MB of RAM by pool.
# medium - Between 20 MB and 40 MB of RAM by pool.
# high - More than 40 MB of RAM by pool.
# Or specify exactly the footprint, the load of the service as MB by pool instead of having a standard value.
# To have this value, use the following command and stress the service.
# watch -n0.5 ps -o user,cmd,%cpu,rss -u APP
#
# | arg: -u, --usage= - Expected usage of the service (low/medium/high).
# low - Personal usage, behind the sso.
# low - Personal usage, behind the SSO.
# medium - Low usage, few people or/and publicly accessible.
# high - High usage, frequently visited website.
#

@@ -492,7 +498,7 @@ ynh_get_scalable_phpfpm () {

# Define pm.max_children
# The value of pm.max_children is the total amount of ram divide by 2 and divide again by the footprint of a pool for this app.
# So if php-fpm start the maximum of children, it won't exceed half of the ram.
# So if PHP-FPM start the maximum of children, it won't exceed half of the ram.
php_max_children=$(( $max_ram / 2 / $footprint ))
# If process manager is set as static, use half less children.
# Used as static, there's always as many children as the value of pm.max_children
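For reference, a worked example of the pm.max_children formula used just above, with illustrative numbers (the real values come from the machine's RAM and the measured pool footprint):

    max_ram=2048      # total RAM in MB
    footprint=40      # footprint of one pool worker in MB
    php_max_children=$(( max_ram / 2 / footprint ))
    echo "$php_max_children"   # -> 25, i.e. 25 workers * 40 MB = 1000 MB, about half of 2 GB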
@@ -1,7 +1,7 @@
#!/bin/bash

PSQL_ROOT_PWD_FILE=/etc/yunohost/psql
PSQL_VERSION=9.6
PSQL_VERSION=11

# Open a connection as a user
#

@@ -46,8 +46,13 @@ ynh_psql_execute_as_root() {
ynh_handle_getopts_args "$@"
database="${database:-}"

if [ -n "$database" ]
then
database="--database=$database"
fi

ynh_psql_connect_as --user="postgres" --password="$(cat $PSQL_ROOT_PWD_FILE)" \
--database="$database" <<<"$sql"
$database <<<"$sql"
}

# Execute a command from a file as root user

@@ -67,8 +72,13 @@ ynh_psql_execute_file_as_root() {
ynh_handle_getopts_args "$@"
database="${database:-}"

if [ -n "$database" ]
then
database="--database=$database"
fi

ynh_psql_connect_as --user="postgres" --password="$(cat $PSQL_ROOT_PWD_FILE)" \
--database="$database" <"$file"
$database <"$file"
}

# Create a database and grant optionnaly privilegies to a user

@@ -213,7 +223,7 @@ ynh_psql_drop_user() {
# usage: ynh_psql_setup_db --db_user=user --db_name=name [--db_pwd=pwd]
# | arg: -u, --db_user= - Owner of the database
# | arg: -n, --db_name= - Name of the database
# | arg: -p, --db_pwd= - Password of the database. If not given, a password will be generated
# | arg: -p, --db_pwd= - Password of the database. If not provided, a password will be generated
#
# After executing this helper, the password of the created database will be available in $db_pwd
# It will also be stored as "psqlpwd" into the app settings.

@@ -231,7 +241,7 @@ ynh_psql_setup_db() {

if ! ynh_psql_user_exists --user=$db_user; then
local new_db_pwd=$(ynh_string_random) # Generate a random password
# If $db_pwd is not given, use new_db_pwd instead for db_pwd
# If $db_pwd is not provided, use new_db_pwd instead for db_pwd
db_pwd="${db_pwd:-$new_db_pwd}"

ynh_psql_create_user "$db_user" "$db_pwd"
@@ -144,8 +144,13 @@ ynh_setup_source () {
then # Use the local source file if it is present
cp $local_src $src_filename
else # If not, download the source
# NB. we have to declare the var as local first,
# otherwise 'local foo=$(false) || echo 'pwet'" does'nt work
# because local always return 0 ...
local out
# Timeout option is here to enforce the timeout on dns query and tcp connect (c.f. man wget)
local out=`wget --timeout 900 --no-verbose --output-document=$src_filename $src_url 2>&1` || ynh_print_err --message="$out"
out=$(wget --tries 3 --no-dns-cache --timeout 900 --no-verbose --output-document=$src_filename $src_url 2>&1) \
|| ynh_die --message="$out"
fi

# Check the control sum
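The comment in this hunk is worth a tiny standalone demo: because "local" itself returns 0, combining declaration and assignment swallows the command's exit status, whereas declaring first and assigning separately preserves it. A sketch (illustrative, not part of the helper):

    f() { local out=$(false) || echo "never printed: local masks the failure"; }
    g() { local out; out=$(false) || echo "printed: the assignment keeps the exit status"; }
    f; g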
@@ -10,7 +10,8 @@ source /usr/share/yunohost/helpers
backup_dir="${1}/conf/ldap"

# Backup the configuration
ynh_backup "/etc/ldap/slapd.conf" "${backup_dir}/slapd.conf"
ynh_backup "/etc/ldap/ldap.conf" "${backup_dir}/ldap.conf"
ynh_backup "/etc/ldap/slapd.ldif" "${backup_dir}/slapd.ldif"
slapcat -b cn=config -l "${backup_dir}/cn=config.master.ldif"

# Backup the database
@@ -1,13 +0,0 @@
#!/bin/bash

# Exit hook on subcommand error or unset variable
set -eu

# Source YNH helpers
source /usr/share/yunohost/helpers

# Backup destination
backup_dir="${1}/conf/ynh/mysql"

# Save MySQL root password
ynh_backup "/etc/yunohost/mysql" "${backup_dir}/root_pwd"
@@ -67,6 +67,24 @@ EOF
# (this make sure that the hash is null / file is flagged as to-delete)
mkdir -p $pending_dir/etc/etckeeper
touch $pending_dir/etc/etckeeper/etckeeper.conf

# Skip ntp if inside a container (inspired from the conf of systemd-timesyncd)
mkdir -p ${pending_dir}/etc/systemd/system/ntp.service.d/
echo "
[Unit]
ConditionCapability=CAP_SYS_TIME
ConditionVirtualization=!container
" > ${pending_dir}/etc/systemd/system/ntp.service.d/ynh-override.conf

# Make nftable conflict with yunohost-firewall
mkdir -p ${pending_dir}/etc/systemd/system/nftables.service.d/
cat > ${pending_dir}/etc/systemd/system/nftables.service.d/ynh-override.conf << EOF
[Unit]
# yunohost-firewall and nftables conflict with each other
Conflicts=yunohost-firewall.service
ConditionFileIsExecutable=!/etc/init.d/yunohost-firewall
ConditionPathExists=!/etc/systemd/system/multi-user.target.wants/yunohost-firewall.service
EOF
}

do_post_regen() {

@@ -91,6 +109,9 @@ do_post_regen() {
[[ ! -e /etc/yunohost/hooks.d ]] || (chown root /etc/yunohost/hooks.d && chmod 700 /etc/yunohost/hooks.d)
[[ ! -e /etc/yunohost/apps ]] || (chown root /etc/yunohost/apps && chmod 700 /etc/yunohost/apps)

# Propagates changes in systemd service config overrides
[[ ! "$regen_conf_files" =~ "ntp.service.d/ynh-override.conf" ]] || { systemctl daemon-reload; systemctl restart ntp; }
[[ ! "$regen_conf_files" =~ "nftables.service.d/ynh-override.conf" ]] || systemctl daemon-reload
}

_update_services() {
@@ -69,12 +69,11 @@ do_init_regen() {
-out "${ssl_dir}/certs/yunohost_crt.pem" \
-batch >>$LOGFILE 2>&1

last_cert=$(ls $ssl_dir/newcerts/*.pem | sort -V | tail -n 1)
chmod 640 "${ssl_dir}/certs/yunohost_key.pem"
chmod 640 "$last_cert"
chmod 640 "${ssl_dir}/certs/yunohost_crt.pem"

cp "${ssl_dir}/certs/yunohost_key.pem" "$ynh_key"
cp "$last_cert" "$ynh_crt"
cp "${ssl_dir}/certs/yunohost_crt.pem" "$ynh_crt"
ln -sf "$ynh_crt" /etc/ssl/certs/yunohost_crt.pem
ln -sf "$ynh_key" /etc/ssl/private/yunohost_key.pem
fi
@@ -12,27 +12,52 @@ do_init_regen() {

do_pre_regen ""

# fix some permissions
chown root:openldap /etc/ldap/slapd.conf
systemctl daemon-reload

_regenerate_slapd_conf

# Enforce permissions
chown root:openldap /etc/ldap/slapd.ldif
chown -R openldap:openldap /etc/ldap/schema/
usermod -aG ssl-cert openldap

# check the slapd config file at first
slaptest -Q -u -f /etc/ldap/slapd.conf

# regenerate LDAP config directory from slapd.conf
rm -Rf /etc/ldap/slapd.d
mkdir /etc/ldap/slapd.d
slaptest -f /etc/ldap/slapd.conf -F /etc/ldap/slapd.d/ 2>&1
chown -R openldap:openldap /etc/ldap/slapd.d/

service slapd restart
}

_regenerate_slapd_conf() {

# Validate the new slapd config
# To do so, we have to use the .ldif to generate the config directory
# so we use a temporary directory slapd_new.d
rm -Rf /etc/ldap/slapd_new.d
mkdir /etc/ldap/slapd_new.d
slapadd -n0 -l /etc/ldap/slapd.ldif -F /etc/ldap/slapd_new.d/ 2>&1
# Actual validation (-Q is for quiet, -u is for dry-run)
slaptest -Q -u -F /etc/ldap/slapd_new.d

# "Commit" / apply the new config (meaning we delete the old one and replace
# it with the new one)
rm -Rf /etc/ldap/slapd.d
mv /etc/ldap/slapd_new.d /etc/ldap/slapd.d

chown -R openldap:openldap /etc/ldap/slapd.d/
}

do_pre_regen() {
pending_dir=$1

cd /usr/share/yunohost/templates/slapd
# remove temporary backup file
rm -f "$tmp_backup_dir_file"

# Define if we need to migrate from hdb to mdb
curr_backend=$(grep '^database' /etc/ldap/slapd.conf 2>/dev/null | awk '{print $2}')
if [ -e /etc/ldap/slapd.conf ] && [ -n "$curr_backend" ] && \
[ $curr_backend != 'mdb' ]; then
backup_dir="/var/backups/dc=yunohost,dc=org-${curr_backend}-$(date +%s)"
mkdir -p "$backup_dir"
slapcat -b dc=yunohost,dc=org -l "${backup_dir}/dc=yunohost-dc=org.ldif"
echo "$backup_dir" > "$tmp_backup_dir_file"
fi

# create needed directories
ldap_dir="${pending_dir}/etc/ldap"

@@ -40,28 +65,15 @@ do_pre_regen() {
mkdir -p "$ldap_dir" "$schema_dir"

# remove legacy configuration file
[ ! -f /etc/ldap/slapd-yuno.conf ] \
|| touch "${pending_dir}/etc/ldap/slapd-yuno.conf"
[ ! -f /etc/ldap/slapd-yuno.conf ] || touch "${ldap_dir}/slapd-yuno.conf"
[ ! -f /etc/ldap/slapd.conf ] || touch "${ldap_dir}/slapd.conf"
[ ! -f /etc/ldap/schema/yunohost.schema ] || touch "${schema_dir}/yunohost.schema"

# remove temporary backup file
rm -f "$tmp_backup_dir_file"

# retrieve current and new backends
curr_backend=$(grep '^database' /etc/ldap/slapd.conf 2>/dev/null | awk '{print $2}')
new_backend=$(grep '^database' slapd.conf | awk '{print $2}')

# save current database before any conf changes
if [[ -n "$curr_backend" && "$curr_backend" != "$new_backend" ]]; then
backup_dir="/var/backups/dc=yunohost,dc=org-${curr_backend}-$(date +%s)"
mkdir -p "$backup_dir"
slapcat -b dc=yunohost,dc=org \
-l "${backup_dir}/dc=yunohost-dc=org.ldif"
echo "$backup_dir" > "$tmp_backup_dir_file"
fi
cd /usr/share/yunohost/templates/slapd

# copy configuration files
cp -a ldap.conf slapd.conf "$ldap_dir"
cp -a sudo.schema mailserver.schema yunohost.schema "$schema_dir"
cp -a ldap.conf slapd.ldif "$ldap_dir"
cp -a sudo.ldif mailserver.ldif permission.ldif "$schema_dir"

mkdir -p ${pending_dir}/etc/systemd/system/slapd.service.d/
cp systemd-override.conf ${pending_dir}/etc/systemd/system/slapd.service.d/ynh-override.conf

@@ -72,14 +84,11 @@ do_pre_regen() {
do_post_regen() {
regen_conf_files=$1

# ensure that slapd.d exists
mkdir -p /etc/ldap/slapd.d

# fix some permissions
echo "Making sure we have the right permissions needed ..."
echo "Enforce permissions on ldap/slapd directories and certs ..."
# penldap user should be in the ssl-cert group to let it access the certificate for TLS
usermod -aG ssl-cert openldap
chown root:openldap /etc/ldap/slapd.conf
chown root:openldap /etc/ldap/slapd.ldif
chown -R openldap:openldap /etc/ldap/schema/
chown -R openldap:openldap /etc/ldap/slapd.d/

@@ -92,29 +101,17 @@ do_post_regen() {

[ -z "$regen_conf_files" ] && exit 0

# check the slapd config file at first
slaptest -Q -u -f /etc/ldap/slapd.conf
# regenerate LDAP config directory from slapd.conf
echo "Regenerate LDAP config directory from slapd.ldif"
_regenerate_slapd_conf

# check if a backup should be restored
# If there's a backup, re-import its data
backup_dir=$(cat "$tmp_backup_dir_file" 2>/dev/null || true)
if [[ -n "$backup_dir" && -f "${backup_dir}/dc=yunohost-dc=org.ldif" ]]; then
# regenerate LDAP config directory and import database as root
# since the admin user may be unavailable
echo "Regenerate LDAP config directory and import the database using slapadd"
sh -c "rm -Rf /etc/ldap/slapd.d;
mkdir /etc/ldap/slapd.d;
slaptest -f /etc/ldap/slapd.conf -F /etc/ldap/slapd.d;
chown -R openldap:openldap /etc/ldap/slapd.d;
slapadd -F /etc/ldap/slapd.d -b dc=yunohost,dc=org \
-l '${backup_dir}/dc=yunohost-dc=org.ldif';
chown -R openldap:openldap /var/lib/ldap" 2>&1
else
# regenerate LDAP config directory from slapd.conf
echo "Regenerate LDAP config directory from slapd.conf"
rm -Rf /etc/ldap/slapd.d
mkdir /etc/ldap/slapd.d
slaptest -f /etc/ldap/slapd.conf -F /etc/ldap/slapd.d/ 2>&1
chown -R openldap:openldap /etc/ldap/slapd.d/
echo "Import the database using slapadd"
slapadd -F /etc/ldap/slapd.d -b dc=yunohost,dc=org -l "${backup_dir}/dc=yunohost-dc=org.ldif"
chown -R openldap:openldap /var/lib/ldap 2>&1
fi

echo "Running slapdindex"

@@ -154,6 +151,9 @@ case "$1" in
init)
do_init_regen
;;
apply_config)
do_post_regen /etc/ldap/slapd.ldif
;;
*)
echo "hook called with unknown argument \`$1'" >&2
exit 1
@@ -26,6 +26,9 @@ do_init_regen() {
ynh_render_template "security.conf.inc" "${nginx_conf_dir}/security.conf.inc"
ynh_render_template "yunohost_admin.conf" "${nginx_conf_dir}/yunohost_admin.conf"

mkdir -p $nginx_conf_dir/default.d/
cp "redirect_to_admin.conf" $nginx_conf_dir/default.d/

# Restart nginx if conf looks good, otherwise display error and exit unhappy
nginx -t 2>/dev/null || { nginx -t; exit 1; }
systemctl restart nginx || { journalctl --no-pager --lines=10 -u nginx >&2; exit 1; }

@@ -77,6 +80,8 @@ do_pre_regen() {
done

ynh_render_template "yunohost_admin.conf" "${nginx_conf_dir}/yunohost_admin.conf"
mkdir -p $nginx_conf_dir/default.d/
cp "redirect_to_admin.conf" $nginx_conf_dir/default.d/

# remove old domain conf files
conf_files=$(ls -1 /etc/nginx/conf.d \
@@ -42,6 +42,8 @@ do_post_regen() {
chown _rspamd /etc/dkim/*.mail.key
chmod 400 /etc/dkim/*.mail.key

[ ! -e /var/log/rspamd ] || chown -R _rspamd:_rspamd /var/log/rspamd

regen_conf_files=$1
[ -z "$regen_conf_files" ] && exit 0
@@ -20,9 +20,11 @@ do_post_regen() {
# Playing with enable/disable allows to recreate the proper symlinks.
if [ ! -e /etc/systemd/system/mysql.service ]
then
systemctl stop mysql -q
systemctl disable mysql -q
systemctl disable mariadb -q
systemctl enable mariadb -q
systemctl is-active mariadb -q || systemctl start mariadb
fi

if [ ! -f /etc/yunohost/mysql ]; then
@@ -69,8 +69,16 @@ do_post_regen() {
short_hostname=$(hostname -s)
grep -q "127.0.0.1.*$short_hostname" /etc/hosts || echo -e "\n127.0.0.1\t$short_hostname" >>/etc/hosts

[[ -z "$regen_conf_files" ]] \
|| service dnsmasq restart
[[ -n "$regen_conf_files" ]] || return

# Remove / disable services likely to conflict with dnsmasq
for SERVICE in systemd-resolved bind9
do
systemctl is-enabled $SERVICE &>/dev/null && systemctl disable $SERVICE 2>/dev/null
systemctl is-active $SERVICE &>/dev/null && systemctl stop $SERVICE
done

systemctl restart dnsmasq
}

FORCE=${2:-0}
@@ -39,6 +39,9 @@ else
# Restore the configuration
mv /etc/ldap/slapd.d "$TMPDIR"
mkdir -p /etc/ldap/slapd.d
cp -a "${backup_dir}/ldap.conf" /etc/ldap/ldap.conf
cp -a "${backup_dir}/slapd.ldif" /etc/ldap/slapd.ldif
# Legacy thing but we need it to force the regen-conf in case of it exist
cp -a "${backup_dir}/slapd.conf" /etc/ldap/slapd.conf
slapadd -F /etc/ldap/slapd.d -b cn=config \
-l "${backup_dir}/cn=config.master.ldif" \
@@ -1,42 +1,5 @@
backup_dir="$1/conf/ynh/mysql"
MYSQL_PKG="$(dpkg --list | sed -ne 's/^ii \(mariadb-server-[[:digit:].]\+\) .*$/\1/p')"
# We don't backup/restore mysql password anymore
# c.f. https://github.com/YunoHost/yunohost/pull/912

. /usr/share/yunohost/helpers

# ensure that mysql is running
service mysql status >/dev/null 2>&1 \
|| service mysql start

# retrieve current and new password
[ -f /etc/yunohost/mysql ] \
&& curr_pwd=$(cat /etc/yunohost/mysql)
new_pwd=$(cat "${backup_dir}/root_pwd" || cat "${backup_dir}/mysql")
[ -z "$curr_pwd" ] && curr_pwd="yunohost"
[ -z "$new_pwd" ] && {
new_pwd=$(ynh_string_random 10)
}

# attempt to change it
mysqladmin -s -u root -p"$curr_pwd" password "$new_pwd" || {

echo "It seems that you have already configured MySQL." \
"YunoHost needs to have a root access to MySQL to runs its" \
"applications, and is going to reset the MySQL root password." \
"You can find this new password in /etc/yunohost/mysql." >&2

# set new password with debconf
debconf-set-selections << EOF
$MYSQL_PKG mysql-server/root_password password $new_pwd
$MYSQL_PKG mysql-server/root_password_again password $new_pwd
EOF

# reconfigure Debian package
dpkg-reconfigure -freadline -u "$MYSQL_PKG" 2>&1
}

# store new root password
echo "$new_pwd" | tee /etc/yunohost/mysql
chmod 400 /etc/yunohost/mysql

# reload the grant tables
mysqladmin -s -u root -p"$new_pwd" reload
# This is a dummy empty file as a workaround for
# https://github.com/YunoHost/issues/issues/1553 until it is fixed
data/other/ffdhe2048.pem (new file): 8 lines

@@ -0,0 +1,8 @@
-----BEGIN DH PARAMETERS-----
MIIBCAKCAQEA//////////+t+FRYortKmq/cViAnPTzx2LnFg84tNpWp4TZBFGQz
+8yTnc4kmz75fS/jY2MMddj2gbICrsRhetPfHtXV/WVhJDP1H18GbtCFY2VVPe0a
87VXE15/V8k1mE8McODmi3fipona8+/och3xWKE2rec1MKzKT0g6eXq8CrGCsyT7
YdEIqUuyyOP7uWrat2DX9GgdT0Kj3jlN9K5W7edjcrsZCwenyO4KbXCeAvzhzffi
7MA0BM0oNC9hkXL+nOmFg/+OTxIy7vKBg8P+OxtMb61zO7X8vC7CIAXFjvGDfRaD
ssbzSibBsu/6iGtCOGEoXJf//////////wIBAg==
-----END DH PARAMETERS-----
@@ -14,18 +14,19 @@ mail_plugins = $mail_plugins quota

###############################################################################

# generated 2020-04-03, Mozilla Guideline v5.4, Dovecot 2.2.27, OpenSSL 1.1.0l, intermediate configuration
# https://ssl-config.mozilla.org/#server=dovecot&version=2.2.27&config=intermediate&openssl=1.1.0l&guideline=5.4
# generated 2020-08-18, Mozilla Guideline v5.6, Dovecot 2.3.4, OpenSSL 1.1.1d, intermediate configuration
# https://ssl-config.mozilla.org/#server=dovecot&version=2.3.4&config=intermediate&openssl=1.1.1d&guideline=5.6

ssl = required

ssl_cert = </etc/yunohost/certs/{{ main_domain }}/crt.pem
ssl_key = </etc/yunohost/certs/{{ main_domain }}/key.pem

ssl_dh_parameters_length = 2048
# curl https://ssl-config.mozilla.org/ffdhe2048.txt > /path/to/dhparam
ssl_dh = /usr/share/yunohost/other/ffdhe2048.pem;

# intermediate configuration
ssl_protocols = TLSv1.2
ssl_min_protocol = TLSv1.2
ssl_cipher_list = ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384
ssl_prefer_server_ciphers = no
data/templates/nginx/redirect_to_admin.conf (new file): 3 lines

@@ -0,0 +1,3 @@
location / {
return 302 https://$http_host/yunohost/admin;
}
@@ -2,21 +2,25 @@ ssl_session_timeout 1d;
ssl_session_cache shared:SSL:50m; # about 200000 sessions
ssl_session_tickets off;

# nginx 1.10 in stretch doesn't support TLS1.3 and Mozilla doesn't have any
# "modern" config recommendation with it.
# So until buster the modern conf is same as intermediate
{% if compatibility == "modern" %} {% else %} {% endif %}

{% if compatibility == "modern" %}
# generated 2020-08-14, Mozilla Guideline v5.6, nginx 1.14.2, OpenSSL 1.1.1d, modern configuration
# https://ssl-config.mozilla.org/#server=nginx&version=1.14.2&config=modern&openssl=1.1.1d&guideline=5.6
ssl_protocols TLSv1.3;
ssl_prefer_server_ciphers off;
{% else %}
# Ciphers with intermediate compatibility
# generated 2020-04-03, Mozilla Guideline v5.4, nginx 1.10.3, OpenSSL 1.1.0l, intermediate configuration
# https://ssl-config.mozilla.org/#server=nginx&version=1.10.3&config=intermediate&openssl=1.1.0l&guideline=5.4
ssl_protocols TLSv1.2;
# generated 2020-08-14, Mozilla Guideline v5.6, nginx 1.14.2, OpenSSL 1.1.1d, intermediate configuration
# https://ssl-config.mozilla.org/#server=nginx&version=1.14.2&config=intermediate&openssl=1.1.1d&guideline=5.6
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
ssl_prefer_server_ciphers off;

# Uncomment the following directive after DH generation
# > openssl dhparam -out /etc/ssl/private/dh2048.pem -outform PEM -2 2048
#ssl_dhparam /etc/ssl/private/dh2048.pem;
# Pre-defined FFDHE group (RFC 7919)
# From https://ssl-config.mozilla.org/ffdhe2048.txt
# https://security.stackexchange.com/a/149818
ssl_dhparam /usr/share/yunohost/other/ffdhe2048.pem;
{% endif %}

# Follows the Web Security Directives from the Mozilla Dev Lab and the Mozilla Obervatory + Partners
# https://wiki.mozilla.org/Security/Guidelines/Web_Security
@@ -2,13 +2,7 @@ server {
listen 80 default_server;
listen [::]:80 default_server;

location / {
return 302 https://$http_host/yunohost/admin;
}

location /yunohost/admin {
return 301 https://$http_host$request_uri;
}
include /etc/nginx/conf.d/default.d/*.conf;
}

server {

@@ -23,21 +17,12 @@ server {
more_set_headers "Strict-Transport-Security : max-age=63072000; includeSubDomains; preload";
more_set_headers "Referrer-Policy : 'same-origin'";

location / {
return 302 https://$http_host/yunohost/admin;
}

location /yunohost {
# Block crawlers bot
if ($http_user_agent ~ (crawl|Googlebot|Slurp|spider|bingbot|tracker|click|parser|spider|facebookexternalhit) ) {
return 403;
}
# X-Robots-Tag to precise the rules applied.
add_header X-Robots-Tag "nofollow, noindex, noarchive, nosnippet";
# Redirect most of 404 to maindomain.tld/yunohost/sso
access_by_lua_file /usr/share/ssowat/access.lua;
}

include /etc/nginx/conf.d/yunohost_admin.conf.inc;
include /etc/nginx/conf.d/yunohost_api.conf.inc;
include /etc/nginx/conf.d/default.d/*.conf;
}
@@ -15,6 +15,18 @@ base dc=yunohost,dc=org
# The LDAP protocol version to use.
#ldap_version 3

# The DN to bind with for normal lookups.
#binddn cn=annonymous,dc=example,dc=net
#bindpw secret

# The DN used for password modifications by root.
#rootpwmoddn cn=admin,dc=example,dc=com

# SSL options
#ssl off
#tls_reqcert never
tls_cacertfile /etc/ssl/certs/ca-certificates.crt

# The search scope.
#scope sub
@@ -1,12 +1,8 @@
# /etc/nsswitch.conf
#
# Example configuration of GNU Name Service Switch functionality.
# If you have the `glibc-doc-reference' and `info' packages installed, try:
# `info libc "Name Service Switch"' for information about this file.

passwd: compat ldap
group: compat ldap
shadow: compat ldap
passwd: files systemd ldap
group: files systemd ldap
shadow: files ldap
gshadow: files

hosts: files myhostname mdns4_minimal [NOTFOUND=return] dns
@ -19,34 +19,35 @@ readme_directory = no
|
|||
|
||||
# -- TLS for incoming connections
|
||||
###############################################################################
|
||||
# generated 2020-04-03, Mozilla Guideline v5.4, Postfix 3.1.14, OpenSSL 1.1.0l, intermediate configuration
|
||||
# https://ssl-config.mozilla.org/#server=postfix&version=3.1.14&config=intermediate&openssl=1.1.0l&guideline=5.4
|
||||
|
||||
# (No modern conf support until we're on buster...)
|
||||
# {% if compatibility == "intermediate" %} {% else %} {% endif %}
|
||||
|
||||
smtpd_use_tls = yes
|
||||
|
||||
smtpd_tls_security_level = may
|
||||
smtpd_tls_auth_only = yes
|
||||
smtpd_tls_cert_file = /etc/yunohost/certs/{{ main_domain }}/crt.pem
|
||||
smtpd_tls_key_file = /etc/yunohost/certs/{{ main_domain }}/key.pem
|
||||
|
||||
{% if compatibility == "intermediate" %}
|
||||
# generated 2020-08-18, Mozilla Guideline v5.6, Postfix 3.4.14, OpenSSL 1.1.1d, intermediate configuration
|
||||
# https://ssl-config.mozilla.org/#server=postfix&version=3.4.14&config=intermediate&openssl=1.1.1d&guideline=5.6
|
||||
|
||||
smtpd_tls_mandatory_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1
|
||||
smtpd_tls_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1
|
||||
# smtpd_tls_mandatory_ciphers = medium # (c.f. below)
|
||||
smtpd_tls_mandatory_ciphers = medium
|
||||
|
||||
# curl https://ssl-config.mozilla.org/ffdhe2048.txt > /path/to/dhparam.pem
|
||||
# not actually 1024 bits, this applies to all DHE >= 1024 bits
|
||||
# smtpd_tls_dh1024_param_file = /path/to/dhparam.pem
|
||||
smtpd_tls_dh1024_param_file = /usr/share/yunohost/other/ffdhe2048.pem;
|
||||
|
||||
tls_medium_cipherlist = ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384
|
||||
{% else %}
|
||||
# generated 2020-08-18, Mozilla Guideline v5.6, Postfix 3.4.14, OpenSSL 1.1.1d, modern configuration
|
||||
# https://ssl-config.mozilla.org/#server=postfix&version=3.4.14&config=modern&openssl=1.1.1d&guideline=5.6
|
||||
|
||||
smtpd_tls_mandatory_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1, !TLSv1.2
|
||||
smtpd_tls_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1, !TLSv1.2
|
||||
{% else %}
|
||||
|
||||
# This custom medium cipherlist recommendation only works if we have a DH ... which we don't, c.f. https://github.com/YunoHost/issues/issues/93
|
||||
# On the other hand, the postfix doc strongly discourage tweaking this list ... So whatever, let's keep the mandatory_ciphers to high like we did before applying the Mozilla recommendation ...
|
||||
#tls_medium_cipherlist = ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384
|
||||
tls_preempt_cipherlist = no
|
||||
|
||||
# Custom Yunohost stuff ... because we can't use the recommendation about medium cipher list ...
|
||||
smtpd_tls_mandatory_ciphers=high
|
||||
smtpd_tls_eecdh_grade = ultra
|
||||
###############################################################################
|
||||
smtpd_tls_session_cache_database = btree:${data_directory}/smtpd_scache
|
||||
smtpd_tls_loglevel=1
|
||||
|
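A hedged way to double-check what Postfix actually serves with the TLS settings above (domain.tld is a placeholder for the server's main domain; -brief needs OpenSSL >= 1.1.0):

# Show the effective values Postfix loaded
postconf smtpd_tls_security_level smtpd_tls_mandatory_protocols smtpd_tls_mandatory_ciphers
# Negotiate STARTTLS against the running smtpd and print protocol/cipher
openssl s_client -connect domain.tld:25 -starttls smtp -brief </dev/null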
@ -170,7 +171,7 @@ smtpd_milters = inet:localhost:11332
|
|||
milter_default_action = accept
|
||||
|
||||
# Avoid sending too many emails simultaneously
|
||||
smtp_destination_concurrency_limit = 1
|
||||
smtp_destination_concurrency_limit = 2
|
||||
default_destination_rate_delay = 5s
|
||||
|
||||
# Avoid email address scanning
|
||||
|
|
|
@ -2,58 +2,62 @@
|
|||
## Version 0.1
|
||||
## Adrien Beudin
|
||||
|
||||
dn: cn=mailserver,cn=schema,cn=config
|
||||
objectClass: olcSchemaConfig
|
||||
cn: mailserver
|
||||
#
|
||||
# Attributes
|
||||
attributetype ( 1.3.6.1.4.1.40328.1.20.2.1
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.40328.1.20.2.1
|
||||
NAME 'maildrop'
|
||||
DESC 'Mail addresses where mails are forwarded -- ie forwards'
|
||||
EQUALITY caseIgnoreMatch
|
||||
SUBSTR caseIgnoreSubstringsMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{512})
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.40328.1.20.2.2
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.40328.1.20.2.2
|
||||
NAME 'mailalias'
|
||||
DESC 'Mail addresses accepted by this account -- ie aliases'
|
||||
EQUALITY caseIgnoreMatch
|
||||
SUBSTR caseIgnoreSubstringsMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{512})
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.40328.1.20.2.3
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.40328.1.20.2.3
|
||||
NAME 'mailenable'
|
||||
DESC 'Mail Account validity'
|
||||
EQUALITY caseIgnoreMatch
|
||||
SUBSTR caseIgnoreSubstringsMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{8})
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.40328.1.20.2.4
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.40328.1.20.2.4
|
||||
NAME 'mailbox'
|
||||
DESC 'Mailbox path where mails are delivered'
|
||||
EQUALITY caseIgnoreMatch
|
||||
SUBSTR caseIgnoreSubstringsMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{512})
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.40328.1.20.2.5
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.40328.1.20.2.5
|
||||
NAME 'virtualdomain'
|
||||
DESC 'A mail domain name'
|
||||
EQUALITY caseIgnoreMatch
|
||||
SUBSTR caseIgnoreSubstringsMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{512})
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.40328.1.20.2.6
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.40328.1.20.2.6
|
||||
NAME 'virtualdomaindescription'
|
||||
DESC 'Virtual domain description'
|
||||
EQUALITY caseIgnoreMatch
|
||||
SUBSTR caseIgnoreSubstringsMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{512})
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.40328.1.20.2.7
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.40328.1.20.2.7
|
||||
NAME 'mailuserquota'
|
||||
DESC 'Mailbox quota for a user'
|
||||
EQUALITY caseIgnoreMatch
|
||||
SUBSTR caseIgnoreSubstringsMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{16} SINGLE-VALUE )
|
||||
|
||||
#
|
||||
# Mail Account Objectclass
|
||||
objectclass ( 1.3.6.1.4.1.40328.1.1.2.1
|
||||
olcObjectClasses: ( 1.3.6.1.4.1.40328.1.1.2.1
|
||||
NAME 'mailAccount'
|
||||
DESC 'Mail Account'
|
||||
SUP top
|
||||
|
@ -65,9 +69,9 @@ objectclass ( 1.3.6.1.4.1.40328.1.1.2.1
|
|||
mailalias $ maildrop $ mailenable $ mailbox $ mailuserquota
|
||||
)
|
||||
)
|
||||
|
||||
#
|
||||
# Mail Domain Objectclass
|
||||
objectclass ( 1.3.6.1.4.1.40328.1.1.2.2
|
||||
olcObjectClasses: ( 1.3.6.1.4.1.40328.1.1.2.2
|
||||
NAME 'mailDomain'
|
||||
DESC 'Domain mail entry'
|
||||
SUP top
|
||||
|
@ -79,9 +83,9 @@ objectclass ( 1.3.6.1.4.1.40328.1.1.2.2
|
|||
virtualdomaindescription $ mailuserquota
|
||||
)
|
||||
)
|
||||
|
||||
#
|
||||
# Mail Group Objectclass
|
||||
objectclass ( 1.3.6.1.4.1.40328.1.1.2.3
|
||||
olcObjectClasses: ( 1.3.6.1.4.1.40328.1.1.2.3
|
||||
NAME 'mailGroup' SUP top AUXILIARY
|
||||
DESC 'Mail Group'
|
||||
MUST ( mail )
|
|
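For reference, a hedged sketch of how an olc-style schema like the one above can be pushed into and read back from the cn=config database (the ldapi/EXTERNAL access and the file path are assumptions; on YunoHost the regen-conf hooks normally handle this):

# Load the converted schema into the config database
ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/ldap/schema/mailserver.ldif
# Read it back; the entry ends up as cn={N}mailserver under cn=schema
ldapsearch -Y EXTERNAL -H ldapi:/// -b cn=schema,cn=config "(cn=*mailserver)" olcAttributeTypes olcObjectClasses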
@ -1,48 +1,50 @@
|
|||
#dn: cn=yunohost,cn=schema,cn=config
|
||||
#objectClass: olcSchemaConfig
|
||||
#cn: yunohost
|
||||
# Yunohost schema for group and permission support
|
||||
|
||||
dn: cn=yunohost,cn=schema,cn=config
|
||||
objectClass: olcSchemaConfig
|
||||
cn: yunohost
|
||||
# ATTRIBUTES
|
||||
# For Permission
|
||||
attributetype ( 1.3.6.1.4.1.17953.9.1.1 NAME 'permission'
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.17953.9.1.1 NAME 'permission'
|
||||
DESC 'Yunohost permission on user and group side'
|
||||
SUP distinguishedName )
|
||||
attributetype ( 1.3.6.1.4.1.17953.9.1.2 NAME 'groupPermission'
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.17953.9.1.2 NAME 'groupPermission'
|
||||
DESC 'Yunohost permission for a group on permission side'
|
||||
SUP distinguishedName )
|
||||
attributetype ( 1.3.6.1.4.1.17953.9.1.3 NAME 'inheritPermission'
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.17953.9.1.3 NAME 'inheritPermission'
|
||||
DESC 'Yunohost permission for user on permission side'
|
||||
SUP distinguishedName )
|
||||
attributetype ( 1.3.6.1.4.1.17953.9.1.4 NAME 'URL'
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.17953.9.1.4 NAME 'URL'
|
||||
DESC 'Yunohost permission main URL'
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} SINGLE-VALUE )
|
||||
attributetype ( 1.3.6.1.4.1.17953.9.1.5 NAME 'additionalUrls'
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.17953.9.1.5 NAME 'additionalUrls'
|
||||
DESC 'Yunohost permission additional URL'
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} )
|
||||
attributetype ( 1.3.6.1.4.1.17953.9.1.6 NAME 'authHeader'
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.17953.9.1.6 NAME 'authHeader'
|
||||
DESC 'Yunohost application, enable authentication header'
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )
|
||||
attributetype ( 1.3.6.1.4.1.17953.9.1.7 NAME 'label'
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.17953.9.1.7 NAME 'label'
|
||||
DESC 'Yunohost permission label, also used for the tile name in the SSO'
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} SINGLE-VALUE )
|
||||
attributetype ( 1.3.6.1.4.1.17953.9.1.8 NAME 'showTile'
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.17953.9.1.8 NAME 'showTile'
|
||||
DESC 'Yunohost application, show/hide the tile in the SSO for this permission'
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )
|
||||
attributetype ( 1.3.6.1.4.1.17953.9.1.9 NAME 'isProtected'
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.17953.9.1.9 NAME 'isProtected'
|
||||
DESC 'Yunohost application permission protection'
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )
|
||||
# OBJECTCLASS
|
||||
# For Applications
|
||||
objectclass ( 1.3.6.1.4.1.17953.9.2.1 NAME 'groupOfNamesYnh'
|
||||
olcObjectClasses: ( 1.3.6.1.4.1.17953.9.2.1 NAME 'groupOfNamesYnh'
|
||||
DESC 'Yunohost user group'
|
||||
SUP top AUXILIARY
|
||||
MAY ( member $ businessCategory $ seeAlso $ owner $ ou $ o $ permission ) )
|
||||
objectclass ( 1.3.6.1.4.1.17953.9.2.2 NAME 'permissionYnh'
|
||||
olcObjectClasses: ( 1.3.6.1.4.1.17953.9.2.2 NAME 'permissionYnh'
|
||||
DESC 'a Yunohost application'
|
||||
SUP top AUXILIARY
|
||||
MUST ( cn $ authHeader $ label $ showTile $ isProtected )
|
||||
MAY ( groupPermission $ inheritPermission $ URL $ additionalUrls ) )
|
||||
# For User
|
||||
objectclass ( 1.3.6.1.4.1.17953.9.2.3 NAME 'userPermissionYnh'
|
||||
olcObjectClasses: ( 1.3.6.1.4.1.17953.9.2.3 NAME 'userPermissionYnh'
|
||||
DESC 'a Yunohost application'
|
||||
SUP top AUXILIARY
|
||||
MAY ( permission ) )
|
|
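A hedged illustration of what the permission attributes defined above look like once populated (the base DN follows YunoHost's usual dc=yunohost,dc=org layout; adjust it if yours differs):

# List permission entries and what/whom they are granted to
ldapsearch -Y EXTERNAL -H ldapi:/// -b ou=permission,dc=yunohost,dc=org \
    "(objectClass=permissionYnh)" cn URL authHeader groupPermission inheritPermission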
@ -1,154 +0,0 @@
|
|||
# This is the main slapd configuration file. See slapd.conf(5) for more
|
||||
# info on the configuration options.
|
||||
|
||||
#######################################################################
|
||||
# Global Directives:
|
||||
|
||||
# Features to permit
|
||||
#allow bind_v2
|
||||
|
||||
# Schema and objectClass definitions
|
||||
include /etc/ldap/schema/core.schema
|
||||
include /etc/ldap/schema/cosine.schema
|
||||
include /etc/ldap/schema/nis.schema
|
||||
include /etc/ldap/schema/inetorgperson.schema
|
||||
include /etc/ldap/schema/mailserver.schema
|
||||
include /etc/ldap/schema/sudo.schema
|
||||
include /etc/ldap/schema/yunohost.schema
|
||||
|
||||
# Where the pid file is put. The init.d script
|
||||
# will not stop the server if you change this.
|
||||
pidfile /var/run/slapd/slapd.pid
|
||||
|
||||
# List of arguments that were passed to the server
|
||||
argsfile /var/run/slapd/slapd.args
|
||||
|
||||
# Read slapd.conf(5) for possible values
|
||||
loglevel none
|
||||
|
||||
# Hashes to be used in generation of user passwords
|
||||
password-hash {SSHA}
|
||||
|
||||
# Where the dynamically loaded modules are stored
|
||||
modulepath /usr/lib/ldap
|
||||
moduleload back_mdb
|
||||
moduleload memberof
|
||||
|
||||
# The maximum number of entries that is returned for a search operation
|
||||
sizelimit 500
|
||||
|
||||
# The tool-threads parameter sets the actual amount of cpu's that is used
|
||||
# for indexing.
|
||||
tool-threads 1
|
||||
|
||||
# TLS Support
|
||||
TLSCertificateFile /etc/yunohost/certs/yunohost.org/crt.pem
|
||||
TLSCertificateKeyFile /etc/yunohost/certs/yunohost.org/key.pem
|
||||
|
||||
#######################################################################
|
||||
# Specific Backend Directives for mdb:
|
||||
# Backend specific directives apply to this backend until another
|
||||
# 'backend' directive occurs
|
||||
backend mdb
|
||||
|
||||
#######################################################################
|
||||
# Specific Directives for database #1, of type mdb:
|
||||
# Database specific directives apply to this database until another
|
||||
# 'database' directive occurs
|
||||
database mdb
|
||||
|
||||
# The base of your directory in database #1
|
||||
suffix "dc=yunohost,dc=org"
|
||||
|
||||
# rootdn directive for specifying a superuser on the database. This is needed
|
||||
# for syncrepl.
|
||||
# rootdn "cn=admin,dc=yunohost,dc=org"
|
||||
|
||||
# Where the database files are physically stored for database #1
|
||||
directory "/var/lib/ldap"
|
||||
|
||||
# Indexing options for database #1
|
||||
index objectClass eq
|
||||
index uid,sudoUser eq,sub
|
||||
index entryCSN,entryUUID eq
|
||||
index cn,mail eq
|
||||
index gidNumber,uidNumber eq
|
||||
index member,memberUid,uniqueMember eq
|
||||
index virtualdomain eq
|
||||
index permission eq
|
||||
|
||||
# Save the time that the entry gets modified, for database #1
|
||||
lastmod on
|
||||
|
||||
# Checkpoint the BerkeleyDB database periodically in case of system
|
||||
# failure and to speed slapd shutdown.
|
||||
checkpoint 512 30
|
||||
|
||||
# The userPassword by default can be changed
|
||||
# by the entry owning it if they are authenticated.
|
||||
# Others should not be able to see it, except the
|
||||
# admin entry below
|
||||
# These access lines apply to database #1 only
|
||||
access to attrs=userPassword,shadowLastChange
|
||||
by dn="cn=admin,dc=yunohost,dc=org" write
|
||||
by dn.exact="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" write
|
||||
by anonymous auth
|
||||
by self write
|
||||
by * none
|
||||
|
||||
# Personal information can be changed by the entry
|
||||
# owning it if they are authenticated.
|
||||
# Others should be able to see it.
|
||||
access to attrs=cn,gecos,givenName,mail,maildrop,displayName,sn
|
||||
by dn="cn=admin,dc=yunohost,dc=org" write
|
||||
by dn.exact="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" write
|
||||
by self write
|
||||
by * read
|
||||
|
||||
# Ensure read access to the base for things like
|
||||
# supportedSASLMechanisms. Without this you may
|
||||
# have problems with SASL not knowing what
|
||||
# mechanisms are available and the like.
|
||||
# Note that this is covered by the 'access to *'
|
||||
# ACL below too but if you change that as people
|
||||
# are wont to do you'll still need this if you
|
||||
# want SASL (and possible other things) to work
|
||||
# happily.
|
||||
access to dn.base="" by * read
|
||||
|
||||
# The admin dn has full write access, everyone else
|
||||
# can read everything.
|
||||
access to *
|
||||
by dn="cn=admin,dc=yunohost,dc=org" write
|
||||
by dn.exact="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" write
|
||||
by group/groupOfNames/Member="cn=admin,ou=groups,dc=yunohost,dc=org" write
|
||||
by * read
|
||||
|
||||
# Configure Memberof Overlay (used for Yunohost permission)
|
||||
|
||||
# Link user <-> group
|
||||
#dn: olcOverlay={0}memberof,olcDatabase={1}mdb,cn=config
|
||||
overlay memberof
|
||||
memberof-group-oc groupOfNamesYnh
|
||||
memberof-member-ad member
|
||||
memberof-memberof-ad memberOf
|
||||
memberof-dangling error
|
||||
memberof-refint TRUE
|
||||
|
||||
# Link permission <-> groups
|
||||
#dn: olcOverlay={1}memberof,olcDatabase={1}mdb,cn=config
|
||||
overlay memberof
|
||||
memberof-group-oc permissionYnh
|
||||
memberof-member-ad groupPermission
|
||||
memberof-memberof-ad permission
|
||||
memberof-dangling error
|
||||
memberof-refint TRUE
|
||||
|
||||
# Link permission <-> user
|
||||
#dn: olcOverlay={2}memberof,olcDatabase={1}mdb,cn=config
|
||||
overlay memberof
|
||||
memberof-group-oc permissionYnh
|
||||
memberof-member-ad inheritPermission
|
||||
memberof-memberof-ad permission
|
||||
memberof-dangling error
|
||||
memberof-refint TRUE
|
235
data/templates/slapd/slapd.ldif
Normal file
|
@ -0,0 +1,235 @@
|
|||
# OpenLDAP server configuration for Yunohost
|
||||
# ------------------------------------------
|
||||
#
|
||||
# Because of YunoHost's regen-conf mechanism, it is NOT POSSIBLE to
|
||||
# edit the config database using an LDAP request.
|
||||
#
|
||||
# If you wish to edit the config database, you should edit THIS file
|
||||
# and update the config database based on this file.
|
||||
#
|
||||
# Config database customization:
|
||||
# 1. Edit this file as you want.
|
||||
# 2. Apply your modifications. To do so, just run the following command in a shell:
|
||||
# $ /usr/share/yunohost/hooks/conf_regen/06-slapd apply_config
|
||||
#
|
||||
# Note that if you customize this file, YunoHost's regen-conf will NOT
|
||||
# overwrite this file. But that also means that you should be careful about
|
||||
# upgrades, because they may ship important/necessary changes to this
|
||||
# configuration that you will have to propagate yourself.
|
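Alongside the apply_config hook mentioned above, a hedged way to check what the live config database actually contains after applying changes (read-only, so it does not conflict with the warning about editing via LDAP):

# Dump the cn=config database and compare it with this template
slapcat -n 0 | less
# Or list the config entries over ldapi as root
ldapsearch -Y EXTERNAL -H ldapi:/// -b cn=config dn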
||||
|
||||
#
|
||||
# Main configuration
|
||||
#
|
||||
dn: cn=config
|
||||
objectClass: olcGlobal
|
||||
cn: config
|
||||
olcConfigFile: /etc/ldap/slapd.conf
|
||||
olcConfigDir: /etc/ldap/slapd.d/
|
||||
# List of arguments that were passed to the server
|
||||
olcArgsFile: /var/run/slapd/slapd.args
|
||||
#
|
||||
olcAttributeOptions: lang-
|
||||
olcAuthzPolicy: none
|
||||
olcConcurrency: 0
|
||||
olcConnMaxPending: 100
|
||||
olcConnMaxPendingAuth: 1000
|
||||
olcIdleTimeout: 0
|
||||
olcIndexSubstrIfMaxLen: 4
|
||||
olcIndexSubstrIfMinLen: 2
|
||||
olcIndexSubstrAnyLen: 4
|
||||
olcIndexSubstrAnyStep: 2
|
||||
olcIndexIntLen: 4
|
||||
olcListenerThreads: 1
|
||||
olcLocalSSF: 71
|
||||
# Read slapd.conf(5) for possible values
|
||||
olcLogLevel: None
|
||||
# Where the pid file is put. The init.d script
|
||||
# will not stop the server if you change this.
|
||||
olcPidFile: /var/run/slapd/slapd.pid
|
||||
olcReverseLookup: FALSE
|
||||
olcThreads: 16
|
||||
# TLS Support
|
||||
olcTLSCertificateFile: /etc/yunohost/certs/yunohost.org/crt.pem
|
||||
olcTLSCertificateKeyFile: /etc/yunohost/certs/yunohost.org/key.pem
|
||||
olcTLSVerifyClient: never
|
||||
olcTLSProtocolMin: 0.0
|
||||
# The tool-threads parameter sets the actual amount of cpu's that is used
|
||||
# for indexing.
|
||||
olcToolThreads: 1
|
||||
structuralObjectClass: olcGlobal
|
||||
|
||||
#
|
||||
# Schema and objectClass definitions
|
||||
#
|
||||
dn: cn=schema,cn=config
|
||||
objectClass: olcSchemaConfig
|
||||
cn: schema
|
||||
|
||||
include: file:///etc/ldap/schema/core.ldif
|
||||
include: file:///etc/ldap/schema/cosine.ldif
|
||||
include: file:///etc/ldap/schema/nis.ldif
|
||||
include: file:///etc/ldap/schema/inetorgperson.ldif
|
||||
include: file:///etc/ldap/schema/mailserver.ldif
|
||||
include: file:///etc/ldap/schema/sudo.ldif
|
||||
include: file:///etc/ldap/schema/permission.ldif
|
||||
|
||||
#
|
||||
# Module management
|
||||
#
|
||||
dn: cn=module{0},cn=config
|
||||
objectClass: olcModuleList
|
||||
cn: module{0}
|
||||
# Where the dynamically loaded modules are stored
|
||||
olcModulePath: /usr/lib/ldap
|
||||
olcModuleLoad: {0}back_mdb
|
||||
olcModuleLoad: {1}memberof
|
||||
structuralObjectClass: olcModuleList
|
||||
|
||||
#
|
||||
# Frontend database
|
||||
#
|
||||
dn: olcDatabase={-1}frontend,cn=config
|
||||
objectClass: olcDatabaseConfig
|
||||
objectClass: olcFrontendConfig
|
||||
olcDatabase: {-1}frontend
|
||||
olcAddContentAcl: FALSE
|
||||
olcLastMod: TRUE
|
||||
olcSchemaDN: cn=Subschema
|
||||
# Hashes to be used in generation of user passwords
|
||||
olcPasswordHash: {SSHA}
|
||||
structuralObjectClass: olcDatabaseConfig
|
||||
|
||||
#
|
||||
# Config database Configuration (#0)
|
||||
#
|
||||
dn: olcDatabase={0}config,cn=config
|
||||
objectClass: olcDatabaseConfig
|
||||
olcDatabase: {0}config
|
||||
# Give access to root user.
|
||||
# This gives the admin the possibility to customize the LDAP configuration
|
||||
olcAccess: {0}to * by * none
|
||||
olcAddContentAcl: TRUE
|
||||
olcLastMod: TRUE
|
||||
olcRootDN: cn=config
|
||||
structuralObjectClass: olcDatabaseConfig
|
||||
|
||||
#
|
||||
# Main database Configuration (#1)
|
||||
#
|
||||
dn: olcDatabase={1}mdb,cn=config
|
||||
objectClass: olcDatabaseConfig
|
||||
objectClass: olcMdbConfig
|
||||
olcDatabase: {1}mdb
|
||||
# The base of your directory in database #1
|
||||
olcSuffix: dc=yunohost,dc=org
|
||||
#
|
||||
# The userPassword by default can be changed
|
||||
# by the entry owning it if they are authenticated.
|
||||
# Others should not be able to see it, except the
|
||||
# admin entry below
|
||||
# These access lines apply to database #1 only
|
||||
olcAccess: {0}to attrs=userPassword,shadowLastChange
|
||||
by dn.base="cn=admin,dc=yunohost,dc=org" write
|
||||
by dn.base="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" write
|
||||
by anonymous auth
|
||||
by self write
|
||||
by * none
|
||||
#
|
||||
# Personal information can be changed by the entry
|
||||
# owning it if they are authenticated.
|
||||
# Others should be able to see it.
|
||||
olcAccess: {1}to attrs=cn,gecos,givenName,mail,maildrop,displayName,sn
|
||||
by dn.base="cn=admin,dc=yunohost,dc=org" write
|
||||
by dn.base="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" write
|
||||
by self write
|
||||
by * read
|
||||
#
|
||||
# Ensure read access to the base for things like
|
||||
# supportedSASLMechanisms. Without this you may
|
||||
# have problems with SASL not knowing what
|
||||
# mechanisms are available and the like.
|
||||
# Note that this is covered by the 'access to *'
|
||||
# ACL below too but if you change that as people
|
||||
# are wont to do you'll still need this if you
|
||||
# want SASL (and possible other things) to work
|
||||
# happily.
|
||||
olcAccess: {2}to dn.base=""
|
||||
by * read
|
||||
#
|
||||
# The admin dn has full write access, everyone else
|
||||
# can read everything.
|
||||
olcAccess: {3}to *
|
||||
by dn.base="cn=admin,dc=yunohost,dc=org" write
|
||||
by dn.base="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" write
|
||||
by group/groupOfNames/member.exact="cn=admin,ou=groups,dc=yunohost,dc=org" write
|
||||
by * read
|
||||
#
|
||||
olcAddContentAcl: FALSE
|
||||
# Save the time that the entry gets modified, for database #1
|
||||
olcLastMod: TRUE
|
||||
# Where the database files are physically stored for database #1
|
||||
olcDbDirectory: /var/lib/ldap
|
||||
# Checkpoint the BerkeleyDB database periodically in case of system
|
||||
# failure and to speed slapd shutdown.
|
||||
olcDbCheckpoint: 512 30
|
||||
olcDbNoSync: FALSE
|
||||
# Indexing options for database #1
|
||||
olcDbIndex: objectClass eq
|
||||
olcDbIndex: entryUUID eq
|
||||
olcDbIndex: entryCSN eq
|
||||
olcDbIndex: cn eq
|
||||
olcDbIndex: uid eq,sub
|
||||
olcDbIndex: uidNumber eq
|
||||
olcDbIndex: gidNumber eq
|
||||
olcDbIndex: sudoUser eq,sub
|
||||
olcDbIndex: member eq
|
||||
olcDbIndex: mail eq
|
||||
olcDbIndex: memberUid eq
|
||||
olcDbIndex: uniqueMember eq
|
||||
olcDbIndex: virtualdomain eq
|
||||
olcDbIndex: permission eq
|
||||
olcDbMaxSize: 10485760
|
||||
structuralObjectClass: olcMdbConfig
|
||||
|
||||
#
|
||||
# Configure Memberof Overlay (used for Yunohost permission)
|
||||
#
|
||||
|
||||
# Link user <-> group
|
||||
dn: olcOverlay={0}memberof,olcDatabase={1}mdb,cn=config
|
||||
objectClass: olcOverlayConfig
|
||||
objectClass: olcMemberOf
|
||||
olcOverlay: {0}memberof
|
||||
olcMemberOfDangling: error
|
||||
olcMemberOfDanglingError: constraintViolation
|
||||
olcMemberOfRefInt: TRUE
|
||||
olcMemberOfGroupOC: groupOfNamesYnh
|
||||
olcMemberOfMemberAD: member
|
||||
olcMemberOfMemberOfAD: memberOf
|
||||
structuralObjectClass: olcMemberOf
|
||||
|
||||
# Link permission <-> groups
|
||||
dn: olcOverlay={1}memberof,olcDatabase={1}mdb,cn=config
|
||||
objectClass: olcOverlayConfig
|
||||
objectClass: olcMemberOf
|
||||
olcOverlay: {1}memberof
|
||||
olcMemberOfDangling: error
|
||||
olcMemberOfDanglingError: constraintViolation
|
||||
olcMemberOfRefInt: TRUE
|
||||
olcMemberOfGroupOC: permissionYnh
|
||||
olcMemberOfMemberAD: groupPermission
|
||||
olcMemberOfMemberOfAD: permission
|
||||
structuralObjectClass: olcMemberOf
|
||||
|
||||
# Link permission <-> user
|
||||
dn: olcOverlay={2}memberof,olcDatabase={1}mdb,cn=config
|
||||
objectClass: olcOverlayConfig
|
||||
objectClass: olcMemberOf
|
||||
olcOverlay: {2}memberof
|
||||
olcMemberOfDangling: error
|
||||
olcMemberOfDanglingError: constraintViolation
|
||||
olcMemberOfRefInt: TRUE
|
||||
olcMemberOfGroupOC: permissionYnh
|
||||
olcMemberOfMemberAD: inheritPermission
|
||||
olcMemberOfMemberOfAD: permission
|
||||
structuralObjectClass: olcMemberOf
|
|
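A hedged example of the net effect of the three overlays above: once a user's DN is added to a group's member (or to a permission's inheritPermission), slapd maintains the reverse attribute on the user automatically. 'johndoe' and the base DN are placeholders following YunoHost's usual layout:

# memberOf and permission on the user entry are filled in by the overlays
ldapsearch -Y EXTERNAL -H ldapi:/// \
    -b uid=johndoe,ou=users,dc=yunohost,dc=org memberOf permission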
@ -1,76 +1,78 @@
|
|||
#
|
||||
# OpenLDAP schema file for Sudo
|
||||
# Save as /etc/openldap/schema/sudo.schema
|
||||
# Save as /etc/openldap/schema/sudo.ldif
|
||||
#
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.15953.9.1.1
|
||||
dn: cn=sudo,cn=schema,cn=config
|
||||
objectClass: olcSchemaConfig
|
||||
cn: sudo
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.15953.9.1.1
|
||||
NAME 'sudoUser'
|
||||
DESC 'User(s) who may run sudo'
|
||||
EQUALITY caseExactIA5Match
|
||||
SUBSTR caseExactIA5SubstringsMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 )
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.15953.9.1.2
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.15953.9.1.2
|
||||
NAME 'sudoHost'
|
||||
DESC 'Host(s) who may run sudo'
|
||||
EQUALITY caseExactIA5Match
|
||||
SUBSTR caseExactIA5SubstringsMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 )
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.15953.9.1.3
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.15953.9.1.3
|
||||
NAME 'sudoCommand'
|
||||
DESC 'Command(s) to be executed by sudo'
|
||||
EQUALITY caseExactIA5Match
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 )
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.15953.9.1.4
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.15953.9.1.4
|
||||
NAME 'sudoRunAs'
|
||||
DESC 'User(s) impersonated by sudo (deprecated)'
|
||||
EQUALITY caseExactIA5Match
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 )
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.15953.9.1.5
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.15953.9.1.5
|
||||
NAME 'sudoOption'
|
||||
DESC 'Options(s) followed by sudo'
|
||||
EQUALITY caseExactIA5Match
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 )
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.15953.9.1.6
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.15953.9.1.6
|
||||
NAME 'sudoRunAsUser'
|
||||
DESC 'User(s) impersonated by sudo'
|
||||
EQUALITY caseExactIA5Match
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 )
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.15953.9.1.7
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.15953.9.1.7
|
||||
NAME 'sudoRunAsGroup'
|
||||
DESC 'Group(s) impersonated by sudo'
|
||||
EQUALITY caseExactIA5Match
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 )
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.15953.9.1.8
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.15953.9.1.8
|
||||
NAME 'sudoNotBefore'
|
||||
DESC 'Start of time interval for which the entry is valid'
|
||||
EQUALITY generalizedTimeMatch
|
||||
ORDERING generalizedTimeOrderingMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 )
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.15953.9.1.9
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.15953.9.1.9
|
||||
NAME 'sudoNotAfter'
|
||||
DESC 'End of time interval for which the entry is valid'
|
||||
EQUALITY generalizedTimeMatch
|
||||
ORDERING generalizedTimeOrderingMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 )
|
||||
|
||||
attributeTypes ( 1.3.6.1.4.1.15953.9.1.10
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.15953.9.1.10
|
||||
NAME 'sudoOrder'
|
||||
DESC 'an integer to order the sudoRole entries'
|
||||
EQUALITY integerMatch
|
||||
ORDERING integerOrderingMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 )
|
||||
|
||||
objectclass ( 1.3.6.1.4.1.15953.9.2.1 NAME 'sudoRole' SUP top STRUCTURAL
|
||||
#
|
||||
olcObjectClasses: ( 1.3.6.1.4.1.15953.9.2.1 NAME 'sudoRole' SUP top STRUCTURAL
|
||||
DESC 'Sudoer Entries'
|
||||
MUST ( cn )
|
||||
MAY ( sudoUser $ sudoHost $ sudoCommand $ sudoRunAs $ sudoRunAsUser $ sudoRunAsGroup $ sudoOption $ sudoOrder $ sudoNotBefore $ sudoNotAfter $
|
||||
description )
|
||||
MAY ( sudoUser $ sudoHost $ sudoCommand $ sudoRunAs $ sudoRunAsUser $ sudoRunAsGroup $ sudoOption $ sudoOrder $ sudoNotBefore $ sudoNotAfter $ description )
|
||||
)
|
|
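A hedged example of a sudoRole entry that this schema allows (the DN and the ou=sudo branch are illustrative only; YunoHost ships and manages its own sudo entries):

ldapadd -Y EXTERNAL -H ldapi:/// <<'EOF'
dn: cn=admins,ou=sudo,dc=yunohost,dc=org
objectClass: sudoRole
cn: admins
sudoUser: %admins
sudoHost: ALL
sudoCommand: ALL
EOF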
@ -27,9 +27,6 @@ HostKey {{ key }}{% endfor %}
|
|||
MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,umac-128@openssh.com
|
||||
{% endif %}
|
||||
|
||||
# Use kernel sandbox mechanisms where possible in unprivileged processes
|
||||
UsePrivilegeSeparation sandbox
|
||||
|
||||
# LogLevel VERBOSE logs user's key fingerprint on login.
|
||||
# Needed to have a clear audit track of which key was used to log in.
|
||||
SyslogFacility AUTH
|
||||
|
|
|
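A hedged way to validate the regenerated sshd configuration above before reloading the daemon:

# Syntax check of /etc/ssh/sshd_config
sshd -t
# Print the effective values (requires root)
sshd -T | grep -Ei '^(macs|ciphers|syslogfacility)'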
@ -1,5 +0,0 @@
|
|||
# https://wiki.debian.org/UnattendedUpgrades#automatic_call_via_.2Fetc.2Fapt.2Fapt.conf.d.2F02periodic
|
||||
APT::Periodic::Enable "1";
|
||||
APT::Periodic::Update-Package-Lists "1";
|
||||
APT::Periodic::Unattended-Upgrade "1";
|
||||
APT::Periodic::Verbose "1";
|
|
@ -1,36 +0,0 @@
|
|||
// Automatically upgrade packages from these (origin, archive) pairs
|
||||
Unattended-Upgrade::Allowed-Origins {
|
||||
"${distro_id} stable";
|
||||
"${distro_id} testing";
|
||||
"Depot-Debian testing";
|
||||
"${distro_id} ${distro_codename}-security";
|
||||
"${distro_id} ${distro_codename}-updates";
|
||||
// "${distro_id} ${distro_codename}-proposed-updates";
|
||||
};
|
||||
|
||||
// List of packages to not update
|
||||
Unattended-Upgrade::Package-Blacklist {
|
||||
// "vim";
|
||||
// "libc6";
|
||||
// "libc6-dev";
|
||||
// "libc6-i686";
|
||||
};
|
||||
|
||||
// Send email to this address for problems or packages upgrades
|
||||
// If empty or unset then no email is sent, make sure that you
|
||||
// have a working mail setup on your system. The package 'mailx'
|
||||
// must be installed or anything that provides /usr/bin/mail.
|
||||
//Unattended-Upgrade::Mail "root@localhost";
|
||||
|
||||
// Do automatic removal of new unused dependencies after the upgrade
|
||||
// (equivalent to apt-get autoremove)
|
||||
Unattended-Upgrade::Remove-Unused-Dependencies "true";
|
||||
|
||||
// Automatically reboot *WITHOUT CONFIRMATION* if a
|
||||
// the file /var/run/reboot-required is found after the upgrade
|
||||
Unattended-Upgrade::Automatic-Reboot "false";
|
||||
|
||||
|
||||
// Use apt bandwidth limit feature, this example limits the download
|
||||
// speed to 70kb/sec
|
||||
//Acquire::http::Dl-Limit "70";
|
|
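With these two custom files dropped, a hedged way to inspect what the stock unattended-upgrades setup would now do on such a system:

# Show the periodic settings apt effectively picks up
apt-config dump | grep -i periodic
# Simulate a run without changing anything
unattended-upgrade --dry-run --debug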
@ -20,10 +20,9 @@ nginx:
|
|||
test_conf: nginx -t
|
||||
needs_exposed_ports: [80, 443]
|
||||
category: web
|
||||
nslcd: {}
|
||||
php7.0-fpm:
|
||||
log: /var/log/php7.0-fpm.log
|
||||
test_conf: php-fpm7.0 --test
|
||||
php7.3-fpm:
|
||||
log: /var/log/php7.3-fpm.log
|
||||
test_conf: php-fpm7.3 --test
|
||||
category: web
|
||||
postfix:
|
||||
log: [/var/log/mail.log,/var/log/mail.err]
|
||||
|
@ -64,3 +63,5 @@ postgrey: null
|
|||
spamassassin: null
|
||||
rmilter: null
|
||||
php5-fpm: null
|
||||
php7.0-fpm: null
|
||||
nslcd: null
|
||||
|
|
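These definitions are what the service-management commands rely on; a hedged sanity check after editing them (assuming a standard YunoHost install):

# List known services and their state
yunohost service status
# Status of a single service declared above
yunohost service status nginx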
140
debian/changelog
vendored
|
@ -1,3 +1,143 @@
|
|||
yunohost (4.0.4) stable; urgency=low
|
||||
|
||||
- Debugging and robustness improvements for postgresql 9.6 -> 11 and xtables->nftables migrations (accc2da4, 59bd7d66, 4cb6f7fd, 4b14402c)
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Wed, 12 Aug 2020 18:14:00 +0200
|
||||
|
||||
yunohost (4.0.3) stable; urgency=low
|
||||
|
||||
- Bump version number for stable release
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Wed, 29 Jul 2020 17:00:00 +0200
|
||||
|
||||
yunohost (4.0.2~beta) testing; urgency=low
|
||||
|
||||
- [mod] Rebase on stretch-unstable to include recent changes
|
||||
- [fix] Create admin's home during postinstall (#1021)
|
||||
|
||||
Thanks to all contributors <3 ! (Kay0u)
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Fri, 19 Jun 2020 15:16:26 +0200
|
||||
|
||||
yunohost (4.0.1~alpha) testing; urgency=low
|
||||
|
||||
- [fix] It just make no sense to backup/restore the mysql password... (#911)
|
||||
- [fix] Fix getopts and helpers (#885, #886)
|
||||
- [fix] Explicitly create home using mkhomedir_helper instead of obscure pam rule that doesn't work anymore (b67ff314)
|
||||
- [fix] Ldap interface seems to expect lists everywhere now? (fb8c2b7b)
|
||||
- [deb] Clean control file, remove some legacy Conflicts and Replaces (ca0d4933)
|
||||
- [deb] Add conflicts with versions from backports for critical dependencies (#967)
|
||||
- [cleanup] Stale / legacy code (217aaa36, d77da6a0, af047468, 82d468a3)
|
||||
- [conf] Automatically disable/stop systemd-resolved that conflicts with dnsmasq on fresh setups ... (e7214b37)
|
||||
- [conf] Remove deprecated option in sshd conf, c.f. https://patchwork.openembedded.org/patch/139981/ (2723d245)
|
||||
- [conf] Small tweak in dovecot conf (deprecated settings) (dc0481e2)
|
||||
- [conf] Update nslcd and nsswitch stuff using new Buster's default configs + get rid of nslcd service, only keep the regen-conf part (6ef3520f)
|
||||
- [php] Migrate from php7.0 to php7.3 (3374e653, 9be10506, dd9564d3, 9679c291, 212a15e4, 25fcaa19, c4ad66f5)
|
||||
- [psql] Migrate from psql 11 to 9.6 (e88aed72, 4920d4f9, c70b0ae4)
|
||||
- [firewall] Migrate from xtable to nftable (05fb58f2, 2c4a8b73, 625d5372)
|
||||
- [slapd] Rework slapd regenconf to use new backend (#984)
|
||||
|
||||
Thanks to all contributors <3 ! (Étienne M., Josué, Kay0u)
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Fri, 05 Jun 2020 03:10:09 +0200
|
||||
|
||||
yunohost (3.8.5.5) stable; urgency=low
|
||||
|
||||
- [enh] Allow to extend the nginx default_server configuration (f1bfc521)
|
||||
- [mod] Move redirect to /yunohost/admin to a separate nginx conf file to allow customizing it more easily (ac9182d6)
|
||||
- [enh] Make sure to validate/upgrade that we don't have any active weak certificate used by nginx at the beginning of the buster migration, otherwise nginx will later miserably fail to start (d4358897)
|
||||
- [fix] get_files_diff crashing if {orig,new}_file is None (7bfe564a)
|
||||
- [enh] Remove some useless message about file that "wasn't deleted because it doesn't exist." (#1024)
|
||||
- [mod] Remove useless robot protection code (#1026)
|
||||
- [fix] Let's not redefine the value for the 'service' var ... (1a2f26dc)
|
||||
- [fix] More general stretch->buster patching for sources.list (#1028)
|
||||
- [mod] Tweak custom disclaimer about the migration still being a bit touchy in preparation for stable release (852dea07)
|
||||
- [mod] Typo/wording in en.json (#1030)
|
||||
- [i18n] Translations updated for Catalan, French, Italian, Occitan
|
||||
|
||||
Thanks to all contributors <3 ! (É. Gaspar, Kay0u, L. Noferini, ppr, Quentí, xaloc33)
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Mon, 27 Jul 2020 19:03:33 +0200
|
||||
|
||||
yunohost (3.8.5.4) testing; urgency=low
|
||||
|
||||
- [fix] Fix unscd version parsing *again*
|
||||
- [fix] Enforce permissions on rspamd log directory
|
||||
- [enh] Ignore stupid warnings about sudo-ldap that is already provided
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Sun, 21 Jun 2020 23:37:09 +0200
|
||||
|
||||
yunohost (3.8.5.3) testing; urgency=low
|
||||
|
||||
- [fix] Fix the fix about unscd downgrade :/
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Fri, 19 Jun 2020 18:50:58 +0200
|
||||
|
||||
yunohost (3.8.5.2) testing; urgency=low
|
||||
|
||||
- [fix] Small issue with unscd upgrade/downgrade ... new version ain't always 0.53.1, so find it using dirty scrapping
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Thu, 18 Jun 2020 16:19:35 +0200
|
||||
|
||||
yunohost (3.8.5.1) testing; urgency=low
|
||||
|
||||
- [fix] Update Stretch->Buster migration disclaimer to make it clear that this is alpha-stage
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Sat, 06 Jun 2020 03:30:00 +0200
|
||||
|
||||
yunohost (3.8.5) testing; urgency=low
|
||||
|
||||
- [enh] Add migration procedure for Stretch->Buster (a2b83c0f, a26411db, 9f1211e9, e544bf3e, a0511cca)
|
||||
- [fix] Disable/skip ntp when inside a container (9d0c0924)
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Sat, 06 Jun 2020 02:11:51 +0200
|
||||
|
||||
yunohost (3.8.4.9) stable; urgency=low
|
||||
|
||||
- [fix] Force lowercase on domain names (804f4b3e)
|
||||
- [fix] Add dirmngr to Depends:, needed for apt-key / gpg (cd115ed8)
|
||||
- [fix] Improve debugging when diagnosis ain't happy when renewing certs (0f0194be)
|
||||
- [enh] Add yunohost version to logs metadata (d615546b)
|
||||
- [enh] Alway filter irrelevant log lines when sharing it (38704cba, 51d53be5)
|
||||
- [fix] Regen-conf outputing many 'forget-about-it' because of files flagged as to be removed (f4525488)
|
||||
- [fix] postfix per-domain destination concurrency (#988)
|
||||
- [fix] Call regenconf for ssh before the general regenconf during the postinstall to avoid an irrelevant warning (7805837b)
|
||||
- [i18n] Translations updated for Catalan, French, German
|
||||
|
||||
Thanks to all contributors <3 ! (taziden, ljf, ppr, xaloc33, Yasss Gurl)
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Thu, 18 Jun 2020 15:13:01 +0200
|
||||
|
||||
yunohost (3.8.4.8) stable; urgency=low
|
||||
|
||||
- [fix] Don't add unprotected_urls if it's already in skipped_urls (#1005)
|
||||
- [enh] Add pre-defined DHE group and set up Nginx to use it (#1007)
|
||||
- [fix] Make sure to propagate change in slapd systemd conf during initial install (2d42480f)
|
||||
- [fix] More accurate grep to avoid mistakenly grepping commented lines... (2408a620)
|
||||
- [enh] Update n to 6.5.1 (#1012)
|
||||
- [fix] Set sury default pinning to 600 (653c5fde)
|
||||
- [enh] Clean stale file/hashes in regen-conf (#1009)
|
||||
- [fix] Weirdness in regen-conf mechanism for SSH conf (#1014)
|
||||
|
||||
Thanks to all contributors <3 ! (É. Gaspar, Josué, SohKa)
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Sat, 06 Jun 2020 01:59:08 +0200
|
||||
|
||||
yunohost (3.8.4.7) stable; urgency=low
|
||||
|
||||
- [fix] Remove some remains of glances (17eec25e)
|
||||
- [fix] Force external resolution for reverse DNS dig (852cd14c)
|
||||
- [fix] Make sure mysql is an alias to mariadb (e24191ce, ca89607d)
|
||||
- [fix] Path for ynh_add_fpm_config template in restore (#1001)
|
||||
- [fix] Add -o Acquire::Retries=3 to fix some stupid network issues happening sometimes with apt (03432349)
|
||||
- [fix] ynh_setup_source: Retry wget on non-critical failures to try to avoid tmp dns issues (3d66eaec)
|
||||
- [fix] ynh_setup_source: Calling ynh_print_err in case of error didn't work, and we probably want a ynh_die here (55036fad)
|
||||
- [i18n] Translations updated for Catalan, French, Italian, Occitan
|
||||
|
||||
Thanks to all contributors <3 ! (JimboJoe, Leandro N., ppr, Quentí, xaloc33, yalh76)
|
||||
|
||||
-- Alexandre Aubin <alex.aubin@mailoo.org> Thu, 04 Jun 2020 02:28:33 +0200
|
||||
|
||||
yunohost (3.8.4.6) stable; urgency=low
|
||||
|
||||
- [fix] Bump server_names_hash_bucket_size to 128 to avoid nginx exploding for stupid reasons (b3db4d92)
|
||||
|
|
6
debian/conf/pam/mkhomedir
vendored
|
@ -1,6 +0,0 @@
|
|||
Name: Create home directory during login
|
||||
Default: yes
|
||||
Priority: 900
|
||||
Session-Type: Additional
|
||||
Session:
|
||||
required pam_mkhomedir.so umask=0022 skel=/etc/skel
|
33
debian/control
vendored
|
@ -11,12 +11,11 @@ Package: yunohost
|
|||
Essential: yes
|
||||
Architecture: all
|
||||
Depends: ${python:Depends}, ${misc:Depends}
|
||||
, moulinette (>= 3.7), ssowat (>= 3.7)
|
||||
, moulinette (>= 4.0.0~alpha), ssowat (>= 4.0.0~alpha)
|
||||
, python-psutil, python-requests, python-dnspython, python-openssl
|
||||
, python-miniupnpc, python-dbus, python-jinja2
|
||||
, python-toml, python-packaging
|
||||
, apt, apt-transport-https
|
||||
, nginx, nginx-extras (>=1.6.2)
|
||||
, python-toml, python-packaging, python-publicsuffix
|
||||
, apt, apt-transport-https, dirmngr
|
||||
, php-fpm, php-ldap, php-intl
|
||||
, mariadb-server, php-mysql | php-mysqlnd
|
||||
, openssh-server, iptables, fail2ban, dnsutils, bind9utils
|
||||
|
@ -25,31 +24,29 @@ Depends: ${python:Depends}, ${misc:Depends}
|
|||
, dnsmasq, avahi-daemon, libnss-mdns, resolvconf, libnss-myhostname
|
||||
, postfix, postfix-ldap, postfix-policyd-spf-perl, postfix-pcre
|
||||
, dovecot-core, dovecot-ldap, dovecot-lmtpd, dovecot-managesieved, dovecot-antispam
|
||||
, rspamd (>= 1.6.0), opendkim-tools, postsrsd, procmail, mailutils
|
||||
, rspamd, opendkim-tools, postsrsd, procmail, mailutils
|
||||
, redis-server
|
||||
, metronome (>=3.14.0)
|
||||
, git, curl, wget, cron, unzip, jq
|
||||
, git, curl, wget, cron, unzip, jq, bc
|
||||
, lsb-release, haveged, fake-hwclock, equivs, lsof, whois, python-publicsuffix
|
||||
Recommends: yunohost-admin
|
||||
, ntp, inetutils-ping | iputils-ping
|
||||
, bash-completion, rsyslog
|
||||
, php-gd, php-curl, php-gettext, php-mcrypt
|
||||
, php-gd, php-curl, php-gettext
|
||||
, python-pip
|
||||
, unattended-upgrades
|
||||
, libdbd-ldap-perl, libnet-dns-perl
|
||||
Suggests: htop, vim, rsync, acpi-support-base, udisks2
|
||||
Conflicts: iptables-persistent
|
||||
, moulinette-yunohost, yunohost-config
|
||||
, yunohost-config-others, yunohost-config-postfix
|
||||
, yunohost-config-dovecot, yunohost-config-slapd
|
||||
, yunohost-config-nginx, yunohost-config-amavis
|
||||
, yunohost-config-mysql, yunohost-predepends
|
||||
, apache2, bind9
|
||||
Replaces: moulinette-yunohost, yunohost-config
|
||||
, yunohost-config-others, yunohost-config-postfix
|
||||
, yunohost-config-dovecot, yunohost-config-slapd
|
||||
, yunohost-config-nginx, yunohost-config-amavis
|
||||
, yunohost-config-mysql, yunohost-predepends
|
||||
, apache2
|
||||
, bind9
|
||||
, nginx-extras (>= 1.16)
|
||||
, openssl (>= 1.1.1g)
|
||||
, slapd (>= 2.4.49)
|
||||
, dovecot-core (>= 1:2.3.7)
|
||||
, redis-server (>= 5:5.0.7)
|
||||
, fail2ban (>= 0.11)
|
||||
, iptables (>= 1.8.3)
|
||||
Description: manageable and configured self-hosting server
|
||||
YunoHost aims to make self-hosting accessible to everyone. It configures
|
||||
an email, Web and IM server alongside a LDAP base. It also provides
|
||||
|
|
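A hedged way to check that an installed system satisfies the versioned Conflicts added above (package names copied from the control file):

# Print installed versions of the packages that now carry minimum-version conflicts
dpkg-query -W -f='${Package} ${Version}\n' nginx-extras openssl slapd dovecot-core redis-server fail2ban iptables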
2
debian/install
vendored
|
@ -8,11 +8,11 @@ data/other/yunoprompt.service /etc/systemd/system/
|
|||
data/other/password/* /usr/share/yunohost/other/password/
|
||||
data/other/dpkg-origins/yunohost /etc/dpkg/origins
|
||||
data/other/dnsbl_list.yml /usr/share/yunohost/other/
|
||||
data/other/ffdhe2048.pem /usr/share/yunohost/other/
|
||||
data/other/* /usr/share/yunohost/yunohost-config/moulinette/
|
||||
data/templates/* /usr/share/yunohost/templates/
|
||||
data/helpers /usr/share/yunohost/
|
||||
data/helpers.d/* /usr/share/yunohost/helpers.d/
|
||||
debian/conf/pam/* /usr/share/pam-configs/
|
||||
lib/metronome/modules/* /usr/lib/metronome/modules/
|
||||
locales/* /usr/lib/moulinette/yunohost/locales/
|
||||
src/yunohost /usr/lib/moulinette
|
||||
|
|
5
debian/postinst
vendored
|
@ -29,11 +29,6 @@ do_configure() {
|
|||
|
||||
# Yunoprompt
|
||||
systemctl enable yunoprompt.service
|
||||
|
||||
# remove old PAM config and update it
|
||||
[[ ! -f /usr/share/pam-configs/my_mkhomedir ]] \
|
||||
|| rm /usr/share/pam-configs/my_mkhomedir
|
||||
pam-auth-update --package
|
||||
}
|
||||
|
||||
# summary of how this script can be called:
|
||||
|
|
|
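The hunk above drops the mkhomedir PAM handling from postinst (the custom pam-config file is also removed elsewhere in this commit; per the changelog above, homes are now created explicitly via mkhomedir_helper). A hedged way to observe what a system actually ends up with:

# List any mkhomedir pam-config profiles still present
ls /usr/share/pam-configs/ | grep -i mkhomedir || echo 'no mkhomedir profile'
# Show whether any pam_mkhomedir rule remains in the generated PAM config
grep -R pam_mkhomedir /etc/pam.d/ || echo 'no pam_mkhomedir rule'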
@ -537,7 +537,7 @@
|
|||
"diagnosis_dns_good_conf": "Els registres DNS han estat correctament configurats pel domini {domain} (categoria {category})",
|
||||
"diagnosis_dns_bad_conf": "Alguns registres DNS són incorrectes o no existeixen pel domini {domain} (categoria {category})",
|
||||
"diagnosis_dns_missing_record": "Segons la configuració DNS recomanada, hauríeu d'afegir un registre DNS amb la següent informació.<br>Tipus: <code>{type}</code><br>Nom: <code>{name}</code><br>Valor: <code>{value}</code>",
|
||||
"diagnosis_dns_discrepancy": "El registre DNS de tipus {type} i nom {name} no concorda amb la configuració recomanada.\nValor actual: {current}\nValor esperat: {value}",
|
||||
"diagnosis_dns_discrepancy": "La configuració DNS següent sembla que no segueix la configuració recomanada: <br>Tipus: <code>{type}</code><br>Nom: <code>{name}</code><br>Valor actual: <code>{current}</code><br>Valor esperat: <code>{value}</code>",
|
||||
"diagnosis_services_bad_status": "El servei {service} està {status} :(",
|
||||
"diagnosis_diskusage_verylow": "El lloc d'emmagatzematge <code>{mountpoint}</code> (en l'aparell <code>{device}</code>) només té disponibles {free} ({free_percent}%). Hauríeu de considerar alliberar una mica d'espai!",
|
||||
"diagnosis_diskusage_low": "El lloc d'emmagatzematge <code>{mountpoint}</code> (en l'aparell <code>{device}</code>) només té disponibles {free} ({free_percent}%). Aneu amb compte.",
|
||||
|
@ -649,5 +649,23 @@
|
|||
"diagnosis_domain_expiration_error": "Alguns dominis expiraran EN BREUS!",
|
||||
"diagnosis_domain_expires_in": "{domain} expirarà en {days} dies.",
|
||||
"diagnosis_swap_tip": "Vigileu i tingueu en compte que els servidor està allotjant memòria d'intercanvi en una targeta SD o en l'emmagatzematge SSD, això pot reduir dràsticament l'esperança de vida del dispositiu.",
|
||||
"restore_already_installed_apps": "No s'han pogut restaurar les següents aplicacions perquè ja estan instal·lades: {apps}"
|
||||
"restore_already_installed_apps": "No s'han pogut restaurar les següents aplicacions perquè ja estan instal·lades: {apps}",
|
||||
"app_packaging_format_not_supported": "No es pot instal·lar aquesta aplicació ja que el format del paquet no és compatible amb la versió de YunoHost del sistema. Hauríeu de considerar actualitzar el sistema.",
|
||||
"diagnosis_dns_try_dyndns_update_force": "La configuració DNS d'aquest domini hauria de ser gestionada automàticament per YunoHost. Si aquest no és el cas, podeu intentar forçar-ne l'actualització utilitzant <cmd>yunohost dyndns update --force</cmd>.",
|
||||
"migration_0015_cleaning_up": "Netejant la memòria cau i els paquets que ja no són necessaris…",
|
||||
"migration_0015_specific_upgrade": "Començant l'actualització dels paquets del sistema que s'han d'actualitzar de forma independent…",
|
||||
"migration_0015_modified_files": "Tingueu en compte que s'han trobat els següents fitxers que es van modificar manualment i podria ser que es sobreescriguin durant l'actualització: {manually_modified_files}",
|
||||
"migration_0015_problematic_apps_warning": "Tingueu en compte que s'han trobat les següents aplicacions que podrien ser problemàtiques. Sembla que aquestes aplicacions no s'han instal·lat des del catàleg d'aplicacions de YunoHost, o no estan marcades com «funcionant». En conseqüència, no es pot garantir que segueixin funcionant després de l'actualització: {problematic_apps}",
|
||||
"migration_0015_general_warning": "Tingueu en compte que aquesta migració és una operació delicada. L'equip de YunoHost ha fet tots els possibles per revisar i testejar, però tot i això podria ser que la migració trenqui alguna part del sistema o algunes aplicacions.\n\nPer tant, està recomana:\n - Fer una còpia de seguretat de totes les dades o aplicacions crítiques. Més informació a https://yunohost.org/backup;\n - Ser pacient un cop comenci la migració: en funció de la connexió Internet i del maquinari, podria estar unes hores per actualitzar-ho tot.",
|
||||
"migration_0015_system_not_fully_up_to_date": "El sistema no està completament al dia. Heu de fer una actualització normal abans de fer la migració a Buster.",
|
||||
"migration_0015_not_enough_free_space": "Hi ha poc espai lliure a /var/! HI hauria d'haver un mínim de 1GB lliure per poder fer aquesta migració.",
|
||||
"migration_0015_not_stretch": "La distribució actual de Debian no és Stretch!",
|
||||
"migration_0015_yunohost_upgrade": "Començant l'actualització del nucli de YunoHost…",
|
||||
"migration_0015_still_on_stretch_after_main_upgrade": "Alguna cosa ha anat malament durant la actualització principal, sembla que el sistema encara està en Debian Stretch",
|
||||
"migration_0015_main_upgrade": "Començant l'actualització principal…",
|
||||
"migration_0015_patching_sources_list": "Apedaçament de source.lists…",
|
||||
"migration_0015_start": "Començant la migració a Buster",
|
||||
"migration_description_0015_migrate_to_buster": "Actualitza els sistema a Debian Buster i YunoHost 4.x",
|
||||
"regenconf_need_to_explicitly_specify_ssh": "La configuració ssh ha estat modificada manualment, però heu d'especificar explícitament la categoria «ssh» amb --force per fer realment els canvis.",
|
||||
"migration_0015_weak_certs": "S'han trobat els següents certificats que encara utilitzen algoritmes de signatura febles i s'han d'actualitzar per a ser compatibles amb la propera versió de nginx: {certs}"
|
||||
}
|
||||
|
|
|
@ -340,5 +340,6 @@
|
|||
"diagnosis_ip_weird_resolvconf": "DNS Auflösung scheint zu funktionieren, aber sei vorsichtig wenn du eine eigene <code>/etc/resolv.conf</code> verwendest.",
|
||||
"diagnosis_display_tip": "Um die gefundenen Probleme zu sehen, kannst Du zum Diagnose-Bereich des webadmin gehen, oder 'yunohost diagnosis show --issues' in der Kommandozeile ausführen.",
|
||||
"backup_archive_corrupted": "Das Backup-Archiv '{archive}' scheint beschädigt: {error}",
|
||||
"backup_archive_cant_retrieve_info_json": "Die Informationen für das Archiv '{archive}' konnten nicht geladen werden... Die Datei info.json wurde nicht gefunden (oder ist kein gültiges json)."
|
||||
"backup_archive_cant_retrieve_info_json": "Die Informationen für das Archiv '{archive}' konnten nicht geladen werden... Die Datei info.json wurde nicht gefunden (oder ist kein gültiges json).",
|
||||
"app_packaging_format_not_supported": "Diese App kann nicht installiert werden da das Paketformat nicht von der Yunohost-Version unterstützt wird. Denken Sie darüber nach das System zu aktualisieren."
|
||||
}
|
||||
|
|
181
locales/en.json
|
@ -32,26 +32,26 @@
|
|||
"app_manifest_invalid": "Something is wrong with the app manifest: {error}",
|
||||
"app_not_upgraded": "The app '{failed_app}' failed to upgrade, and as a consequence the following apps' upgrades have been cancelled: {apps}",
|
||||
"app_not_correctly_installed": "{app:s} seems to be incorrectly installed",
|
||||
"app_not_installed": "Could not find the app '{app:s}' in the list of installed apps: {all_apps}",
|
||||
"app_not_installed": "Could not find {app:s} in the list of installed apps: {all_apps}",
|
||||
"app_not_properly_removed": "{app:s} has not been properly removed",
|
||||
"app_removed": "{app:s} removed",
|
||||
"app_requirements_checking": "Checking required packages for {app}…",
|
||||
"app_requirements_checking": "Checking required packages for {app}...",
|
||||
"app_requirements_unmeet": "Requirements are not met for {app}, the package {pkgname} ({version}) must be {spec}",
|
||||
"app_remove_after_failed_install": "Removing the app following the installation failure…",
|
||||
"app_remove_after_failed_install": "Removing the app following the installation failure...",
|
||||
"app_sources_fetch_failed": "Could not fetch sources files, is the URL correct?",
|
||||
"app_start_install": "Installing the app '{app}'…",
|
||||
"app_start_remove": "Removing the app '{app}'…",
|
||||
"app_start_backup": "Collecting files to be backed up for the app '{app}'…",
|
||||
"app_start_restore": "Restoring the app '{app}'…",
|
||||
"app_start_install": "Installing {app}...",
|
||||
"app_start_remove": "Removing {app}...",
|
||||
"app_start_backup": "Collecting files to be backed up for {app}...",
|
||||
"app_start_restore": "Restoring {app}...",
|
||||
"app_unknown": "Unknown app",
|
||||
"app_unsupported_remote_type": "Unsupported remote type used for the app",
|
||||
"app_upgrade_several_apps": "The following apps will be upgraded: {apps}",
|
||||
"app_upgrade_app_name": "Now upgrading {app}…",
|
||||
"app_upgrade_app_name": "Now upgrading {app}...",
|
||||
"app_upgrade_failed": "Could not upgrade {app:s}: {error}",
|
||||
"app_upgrade_script_failed": "An error occurred inside the app upgrade script",
|
||||
"app_upgrade_some_app_failed": "Some apps could not be upgraded",
|
||||
"app_upgraded": "{app:s} upgraded",
|
||||
"app_packaging_format_not_supported": "This app cannot be installed because its packaging format is not supported by your Yunohost version. You should probably consider upgrading your system.",
|
||||
"app_packaging_format_not_supported": "This app cannot be installed because its packaging format is not supported by your YunoHost version. You should probably consider upgrading your system.",
|
||||
"apps_already_up_to_date": "All apps are already up-to-date",
|
||||
"apps_catalog_init_success": "App catalog system initialized!",
|
||||
"apps_catalog_updating": "Updating application catalog…",
|
||||
|
@ -67,22 +67,22 @@
|
|||
"ask_new_path": "New path",
|
||||
"ask_password": "Password",
|
||||
"backup_abstract_method": "This backup method has yet to be implemented",
|
||||
"backup_actually_backuping": "Creating a backup archive from the collected files…",
|
||||
"backup_app_failed": "Could not back up the app '{app:s}'",
|
||||
"backup_applying_method_borg": "Sending all files to backup into borg-backup repository…",
|
||||
"backup_applying_method_copy": "Copying all files to backup…",
|
||||
"backup_applying_method_custom": "Calling the custom backup method '{method:s}'…",
|
||||
"backup_applying_method_tar": "Creating the backup TAR archive…",
|
||||
"backup_archive_app_not_found": "Could not find the app '{app:s}' in the backup archive",
|
||||
"backup_actually_backuping": "Creating a backup archive from the collected files...",
|
||||
"backup_app_failed": "Could not back up {app:s}",
|
||||
"backup_applying_method_borg": "Sending all files to backup into borg-backup repository...",
|
||||
"backup_applying_method_copy": "Copying all files to backup...",
|
||||
"backup_applying_method_custom": "Calling the custom backup method '{method:s}'...",
|
||||
"backup_applying_method_tar": "Creating the backup TAR archive...",
|
||||
"backup_archive_app_not_found": "Could not find {app:s} in the backup archive",
|
||||
"backup_archive_broken_link": "Could not access the backup archive (broken link to {path:s})",
|
||||
"backup_archive_name_exists": "A backup archive with this name already exists.",
|
||||
"backup_archive_name_unknown": "Unknown local backup archive named '{name:s}'",
|
||||
"backup_archive_open_failed": "Could not open the backup archive",
|
||||
"backup_archive_cant_retrieve_info_json": "Could not load infos for archive '{archive}' ... The info.json cannot be retrieved (or is not a valid json).",
|
||||
"backup_archive_cant_retrieve_info_json": "Could not load infos for archive '{archive}'... The info.json cannot be retrieved (or is not a valid json).",
|
||||
"backup_archive_corrupted": "It looks like the backup archive '{archive}' is corrupted : {error}",
|
||||
"backup_archive_system_part_not_available": "System part '{part:s}' unavailable in this backup",
|
||||
"backup_archive_writing_error": "Could not add the files '{source:s}' (named in the archive '{dest:s}') to be backed up into the compressed archive '{archive:s}'",
|
||||
"backup_ask_for_copying_if_needed": "Do you want to perform the backup using {size:s} MB temporarily? (This way is used since some files could not be prepared using a more efficient method.)",
|
||||
"backup_ask_for_copying_if_needed": "Do you want to perform the backup using {size:s}MB temporarily? (This way is used since some files could not be prepared using a more efficient method.)",
|
||||
"backup_borg_not_implemented": "The Borg backup method is not yet implemented",
|
||||
"backup_cant_mount_uncompress_archive": "Could not mount the uncompressed archive as write protected",
|
||||
"backup_cleaning_failed": "Could not clean up the temporary backup folder",
|
||||
|
@ -101,20 +101,19 @@
|
|||
"backup_method_copy_finished": "Backup copy finalized",
|
||||
"backup_method_custom_finished": "Custom backup method '{method:s}' finished",
|
||||
"backup_method_tar_finished": "TAR backup archive created",
|
||||
"backup_mount_archive_for_restore": "Preparing archive for restoration…",
|
||||
"backup_mount_archive_for_restore": "Preparing archive for restoration...",
|
||||
"backup_no_uncompress_archive_dir": "There is no such uncompressed archive directory",
|
||||
"backup_nothings_done": "Nothing to save",
|
||||
"backup_output_directory_forbidden": "Pick a different output directory. Backups cannot be created in /bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var or /home/yunohost.backup/archives sub-folders",
|
||||
"backup_output_directory_not_empty": "You should pick an empty output directory",
|
||||
"backup_output_directory_required": "You must provide an output directory for the backup",
|
||||
"backup_output_symlink_dir_broken": "Your archive directory '{path:s}' is a broken symlink. Maybe you forgot to re/mount or plug in the storage medium it points to.",
|
||||
"backup_permission": "Backup permission for app {app:s}",
|
||||
"backup_php5_to_php7_migration_may_fail": "Could not convert your archive to support PHP 7, you may be unable to restore your PHP apps (reason: {error:s})",
|
||||
"backup_running_hooks": "Running backup hooks…",
|
||||
"backup_permission": "Backup permission for {app:s}",
|
||||
"backup_running_hooks": "Running backup hooks...",
|
||||
"backup_system_part_failed": "Could not backup the '{part:s}' system part",
|
||||
"backup_unable_to_organize_files": "Could not use the quick method to organize files in the archive",
|
||||
"backup_with_no_backup_script_for_app": "The app '{app:s}' has no backup script. Ignoring.",
|
||||
"backup_with_no_restore_script_for_app": "The '{app:s}' has no restoration script, you will not be able to automatically restore the backup of this app.",
|
||||
"backup_with_no_restore_script_for_app": "{app:s} has no restoration script, you will not be able to automatically restore the backup of this app.",
|
||||
"certmanager_acme_not_configured_for_domain": "The ACME challenge cannot be ran for {domain} right now because its nginx conf lacks the corresponding code snippet... Please make sure that your nginx configuration is up to date using `yunohost tools regen-conf nginx --dry-run --with-diff`.",
|
||||
"certmanager_attempt_to_renew_nonLE_cert": "The certificate for the domain '{domain:s}' is not issued by Let's Encrypt. Cannot renew it automatically!",
|
||||
"certmanager_attempt_to_renew_valid_cert": "The certificate for the domain '{domain:s}' is not about to expire! (You may use --force if you know what you're doing)",
@@ -124,9 +123,9 @@
"certmanager_cert_install_success_selfsigned": "Self-signed certificate now installed for the domain '{domain:s}'",
|
||||
"certmanager_cert_renew_success": "Let's Encrypt certificate renewed for the domain '{domain:s}'",
|
||||
"certmanager_cert_signing_failed": "Could not sign the new certificate",
|
||||
"certmanager_certificate_fetching_or_enabling_failed": "Trying to use the new certificate for {domain:s} did not work…",
|
||||
"certmanager_certificate_fetching_or_enabling_failed": "Trying to use the new certificate for {domain:s} did not work...",
|
||||
"certmanager_couldnt_fetch_intermediate_cert": "Timed out when trying to fetch intermediate certificate from Let's Encrypt. Certificate installation/renewal aborted—please try again later.",
|
||||
"certmanager_domain_not_diagnosed_yet": "There is no diagnosis result for domain %s yet. Please re-run a diagnosis for categories 'DNS records' and 'Web' in the diagnosis section to check if the domain is ready for Let's Encrypt. (Or if you know what you are doing, use '--no-checks' to turn off those checks.)",
|
||||
"certmanager_domain_not_diagnosed_yet": "There is no diagnosis result for domain {domain} yet. Please re-run a diagnosis for categories 'DNS records' and 'Web' in the diagnosis section to check if the domain is ready for Let's Encrypt. (Or if you know what you are doing, use '--no-checks' to turn off those checks.)",
|
||||
"certmanager_domain_cert_not_selfsigned": "The certificate for domain {domain:s} is not self-signed. Are you sure you want to replace it? (Use '--force' to do so.)",
|
||||
"certmanager_domain_dns_ip_differs_from_public_ip": "The DNS records for domain '{domain:s}' is different from this server's IP. Please check the 'DNS records' (basic) category in the diagnosis for more info. If you recently modified your A record, please wait for it to propagate (some DNS propagation checkers are available online). (If you know what you are doing, use '--no-checks' to turn off those checks.)",
|
||||
"certmanager_domain_http_not_working": "Domain {domain:s} does not seem to be accessible through HTTP. Please check the 'Web' category in the diagnosis for more info. (If you know what you are doing, use '--no-checks' to turn off those checks.)",
@@ -158,9 +157,9 @@
"diagnosis_everything_ok": "Everything looks good for {category}!",
|
||||
"diagnosis_failed": "Failed to fetch diagnosis result for category '{category}': {error}",
|
||||
"diagnosis_no_cache": "No diagnosis cache yet for category '{category}'",
|
||||
"diagnosis_ip_connected_ipv4": "The server is connected to the Internet through IPv4 !",
|
||||
"diagnosis_ip_connected_ipv4": "The server is connected to the Internet through IPv4!",
|
||||
"diagnosis_ip_no_ipv4": "The server does not have working IPv4.",
|
||||
"diagnosis_ip_connected_ipv6": "The server is connected to the Internet through IPv6 !",
|
||||
"diagnosis_ip_connected_ipv6": "The server is connected to the Internet through IPv6!",
|
||||
"diagnosis_ip_no_ipv6": "The server does not have working IPv6.",
|
||||
"diagnosis_ip_no_ipv6_tip": "Having a working IPv6 is not mandatory for your server to work, but it is better for the health of the Internet as a whole. IPv6 should usually be automatically configured by the system or your provider if it's available. Otherwise, you might need to configure a few things manually as explained in the documentation here: <a href='https://yunohost.org/#/ipv6'>https://yunohost.org/#/ipv6</a>. If you cannot enable IPv6 or if it seems too technical for you, you can also safely ignore this warning.",
|
||||
"diagnosis_ip_global": "Global IP: <code>{global}</code>",
@@ -289,7 +288,7 @@
"dyndns_cron_removed": "DynDNS cron job removed",
|
||||
"dyndns_ip_update_failed": "Could not update IP address to DynDNS",
|
||||
"dyndns_ip_updated": "Updated your IP on DynDNS",
|
||||
"dyndns_key_generating": "Generating DNS key… It may take a while.",
|
||||
"dyndns_key_generating": "Generating DNS key... It may take a while.",
|
||||
"dyndns_key_not_found": "DNS key not found for the domain",
|
||||
"dyndns_no_domain_registered": "No domain registered with DynDNS",
|
||||
"dyndns_provider_unreachable": "Unable to reach DynDNS provider {provider}: either your YunoHost is not correctly connected to the internet or the dynette server is down.",
@@ -297,9 +296,9 @@
"dyndns_registration_failed": "Could not register DynDNS domain: {error:s}",
|
||||
"dyndns_domain_not_provided": "DynDNS provider {provider:s} cannot provide domain {domain:s}.",
|
||||
"dyndns_unavailable": "The domain '{domain:s}' is unavailable.",
|
||||
"executing_command": "Executing command '{command:s}'…",
|
||||
"executing_script": "Executing script '{script:s}'…",
|
||||
"extracting": "Extracting…",
|
||||
"executing_command": "Executing command '{command:s}'...",
|
||||
"executing_script": "Executing script '{script:s}'...",
|
||||
"extracting": "Extracting...",
|
||||
"experimental_feature": "Warning: This feature is experimental and not considered stable, you should not use it unless you know what you are doing.",
|
||||
"field_invalid": "Invalid field '{:s}'",
|
||||
"file_does_not_exist": "The file {path:s} does not exist.",
@@ -331,7 +330,7 @@
"good_practices_about_user_password": "You are now about to define a new user password. The password should be at least 8 characters long—though it is good practice to use a longer password (i.e. a passphrase) and/or to a variation of characters (uppercase, lowercase, digits and special characters).",
|
||||
"group_already_exist": "Group {group} already exists",
|
||||
"group_already_exist_on_system": "Group {group} already exists in the system groups",
|
||||
"group_already_exist_on_system_but_removing_it": "Group {group} already exists in the system groups, but YunoHost will remove it…",
|
||||
"group_already_exist_on_system_but_removing_it": "Group {group} already exists in the system groups, but YunoHost will remove it...",
|
||||
"group_created": "Group '{group}' created",
|
||||
"group_creation_failed": "Could not create the group '{group}': {error}",
|
||||
"group_cannot_edit_all_users": "The group 'all_users' cannot be edited manually. It is a special group meant to contain all users registered in YunoHost",
@@ -411,74 +410,49 @@
"mail_unavailable": "This e-mail address is reserved and shall be automatically allocated to the very first user",
|
||||
"main_domain_change_failed": "Unable to change the main domain",
|
||||
"main_domain_changed": "The main domain has been changed",
|
||||
"migrate_tsig_end": "Migration to HMAC-SHA-512 finished",
|
||||
"migrate_tsig_failed": "Could not migrate the DynDNS domain '{domain}' to HMAC-SHA-512, rolling back. Error: {error_code}, {error}",
|
||||
"migrate_tsig_start": "Insufficiently secure key algorithm detected for TSIG signature of the domain '{domain}', initiating migration to the more secure HMAC-SHA-512",
|
||||
"migrate_tsig_wait": "Waiting three minutes for the DynDNS server to take the new key into account…",
|
||||
"migrate_tsig_wait_2": "2min…",
|
||||
"migrate_tsig_wait_3": "1min…",
|
||||
"migrate_tsig_wait_4": "30 seconds…",
|
||||
"migrate_tsig_not_needed": "You do not appear to use a DynDNS domain, so no migration is needed.",
|
||||
"migration_description_0001_change_cert_group_to_sslcert": "Change certificates group permissions from 'metronome' to 'ssl-cert'",
|
||||
"migration_description_0002_migrate_to_tsig_sha256": "Improve security of DynDNS TSIG updates by using SHA-512 instead of MD5",
|
||||
"migration_description_0003_migrate_to_stretch": "Upgrade the system to Debian Stretch and YunoHost 3.0",
|
||||
"migration_description_0004_php5_to_php7_pools": "Reconfigure the PHP pools to use PHP 7 instead of 5",
|
||||
"migration_description_0005_postgresql_9p4_to_9p6": "Migrate databases from PostgreSQL 9.4 to 9.6",
|
||||
"migration_description_0006_sync_admin_and_root_passwords": "Synchronize admin and root passwords",
|
||||
"migration_description_0007_ssh_conf_managed_by_yunohost_step1": "Let the SSH configuration be managed by YunoHost (step 1, automatic)",
|
||||
"migration_description_0008_ssh_conf_managed_by_yunohost_step2": "Let the SSH configuration be managed by YunoHost (step 2, manual)",
|
||||
"migration_description_0009_decouple_regenconf_from_services": "Decouple the regen-conf mechanism from services",
|
||||
"migration_description_0010_migrate_to_apps_json": "Remove deprecated apps catalogs and use the new unified 'apps.json' list instead (outdated, replaced by migration 13)",
|
||||
"migration_description_0011_setup_group_permission": "Set up user groups and permissions for apps and services",
|
||||
"migration_description_0012_postgresql_password_to_md5_authentication": "Force PostgreSQL authentication to use MD5 for local connections",
|
||||
"migration_description_0013_futureproof_apps_catalog_system": "Migrate to the new future-proof apps catalog system",
|
||||
"migration_description_0014_remove_app_status_json": "Remove legacy status.json app files",
|
||||
"migration_description_0015_extends_permissions_features_1": "Extends permissions features, step 1",
|
||||
"migration_0003_start": "Starting migration to Stretch. The logs will be available in {logfile}.",
|
||||
"migration_0003_patching_sources_list": "Patching the sources.lists…",
|
||||
"migration_0003_main_upgrade": "Starting main upgrade…",
|
||||
"migration_0003_fail2ban_upgrade": "Starting the Fail2Ban upgrade…",
|
||||
"migration_0003_restoring_origin_nginx_conf": "Your file /etc/nginx/nginx.conf was edited somehow. The migration is going to reset it to its original state first… The previous file will be available as {backup_dest}.",
|
||||
"migration_0003_yunohost_upgrade": "Starting the YunoHost package upgrade… The migration will end, but the actual upgrade will happen immediately afterwards. After the operation is complete, you might have to log in to the webadmin page again.",
|
||||
"migration_0003_not_jessie": "The current Debian distribution is not Jessie!",
|
||||
"migration_0003_system_not_fully_up_to_date": "Your system is not fully up-to-date. Please perform a regular upgrade before running the migration to Stretch.",
|
||||
"migration_0003_still_on_jessie_after_main_upgrade": "Something went wrong during the main upgrade: Is the system still on Jessie‽ To investigate the issue, please look at {log}:s…",
|
||||
"migration_0003_general_warning": "Please note that this migration is a delicate operation. The YunoHost team did its best to review and test it, but the migration might still break parts of the system or its apps.\n\nTherefore, it is recommended to:\n - Perform a backup of any critical data or app. More info on https://yunohost.org/backup;\n - Be patient after launching the migration: Depending on your Internet connection and hardware, it might take up to a few hours for everything to upgrade.\n\nAdditionally, the port for SMTP, used by external e-mail clients (like Thunderbird or K9-Mail) was changed from 465 (SSL/TLS) to 587 (STARTTLS). The old port (465) will automatically be closed, and the new port (587) will be opened in the firewall. You and your users *will* have to adapt the configuration of your e-mail clients accordingly.",
|
||||
"migration_0003_problematic_apps_warning": "Please note that the following possibly problematic installed apps were detected. It looks like those were not installed from an app catalog, or are not flagged as 'working'. Consequently, it cannot be guaranteed that they will still work after the upgrade: {problematic_apps}",
|
||||
"migration_0003_modified_files": "Please note that the following files were found to be manually modified and might be overwritten following the upgrade: {manually_modified_files}",
|
||||
"migration_0005_postgresql_94_not_installed": "PostgreSQL was not installed on your system. Nothing to do.",
|
||||
"migration_0005_postgresql_96_not_installed": "PostgreSQL 9.4 is installed, but not postgresql 9.6‽ Something weird might have happened on your system :(…",
|
||||
"migration_0005_not_enough_space": "Make sufficient space available in {path} to run the migration.",
|
||||
"migration_0006_disclaimer": "YunoHost now expects the admin and root passwords to be synchronized. This migration replaces your root password with the admin password.",
|
||||
"migration_0007_cancelled": "Could not improve the way your SSH configuration is managed.",
|
||||
"migration_0007_cannot_restart": "SSH can't be restarted after trying to cancel migration number 6.",
|
||||
"migration_0008_general_disclaimer": "To improve the security of your server, it is recommended to let YunoHost manage the SSH configuration. Your current SSH setup differs from the recommendation. If you let YunoHost reconfigure it, the way you connect to your server through SSH will change thusly:",
|
||||
"migration_0008_port": "• You will have to connect using port 22 instead of your current custom SSH port. Feel free to reconfigure it;",
|
||||
"migration_0008_root": "• You will not be able to connect as root through SSH. Instead you should use the admin user;",
|
||||
"migration_0008_dsa": "• The DSA key will be turned off. Hence, you might need to invalidate a spooky warning from your SSH client, and recheck the fingerprint of your server;",
|
||||
"migration_0008_warning": "If you understand those warnings and want YunoHost to override your current configuration, run the migration. Otherwise, you can also skip the migration, though it is not recommended.",
|
||||
"migration_0008_no_warning": "Overriding your SSH configuration should be safe, though this cannot be promised! Run the migration to override it. Otherwise, you can also skip the migration, though it is not recommended.",
|
||||
"migration_0009_not_needed": "This migration already happened somehow… (?) Skipping.",
|
||||
"migration_0011_backup_before_migration": "Creating a backup of LDAP database and apps settings prior to the actual migration.",
|
||||
"migration_0011_can_not_backup_before_migration": "The backup of the system could not be completed before the migration failed. Error: {error:s}",
|
||||
"migration_description_0015_migrate_to_buster": "Upgrade the system to Debian Buster and YunoHost 4.x",
|
||||
"migration_description_0016_php70_to_php73_pools": "Migrate php7.0-fpm 'pool' conf files to php7.3",
|
||||
"migration_description_0017_postgresql_9p6_to_11": "Migrate databases from PostgreSQL 9.6 to 11",
|
||||
"migration_description_0018_xtable_to_nftable": "Migrate old network traffic rules to the new nftable system",
|
||||
"migration_0011_create_group": "Creating a group for each user…",
|
||||
"migration_0011_done": "Migration completed. You are now able to manage usergroups.",
|
||||
"migration_0011_slapd_config_will_be_overwritten": "It looks like you manually edited the slapd configuration. For this critical migration, YunoHost needs to force the update of the slapd configuration. The original files will be backuped in {conf_backup_folder}.",
|
||||
"migration_0011_LDAP_update_failed": "Could not update LDAP. Error: {error:s}",
|
||||
"migration_0011_migrate_permission": "Migrating permissions from apps settings to LDAP…",
|
||||
"migration_0011_migration_failed_trying_to_rollback": "Could not migrate… trying to roll back the system.",
|
||||
"migration_0011_rollback_success": "System rolled back.",
|
||||
"migration_0011_update_LDAP_database": "Updating LDAP database…",
|
||||
"migration_0011_update_LDAP_schema": "Updating LDAP schema…",
|
||||
"migration_0011_migrate_permission": "Migrating permissions from apps settings to LDAP...",
|
||||
"migration_0011_update_LDAP_database": "Updating LDAP database...",
|
||||
"migration_0011_update_LDAP_schema": "Updating LDAP schema...",
|
||||
"migration_0011_failed_to_remove_stale_object": "Could not remove stale object {dn}: {error}",
|
||||
"migration_0015_add_new_attributes_in_ldap": "Add new attributes for permissions in LDAP database",
|
||||
"migration_0015_start" : "Starting migration to Buster",
|
||||
"migration_0015_patching_sources_list": "Patching the sources.lists...",
|
||||
"migration_0015_main_upgrade": "Starting main upgrade...",
|
||||
"migration_0015_still_on_stretch_after_main_upgrade": "Something went wrong during the main upgrade, the system appears to still be on Debian Stretch",
|
||||
"migration_0015_yunohost_upgrade" : "Starting YunoHost core upgrade...",
|
||||
"migration_0015_not_stretch" : "The current Debian distribution is not Stretch!",
|
||||
"migration_0015_not_enough_free_space" : "Free space is pretty low in /var/! You should have at least 1GB free to run this migration.",
|
||||
"migration_0015_system_not_fully_up_to_date": "Your system is not fully up-to-date. Please perform a regular upgrade before running the migration to Buster.",
|
||||
"migration_0015_general_warning": "Please note that this migration is a delicate operation. The YunoHost team did its best to review and test it, but the migration might still break parts of the system or its apps.\n\nTherefore, it is recommended to:\n - Perform a backup of any critical data or app. More info on https://yunohost.org/backup;\n - Be patient after launching the migration: Depending on your Internet connection and hardware, it might take up to a few hours for everything to upgrade.",
|
||||
"migration_0015_problematic_apps_warning": "Please note that the following possibly problematic installed apps were detected. It looks like those were not installed from the YunoHost app catalog, or are not flagged as 'working'. Consequently, it cannot be guaranteed that they will still work after the upgrade: {problematic_apps}",
|
||||
"migration_0015_modified_files": "Please note that the following files were found to be manually modified and might be overwritten following the upgrade: {manually_modified_files}",
|
||||
"migration_0015_specific_upgrade": "Starting upgrade of system packages that needs to be upgrade independently...",
|
||||
"migration_0015_cleaning_up": "Cleaning up cache and packages not useful anymore...",
|
||||
"migration_0015_weak_certs": "The following certificates were found to still use weak signature algorithms and have to be upgraded to be compatible with the next version of nginx: {certs}",
|
||||
"migration_0017_postgresql_96_not_installed": "PostgreSQL was not installed on your system. Nothing to do.",
|
||||
"migration_0017_postgresql_11_not_installed": "PostgreSQL 9.6 is installed, but not postgresql 11‽ Something weird might have happened on your system :(...",
|
||||
"migration_0017_not_enough_space": "Make sufficient space available in {path} to run the migration.",
|
||||
"migration_0018_failed_to_migrate_iptables_rules": "Failed to migrate legacy iptables rules to nftables: {error}",
|
||||
"migration_0018_failed_to_reset_legacy_rules": "Failed to reset legacy iptables rules: {error}",
|
||||
"migration_0019_add_new_attributes_in_ldap": "Add new attributes for permissions in LDAP database",
|
||||
"migration_0019_backup_before_migration": "Creating a backup of LDAP database and apps settings prior to the actual migration.",
|
||||
"migration_0019_can_not_backup_before_migration": "The backup of the system could not be completed before the migration failed. Error: {error:s}",
|
||||
"migration_0019_migration_failed_trying_to_rollback": "Could not migrate… trying to roll back the system.",
|
||||
"migration_0019_rollback_success": "System rolled back.",
|
||||
"migration_0019_slapd_config_will_be_overwritten": "It looks like you manually edited the slapd configuration. For this critical migration, YunoHost needs to force the update of the slapd configuration. The original files will be backuped in {conf_backup_folder}.",
|
||||
"migration_0011_update_LDAP_schema": "Updating LDAP schema…",
|
||||
"migrations_already_ran": "Those migrations are already done: {ids}",
|
||||
"migrations_cant_reach_migration_file": "Could not access migrations files at the path '%s'",
|
||||
"migrations_dependencies_not_satisfied": "Run these migrations: '{dependencies_id}', before migration {id}.",
|
||||
"migrations_failed_to_load_migration": "Could not load migration {id}: {error}",
|
||||
"migrations_exclusive_options": "'--auto', '--skip', and '--force-rerun' are mutually exclusive options.",
|
||||
"migrations_list_conflict_pending_done": "You cannot use both '--previous' and '--done' at the same time.",
|
||||
"migrations_loading_migration": "Loading migration {id}…",
|
||||
"migrations_loading_migration": "Loading migration {id}...",
|
||||
"migrations_migration_has_failed": "Migration {id} did not complete, aborting. Error: {exception}",
|
||||
"migrations_must_provide_explicit_targets": "You must provide explicit targets when using '--skip' or '--force-rerun'",
|
||||
"migrations_need_to_accept_disclaimer": "To run the migration {id}, your must accept the following disclaimer:\n---\n{disclaimer}\n---\nIf you accept to run the migration, please re-run the command with the option '--accept-disclaimer'.",
@@ -486,11 +460,10 @@
"migrations_no_such_migration": "There is no migration called '{id}'",
|
||||
"migrations_not_pending_cant_skip": "Those migrations are not pending, so cannot be skipped: {ids}",
|
||||
"migrations_pending_cant_rerun": "Those migrations are still pending, so cannot be run again: {ids}",
|
||||
"migrations_running_forward": "Running migration {id}…",
|
||||
"migrations_skip_migration": "Skipping migration {id}…",
|
||||
"migrations_running_forward": "Running migration {id}...",
|
||||
"migrations_skip_migration": "Skipping migration {id}...",
|
||||
"migrations_success_forward": "Migration {id} completed",
|
||||
"migrations_to_be_ran_manually": "Migration {id} has to be run manually. Please go to Tools → Migrations on the webadmin page, or run `yunohost tools migrations migrate`.",
|
||||
"no_internet_connection": "The server is not connected to the Internet",
|
||||
"not_enough_disk_space": "Not enough free space on '{path:s}'",
|
||||
"operation_interrupted": "The operation was manually interrupted?",
|
||||
"packages_upgrade_failed": "Could not upgrade all the packages",
@@ -541,12 +514,13 @@
"regenconf_would_be_updated": "The configuration would have been updated for category '{category}'",
|
||||
"regenconf_dry_pending_applying": "Checking pending configuration which would have been applied for category '{category}'…",
|
||||
"regenconf_failed": "Could not regenerate the configuration for category(s): {categories}",
|
||||
"regenconf_pending_applying": "Applying pending configuration for category '{category}'…",
|
||||
"regenconf_pending_applying": "Applying pending configuration for category '{category}'...",
|
||||
"regenconf_need_to_explicitly_specify_ssh": "The ssh configuration has been manually modified, but you need to explicitly specify category 'ssh' with --force to actually apply the changes.",
|
||||
"regex_incompatible_with_tile": "/!\\ Packagers! For the permission '{permission}' can't set the regex {regex} as main url and set 'show_tile' to 'true'",
|
||||
"regex_with_only_domain": "You can't use a regex for domain, only for path",
|
||||
"restore_already_installed_app": "An app with the ID '{app:s}' is already installed",
|
||||
"restore_already_installed_apps": "The following apps can't be restored because they are already installed: {apps}",
|
||||
"restore_app_failed": "Could not restore the app '{app:s}'",
|
||||
"restore_app_failed": "Could not restore {app:s}",
|
||||
"restore_cleaning_failed": "Could not clean up the temporary restoration directory",
|
||||
"restore_complete": "Restored",
|
||||
"restore_confirm_yunohost_installed": "Do you really want to restore an already installed system? [{answers:s}]",
@@ -578,8 +552,7 @@
"service_description_metronome": "Manage XMPP instant messaging accounts",
|
||||
"service_description_mysql": "Stores app data (SQL database)",
|
||||
"service_description_nginx": "Serves or provides access to all the websites hosted on your server",
|
||||
"service_description_nslcd": "Handles YunoHost user shell connection",
|
||||
"service_description_php7.0-fpm": "Runs apps written in PHP with NGINX",
|
||||
"service_description_php7.3-fpm": "Runs apps written in PHP with NGINX",
|
||||
"service_description_postfix": "Used to send and receive e-mails",
|
||||
"service_description_redis-server": "A specialized database used for rapid data access, task queue, and communication between programs",
|
||||
"service_description_rspamd": "Filters spam, and other e-mail related features",
@@ -611,7 +584,7 @@
"ssowat_conf_updated": "SSOwat configuration updated",
|
||||
"system_upgraded": "System upgraded",
|
||||
"system_username_exists": "Username already exists in the list of system users",
|
||||
"this_action_broke_dpkg": "This action broke dpkg/APT (the system package managers)… You can try to solve this issue by connecting through SSH and running `sudo apt install --fix-broken` and/or `sudo dpkg --configure -a`.",
|
||||
"this_action_broke_dpkg": "This action broke dpkg/APT (the system package managers)... You can try to solve this issue by connecting through SSH and running `sudo apt install --fix-broken` and/or `sudo dpkg --configure -a`.",
|
||||
"tools_upgrade_at_least_one": "Please specify '--apps', or '--system'",
|
||||
"tools_upgrade_cant_both": "Cannot upgrade both system and apps at the same time",
|
||||
"tools_upgrade_cant_hold_critical_packages": "Could not hold critical packages…",
@@ -621,16 +594,16 @@
"tools_upgrade_special_packages": "Now upgrading 'special' (yunohost-related) packages…",
|
||||
"tools_upgrade_special_packages_explanation": "The special upgrade will continue in the background. Please don't start any other actions on your server for the next ~10 minutes (depending on hardware speed). After this, you may have to re-log in to the webadmin. The upgrade log will be available in Tools → Log (in the webadmin) or using 'yunohost log list' (from the command-line).",
|
||||
"tools_upgrade_special_packages_completed": "YunoHost package upgrade completed.\nPress [Enter] to get the command line back",
|
||||
"unbackup_app": "App '{app:s}' will not be saved",
|
||||
"unbackup_app": "{app:s} will not be saved",
|
||||
"unexpected_error": "Something unexpected went wrong: {error}",
|
||||
"unknown_main_domain_path": "Unknown domain or path for app '{app}'. You need to specify a domain and a path to be able to specify a url for permission.",
|
||||
"unlimit": "No quota",
|
||||
"unrestore_app": "App '{app:s}' will not be restored",
|
||||
"unrestore_app": "{app:s} will not be restored",
|
||||
"update_apt_cache_failed": "Could not to update the cache of APT (Debian's package manager). Here is a dump of the sources.list lines, which might help identify problematic lines: \n{sourceslist}",
|
||||
"update_apt_cache_warning": "Something went wrong while updating the cache of APT (Debian's package manager). Here is a dump of the sources.list lines, which might help identify problematic lines: \n{sourceslist}",
|
||||
"updating_apt_cache": "Fetching available upgrades for system packages…",
|
||||
"updating_apt_cache": "Fetching available upgrades for system packages...",
|
||||
"upgrade_complete": "Upgrade complete",
|
||||
"upgrading_packages": "Upgrading packages…",
|
||||
"upgrading_packages": "Upgrading packages...",
|
||||
"upnp_dev_not_found": "No UPnP device found",
|
||||
"upnp_disabled": "UPnP turned off",
|
||||
"upnp_enabled": "UPnP turned on",
@@ -649,7 +622,7 @@
"yunohost_ca_creation_failed": "Could not create certificate authority",
|
||||
"yunohost_ca_creation_success": "Local certification authority created.",
|
||||
"yunohost_configured": "YunoHost is now configured",
|
||||
"yunohost_installing": "Installing YunoHost…",
|
||||
"yunohost_installing": "Installing YunoHost...",
|
||||
"yunohost_not_installed": "YunoHost is not correctly installed. Please run 'yunohost tools postinstall'",
|
||||
"yunohost_postinstall_end_tip": "The post-install completed! To finalize your setup, please consider:\n - adding a first user through the 'Users' section of the webadmin (or 'yunohost user create <username>' in command-line);\n - diagnose potential issues through the 'Diagnosis' section of the webadmin (or 'yunohost diagnosis run' in command-line);\n - reading the 'Finalizing your setup' and 'Getting to know Yunohost' parts in the admin documentation: https://yunohost.org/admindoc."
|
||||
}
@@ -89,7 +89,7 @@
"mail_domain_unknown": "Le domaine '{domain:s}' de cette adresse de courriel n’est pas valide. Merci d’utiliser un domaine administré par ce serveur.",
|
||||
"mail_forward_remove_failed": "Impossible de supprimer le courriel de transfert '{mail:s}'",
|
||||
"main_domain_change_failed": "Impossible de modifier le domaine principal",
|
||||
"main_domain_changed": "Le domaine principal modifié",
|
||||
"main_domain_changed": "Le domaine principal a été modifié",
|
||||
"no_internet_connection": "Le serveur n’est pas connecté à Internet",
|
||||
"not_enough_disk_space": "L’espace disque est insuffisant sur '{path:s}'",
|
||||
"package_unknown": "Le paquet '{pkgname}' est inconnu",
@@ -168,7 +168,7 @@
"certmanager_attempt_to_renew_valid_cert": "Le certificat pour le domaine {domain:s} n’est pas sur le point d’expirer ! (Vous pouvez utiliser --force si vous savez ce que vous faites)",
|
||||
"certmanager_domain_http_not_working": "Le domaine {domain:s} ne semble pas être accessible via HTTP. Merci de vérifier la catégorie 'Web' dans le diagnostic pour plus d'informations. (Ou si vous savez ce que vous faites, utilisez '--no-checks' pour désactiver la vérification.)",
|
||||
"certmanager_error_no_A_record": "Aucun enregistrement DNS 'A' n’a été trouvé pour {domain:s}. Vous devez faire pointer votre nom de domaine vers votre machine pour être en mesure d’installer un certificat Let’s Encrypt ! (Si vous savez ce que vous faites, utilisez --no-checks pour désactiver ces contrôles)",
|
||||
"certmanager_domain_dns_ip_differs_from_public_ip": "L’enregistrement DNS 'A' du domaine {domain:s} est différent de l’adresse IP de ce serveur. Si vous avez récemment modifié votre enregistrement 'A', veuillez attendre sa propagation (des vérificateurs de propagation DNS sont disponibles en ligne). (Si vous savez ce que vous faites, utilisez --no-checks pour désactiver ces contrôles)",
|
||||
"certmanager_domain_dns_ip_differs_from_public_ip": "L'enregistrement DNS du domaine {domain:s} est différent de l’adresse IP de ce serveur. Pour plus d'informations, veuillez consulter la catégorie \"Enregistrements DNS\" dans la section diagnostic. Si vous avez récemment modifié votre enregistrement 'A', veuillez attendre sa propagation (des vérificateurs de propagation DNS sont disponibles en ligne). (Si vous savez ce que vous faites, utilisez --no-checks pour désactiver ces contrôles)",
|
||||
"certmanager_cannot_read_cert": "Quelque chose s’est mal passé lors de la tentative d’ouverture du certificat actuel pour le domaine {domain:s} (fichier : {file:s}), la cause est : {reason:s}",
|
||||
"certmanager_cert_install_success_selfsigned": "Le certificat auto-signé est maintenant installé pour le domaine « {domain:s} »",
|
||||
"certmanager_cert_install_success": "Le certificat Let’s Encrypt est maintenant installé pour le domaine « {domain:s} »",
@@ -188,7 +188,7 @@
"certmanager_http_check_timeout": "Expiration du délai lorsque le serveur a essayé de se contacter lui-même via HTTP en utilisant l’adresse IP public {ip:s} du domaine {domain:s}. Vous rencontrez peut-être un problème d’hairpinning ou alors le pare-feu/routeur en amont de votre serveur est mal configuré.",
|
||||
"certmanager_couldnt_fetch_intermediate_cert": "Expiration du délai lors de la tentative de récupération du certificat intermédiaire depuis Let’s Encrypt. L’installation ou le renouvellement du certificat a été annulé. Veuillez réessayer plus tard.",
|
||||
"domain_hostname_failed": "Échec de l’utilisation d’un nouveau nom d’hôte. Cela pourrait causer des soucis plus tard (cela n’en causera peut-être pas).",
|
||||
"yunohost_ca_creation_success": "L’autorité de certification locale créée.",
|
||||
"yunohost_ca_creation_success": "L'autorité de certification locale a été créée.",
|
||||
"app_already_installed_cant_change_url": "Cette application est déjà installée. L’URL ne peut pas être changé simplement par cette fonction. Vérifiez si cela est disponible avec `app changeurl`.",
|
||||
"app_change_url_failed_nginx_reload": "Le redémarrage de Nginx a échoué. Voici la sortie de 'nginx -t' :\n{nginx_errors:s}",
|
||||
"app_change_url_identical_domains": "L’ancien et le nouveau couple domaine/chemin_de_l’URL sont identiques pour ('{domain:s}{path:s}'), rien à faire.",
@@ -392,7 +392,7 @@
"service_reloaded_or_restarted": "Le service « {service:s} » a été rechargé ou redémarré",
|
||||
"this_action_broke_dpkg": "Cette action a laissé des paquets non configurés par dpkg/apt (les gestionnaires de paquets système). Vous pouvez essayer de résoudre ce problème en vous connectant via SSH et en exécutant `sudo apt install --fix-broken` et/ou `sudo dpkg --configure -a`.",
|
||||
"app_action_cannot_be_ran_because_required_services_down": "Ces services requis doivent être en cours d’exécution pour exécuter cette action : {services}. Essayez de les redémarrer pour continuer (et éventuellement rechercher pourquoi ils sont en panne).",
|
||||
"admin_password_too_long": "Veuillez choisir un mot de passe de moins de 127 caractères",
|
||||
"admin_password_too_long": "Veuillez choisir un mot de passe comportant moins de 127 caractères",
|
||||
"log_regen_conf": "Régénérer les configurations du système '{}'",
|
||||
"migration_0009_not_needed": "Cette migration semble avoir déjà été jouée ? On l’ignore.",
|
||||
"regenconf_file_backed_up": "Le fichier de configuration '{conf}' a été sauvegardé sous '{backup}'",
@@ -426,7 +426,7 @@
"tools_upgrade_special_packages_completed": "La mise à jour des paquets de YunoHost est finie !\nPressez [Entrée] pour revenir à la ligne de commande",
|
||||
"dpkg_lock_not_available": "Cette commande ne peut pas être exécutée pour le moment car un autre programme semble utiliser le verrou de dpkg (le gestionnaire de package système)",
|
||||
"tools_upgrade_cant_unhold_critical_packages": "Impossible de conserver les paquets critiques…",
|
||||
"tools_upgrade_special_packages_explanation": "La mise à niveau spéciale se poursuivra en arrière-plan. Veuillez ne pas lancer d'autres actions sur votre serveur pendant les 10 prochaines minutes (selon la vitesse du matériel). Après cela, vous devrez peut-être vous reconnecter à l'administrateur Web. Le journal de mise à niveau sera disponible dans Outils → Journal (dans le webadmin) ou en utilisant la « liste des journaux yunohost » (à partir de la ligne de commande).",
|
||||
"tools_upgrade_special_packages_explanation": "La mise à niveau spécifique à YunoHost se poursuivra en arrière-plan. Veuillez ne pas lancer d'autres actions sur votre serveur pendant les 10 prochaines minutes (selon la vitesse du matériel). Après cela, vous devrez peut-être vous reconnecter à l'administrateur Web. Le journal de mise à niveau sera disponible dans Outils → Journal (dans le webadmin) ou en utilisant la « liste des journaux yunohost » (à partir de la ligne de commande).",
|
||||
"update_apt_cache_failed": "Impossible de mettre à jour le cache APT (gestionnaire de paquets Debian). Voici un extrait du fichier sources.list qui pourrait vous aider à identifier les lignes problématiques :\n{sourceslist}",
|
||||
"update_apt_cache_warning": "Des erreurs se sont produites lors de la mise à jour du cache APT (gestionnaire de paquets Debian). Voici un extrait des lignes du fichier sources.list qui pourrait vous aider à identifier les lignes problématiques :\n{sourceslist}",
|
||||
"backup_permission": "Permission de sauvegarde pour l’application {app:s}",
@@ -535,7 +535,7 @@
"diagnosis_ip_broken_resolvconf": "La résolution du nom de domaine semble être cassée sur votre serveur, ce qui semble lié au fait que <code>/etc/resolv.conf</code> ne pointe pas vers <code>127.0.0.1</code>.",
|
||||
"diagnosis_dns_good_conf": "Les enregistrements DNS sont correctement configurés pour le domaine {domain} (catégorie {category})",
|
||||
"diagnosis_dns_bad_conf": "Certains enregistrements DNS sont manquants ou incorrects pour le domaine {domain} (catégorie {category})",
|
||||
"diagnosis_dns_discrepancy": "Cet enregistrement DNS ne semble pas correspondre à la configuration recommandée :<br>Type : <code>{type}</code><br>Nom : <code>{name}</code><br>Valeur actuelle : <code>{current}</code><br>Valeur attendue : <code>{value}</code>",
|
||||
"diagnosis_dns_discrepancy": "Cet enregistrement DNS ne semble pas correspondre à la configuration recommandée : <br>Type : <code>{type}</code><br>Nom : <code>{name}</code><br> La valeur actuelle est : <code>{current}</code><br> La valeur attendue est : <code>{value}</code>",
|
||||
"diagnosis_services_bad_status": "Le service {service} est {status} :-(",
|
||||
"diagnosis_diskusage_verylow": "L'espace de stockage <code>{mountpoint}</code> (sur l’appareil <code>{device}</code>) ne dispose que de {free} ({free_percent}%) espace restant (sur {total}). Vous devriez vraiment envisager de nettoyer de l’espace !",
|
||||
"diagnosis_diskusage_low": "L'espace de stockage <code>{mountpoint}</code> (sur l'appareil <code>{device}</code>) ne dispose que de {free} ({free_percent}%) espace restant (sur {total}). Faites attention.",
@@ -649,5 +649,24 @@
"diagnosis_domain_expiration_error": "Certains domaines vont expirer TRÈS PROCHAINEMENT !",
|
||||
"diagnosis_domain_expires_in": "{domain} expire dans {days} jours.",
|
||||
"certmanager_domain_not_diagnosed_yet": "Il n'y a pas encore de résultat de diagnostic pour le domaine %s. Merci de relancer un diagnostic pour les catégories 'Enregistrements DNS' et 'Web' dans la section Diagnostique pour vérifier si le domaine est prêt pour Let's Encrypt. (Ou si vous savez ce que vous faites, utilisez '--no-checks' pour désactiver la vérification.)",
|
||||
"diagnosis_swap_tip": "Merci d'être prudent et conscient que si vous hébergez une partition SWAP sur une carte SD ou un disque SSD, cela risque de réduire drastiquement l’espérance de vie du périphérique."
|
||||
"diagnosis_swap_tip": "Merci d'être prudent et conscient que si vous hébergez une partition SWAP sur une carte SD ou un disque SSD, cela risque de réduire drastiquement l’espérance de vie du périphérique.",
|
||||
"restore_already_installed_apps": "Les applications suivantes ne peuvent pas être restaurées car elles sont déjà installées : {apps}",
|
||||
"regenconf_need_to_explicitly_specify_ssh": "La configuration de ssh a été modifiée manuellement. Vous devez explicitement indiquer la mention --force à \"ssh\" pour appliquer les changements.",
|
||||
"migration_0015_cleaning_up": "Nettoyage du cache et des paquets qui ne sont plus utiles …",
|
||||
"migration_0015_specific_upgrade": "Commencement de la mise à jour des paquets du système qui doivent être mis à jour séparément …",
|
||||
"migration_0015_modified_files": "Veuillez noter que les fichiers suivants ont été modifiés manuellement et pourraient être écrasés à la suite de la mise à niveau : {manually_modified_files}",
|
||||
"migration_0015_problematic_apps_warning": "Veuillez noter que des applications qui peuvent poser problèmes ont été détectées. Il semble qu'elles n'aient pas été installées à partir du catalogue d'applications YunoHost, ou bien qu'elles ne soient pas signalées comme \"fonctionnelles\". Par conséquent, il n'est pas possible de garantir que les applications suivantes fonctionneront encore après la mise à niveau : {problematic_apps}",
|
||||
"migration_0015_general_warning": "Veuillez noter que cette migration est une opération délicate. L'équipe YunoHost a fait de son mieux pour la revérifier et la tester, mais la migration pourrait quand même casser des éléments du système ou de ses applications.\n\nIl est donc recommandé :\n…- de faire une sauvegarde de toute donnée ou application critique. Plus d'informations ici https://yunohost.org/backup ;\n…- d'être patient après le lancement de la migration. Selon votre connexion internet et votre matériel, la mise à niveau peut prendre jusqu'à quelques heures.",
|
||||
"migration_0015_system_not_fully_up_to_date": "Votre système n'est pas entièrement à jour. Veuillez effectuer une mise à jour normale avant de lancer la migration vers Buster.",
|
||||
"migration_0015_not_enough_free_space": "L'espace libre est très faible dans /var/ ! Vous devriez avoir au moins 1 Go de libre pour effectuer cette migration.",
|
||||
"migration_0015_not_stretch": "La distribution Debian actuelle n'est pas Stretch !",
|
||||
"migration_0015_yunohost_upgrade": "Démarrage de la mise à jour de YunoHost …",
|
||||
"migration_0015_still_on_stretch_after_main_upgrade": "Quelque chose s'est mal passé lors de la mise à niveau, le système semble toujours être sous Debian Stretch",
|
||||
"migration_0015_main_upgrade": "Démarrage de la mise à niveau générale …",
|
||||
"migration_0015_patching_sources_list": "Mise à jour du fichier sources.lists …",
|
||||
"migration_0015_start": "Démarrage de la migration vers Buster",
|
||||
"migration_description_0015_migrate_to_buster": "Mise à niveau du système vers Debian Buster et YunoHost 4.x",
|
||||
"diagnosis_dns_try_dyndns_update_force": "La configuration DNS de ce domaine devrait être automatiquement gérée par Yunohost. Si ce n'est pas le cas, vous pouvez essayer de forcer une mise à jour en utilisant <cmd>yunohost dyndns update --force</cmd>.",
|
||||
"app_packaging_format_not_supported": "Cette application ne peut pas être installée car son format n'est pas pris en charge par votre version de Yunohost. Vous devriez probablement envisager de mettre à jour votre système.",
|
||||
"migration_0015_weak_certs": "Il a été constaté que les certificats suivants utilisent encore des algorithmes de signature peu robustes et doivent être mis à jour pour être compatibles avec la prochaine version de nginx : {certs}"
|
||||
}
@@ -1,7 +1,7 @@
{
|
||||
"app_already_installed": "{app:s} è già installata",
|
||||
"app_extraction_failed": "Impossibile estrarre i file di installazione",
|
||||
"app_not_installed": "{app:s} non è installata",
|
||||
"app_not_installed": "Impossibile trovare l'applicazione '{app:s}' nell'elenco delle applicazioni installate: {all_apps}",
|
||||
"app_unknown": "Applicazione sconosciuta",
|
||||
"ask_email": "Indirizzo email",
|
||||
"ask_password": "Password",
@@ -27,16 +27,16 @@
"user_deleted": "L'utente è stato cancellato",
|
||||
"admin_password": "Password dell'amministrazione",
|
||||
"admin_password_change_failed": "Impossibile cambiare la password",
|
||||
"admin_password_changed": "La password dell'amministrazione è stata cambiata",
|
||||
"app_install_files_invalid": "Non sono validi i file di installazione",
|
||||
"app_manifest_invalid": "Manifesto dell'applicazione non valido: {error}",
|
||||
"admin_password_changed": "La password d'amministrazione è stata cambiata",
|
||||
"app_install_files_invalid": "Questi file non possono essere installati",
|
||||
"app_manifest_invalid": "C'è qualcosa di scorretto nel manifesto dell'applicazione: {error}",
|
||||
"app_not_correctly_installed": "{app:s} sembra di non essere installata correttamente",
|
||||
"app_not_properly_removed": "{app:s} non è stata correttamente rimossa",
|
||||
"action_invalid": "L'azione '{action:s}' non è valida",
|
||||
"app_removed": "{app:s} è stata rimossa",
|
||||
"app_sources_fetch_failed": "Impossibile riportare i file sorgenti",
|
||||
"app_upgrade_failed": "Impossibile aggiornare {app:s}",
|
||||
"app_upgraded": "{app:s} è stata aggiornata",
|
||||
"app_removed": "{app:s} rimossa",
|
||||
"app_sources_fetch_failed": "Impossibile riportare i file sorgenti, l'URL è corretto?",
|
||||
"app_upgrade_failed": "Impossibile aggiornare {app:s}: {error}",
|
||||
"app_upgraded": "{app:s} aggiornata",
|
||||
"app_requirements_checking": "Controllo i pacchetti richiesti per {app}…",
|
||||
"app_requirements_unmeet": "Requisiti non soddisfatti per {app}, il pacchetto {pkgname} ({version}) deve essere {spec}",
|
||||
"ask_firstname": "Nome",
@@ -175,11 +175,11 @@
"app_already_up_to_date": "{app:s} è già aggiornata",
|
||||
"app_change_url_failed_nginx_reload": "Non riesco a riavviare NGINX. Questo è il risultato di 'nginx -t':\n{nginx_errors:s}",
|
||||
"app_change_url_identical_domains": "Il vecchio ed il nuovo dominio/percorso_url sono identici ('{domain:s}{path:s}'), nessuna operazione necessaria.",
|
||||
"app_change_url_no_script": "L'applicazione '{app_name:s}' non supporta ancora la modifica dell'URL. Forse dovresti aggiornare l'applicazione.",
|
||||
"app_change_url_success": "URL dell'applicazione {app:s} cambiato con successo in {domain:s}{path:s}",
|
||||
"app_make_default_location_already_used": "Impostazione dell'applicazione '{app}' come predefinita del dominio {domain} non riuscita perchè è già stata impostata per l'altra applicazione '{other_app}'",
|
||||
"app_location_unavailable": "Questo URL non è disponibile o va in conflitto con la/le applicazione/i già installata/e:\n{apps:s}",
|
||||
"app_upgrade_app_name": "Aggiornando l'applicazione {app}…",
|
||||
"app_change_url_no_script": "L'applicazione '{app_name:s}' non supporta ancora la modifica dell'URL. Forse dovresti aggiornarla.",
|
||||
"app_change_url_success": "L'URL dell'applicazione {app:s} è stato cambiato in {domain:s}{path:s}",
|
||||
"app_make_default_location_already_used": "Impostazione dell'applicazione '{app}' come predefinita del dominio non riuscita perché il dominio {domain} è già in uso per l'altra applicazione '{other_app}'",
|
||||
"app_location_unavailable": "Questo URL non è più disponibile o va in conflitto con la/le applicazione/i già installata/e:\n{apps:s}",
|
||||
"app_upgrade_app_name": "Aggiornamento dell'applicazione {app}…",
|
||||
"app_upgrade_some_app_failed": "Impossibile aggiornare alcune applicazioni",
|
||||
"backup_abstract_method": "Questo metodo di backup non è ancora stato implementato",
|
||||
"backup_applying_method_borg": "Inviando tutti i file da salvare nel backup nel deposito borg-backup…",
@@ -212,11 +212,11 @@
"certmanager_cert_install_success": "Certificato Let's Encrypt per il dominio {domain:s} installato con successo!",
|
||||
"aborting": "Annullamento.",
|
||||
"admin_password_too_long": "Per favore scegli una password più corta di 127 caratteri",
|
||||
"app_not_upgraded": "Le seguenti app non sono state aggiornate: {apps}",
|
||||
"app_start_install": "Installando l'applicazione {app}…",
|
||||
"app_start_remove": "Rimuovendo l'applicazione {app}…",
|
||||
"app_start_backup": "Raccogliendo file da salvare nel backup per {app}…",
|
||||
"app_start_restore": "Ripristinando l'applicazione {app}…",
|
||||
"app_not_upgraded": "Impossibile aggiornare le applicazioni '{failed_app}' e di conseguenza l'aggiornamento delle seguenti applicazione è stato cancellato: {apps}",
|
||||
"app_start_install": "Installando l'applicazione '{app}'…",
|
||||
"app_start_remove": "Rimozione dell'applicazione {app}…",
|
||||
"app_start_backup": "Raccogliendo file da salvare nel backup per '{app}'…",
|
||||
"app_start_restore": "Ripristino dell'applicazione '{app}'…",
|
||||
"app_upgrade_several_apps": "Le seguenti app saranno aggiornate : {apps}",
|
||||
"ask_new_domain": "Nuovo dominio",
|
||||
"ask_new_path": "Nuovo percorso",
@@ -264,7 +264,7 @@
"global_settings_reset_success": "Successo. Le tue impostazioni precedenti sono state salvate in {path:s}",
|
||||
"global_settings_setting_example_bool": "Esempio di opzione booleana",
|
||||
"global_settings_setting_example_enum": "Esempio di opzione enum",
|
||||
"already_up_to_date": "Niente da fare! Tutto è già aggiornato!",
|
||||
"already_up_to_date": "Niente da fare. Tutto è già aggiornato.",
|
||||
"global_settings_setting_example_int": "Esempio di opzione int",
|
||||
"global_settings_setting_example_string": "Esempio di opzione string",
|
||||
"global_settings_setting_security_nginx_compatibility": "Bilanciamento tra compatibilità e sicurezza per il server web nginx. Riguarda gli algoritmi di cifratura (e altri aspetti legati alla sicurezza)",
@@ -335,5 +335,10 @@
"migration_0003_not_jessie": "La distribuzione attuale non è Jessie!",
|
||||
"migration_0003_system_not_fully_up_to_date": "Il tuo sistema non è completamente aggiornato. Per favore prima esegui un aggiornamento normale prima di migrare a stretch.",
|
||||
"this_action_broke_dpkg": "Questa azione ha danneggiato dpkg/apt (i gestori di pacchetti del sistema)… Puoi provare a risolvere questo problema connettendoti via SSH ed eseguendo `sudo dpkg --configure -a`.",
|
||||
"app_action_broke_system": "Questa azione sembra avere rotto questi servizi importanti: {services}"
|
||||
"app_action_broke_system": "Questa azione sembra avere rotto questi servizi importanti: {services}",
|
||||
"app_remove_after_failed_install": "Rimozione dell'applicazione a causa del fallimento dell'installazione…",
|
||||
"app_install_script_failed": "Si è verificato un errore nello script di installazione dell'applicazione",
|
||||
"app_install_failed": "Impossibile installare {app}:{error}",
|
||||
"app_full_domain_unavailable": "Spiacente, questa app deve essere installata su un proprio dominio, ma altre applicazioni sono state installate sul dominio '{domain}'. Dovresti invece usare un sotto-dominio dedicato per questa app.",
|
||||
"app_upgrade_script_failed": "È stato trovato un errore nello script di aggiornamento dell'applicazione"
|
||||
}
@@ -472,7 +472,7 @@
"migrations_not_pending_cant_skip": "Aquestas migracions son pas en espèra, las podètz pas doncas ignorar : {ids}",
|
||||
"app_action_broke_system": "Aquesta accion sembla aver copat de servicis importants : {services}",
|
||||
"diagnosis_display_tip_web": "Podètz anar a la seccion Diagnostic (dins l’ecran d’acuèlh) per veire los problèmas trobats.",
|
||||
"diagnosis_ip_no_ipv6": "Lo servidor a pas d’adreça IPv5 activa.",
|
||||
"diagnosis_ip_no_ipv6": "Lo servidor a pas d’adreça IPv6 activa.",
|
||||
"diagnosis_ip_not_connected_at_all": "Lo servidor sembla pas connectat a Internet ?!",
|
||||
"diagnosis_security_all_good": "Cap de vulnerabilitat de seguretat critica pas trobada.",
|
||||
"diagnosis_description_regenconf": "Configuracion sistèma",
@@ -537,7 +537,7 @@
"group_cannot_be_deleted": "Lo grop « {group} » pòt pas èsser suprimit manualament.",
|
||||
"diagnosis_found_warnings": "Trobat {warnings} element(s) que se poirián melhorar per {category}.",
|
||||
"diagnosis_dns_missing_record": "Segon la configuracion DNS recomandada, vos calriá ajustar un enregistrament DNS\ntipe: {type}\nnom: {name}\nvalor: {value}",
|
||||
"diagnosis_dns_discrepancy": "Segon la configuracion DNS recomandada, la valor per l’enregistrament DNS\ntipe: {type}\nnom: {name}\ndeuriá èsser: {current}\nallòc de: {value}",
|
||||
"diagnosis_dns_discrepancy": "La configuracion DNS seguenta sembla pas la configuracion recomandada : <br>Tipe : <code>{type}</code><br>Nom : <code>{name}</code><br>Valors actualas :<code> {current]</code><br>Valor esperada : <code>{value}</code>",
|
||||
"diagnosis_regenconf_manually_modified_debian_details": "Es pas problematic, mas car téner d’agacher...",
|
||||
"diagnosis_ports_could_not_diagnose": "Impossible de diagnosticar se los pòrts son accessibles de l’exterior.",
|
||||
"diagnosis_ports_could_not_diagnose_details": "Error : {error}",
@@ -580,5 +580,7 @@
"diagnosis_basesystem_hardware": "L’arquitectura del servidor es {virt} {arch}",
|
||||
"diagnosis_basesystem_hardware_board": "Lo modèl de carta del servidor es {model}",
|
||||
"backup_archive_corrupted": "Sembla que l’archiu de la salvagarda « {archive} » es corromput : {error}",
|
||||
"diagnosis_domain_expires_in": "{domain} expiraà d’aquí {days} jorns."
|
||||
"diagnosis_domain_expires_in": "{domain} expiraà d’aquí {days} jorns.",
|
||||
"migration_0015_cleaning_up": "Netejatge de la memòria cache e dels paquets pas mai necessaris…",
|
||||
"restore_already_installed_apps": "Restauracion impossibla de las aplicacions seguentas que son ja installadas : {apps}"
|
||||
}
@@ -485,7 +485,7 @@ def app_upgrade(app=[], url=None, file=None):
         _patch_legacy_helpers(extracted_app_folder)
 
         # Apply dirty patch to make php5 apps compatible with php7
-        _patch_php5(extracted_app_folder)
+        _patch_legacy_php_versions(extracted_app_folder)
 
         # Start register change on system
         related_to = [('app', app_instance_name)]
@@ -701,7 +701,7 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu
     _patch_legacy_helpers(extracted_app_folder)
 
     # Apply dirty patch to make php5 apps compatible with php7
-    _patch_php5(extracted_app_folder)
+    _patch_legacy_php_versions(extracted_app_folder)
 
     # Prepare env. var. to pass to script
     env_dict = _make_environment_dict(args_odict)
@@ -832,6 +832,7 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu
                 os.path.join(extracted_app_folder, 'scripts/remove'),
                 args=[app_instance_name], env=env_dict_remove
             )[0]
 
             # Here again, calling hook_exec could fail miserably, or get
             # manually interrupted (by mistake or because script was stuck)
             # In that case we still want to proceed with the rest of the
@@ -1000,7 +1001,7 @@ def app_remove(operation_logger, app):
 
     # Apply dirty patch to make php5 apps compatible with php7 (e.g. the remove
     # script might date back from jessie install)
-    _patch_php5(app_setting_path)
+    _patch_legacy_php_versions(app_setting_path)
 
     manifest = _get_manifest_of_app(app_setting_path)
 
@@ -2683,12 +2684,6 @@ def _read_apps_catalog_list():
     Read the json corresponding to the list of apps catalogs
     """
 
-    # Legacy code - can be removed after moving to buster (if the migration got merged before buster)
-    if os.path.exists('/etc/yunohost/appslists.json'):
-        from yunohost.tools import _get_migration_by_name
-        migration = _get_migration_by_name("futureproof_apps_catalog_system")
-        migration.run()
-
     try:
         list_ = read_yaml(APPS_CATALOG_CONF)
         # Support the case where file exists but is empty
@@ -2845,8 +2840,8 @@ def _assert_system_is_sane_for_app(manifest, when):
 
     # Some apps use php-fpm or php5-fpm which is now php7.0-fpm
     def replace_alias(service):
-        if service in ["php-fpm", "php5-fpm"]:
-            return "php7.0-fpm"
+        if service in ["php-fpm", "php5-fpm", "php7.0-fpm"]:
+            return "php7.3-fpm"
         else:
             return service
     services = [replace_alias(s) for s in services]
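For readers skimming the diff: the only behavioural change in this hunk is that `php7.0-fpm` is now treated as one more alias of the new default FPM service. A small standalone sketch of the new mapping (illustrative only, not part of the patch):

```python
# Minimal sketch of the alias folding introduced above; inputs are made up for illustration.
def replace_alias(service):
    # All legacy PHP-FPM service names now resolve to php7.3-fpm
    if service in ["php-fpm", "php5-fpm", "php7.0-fpm"]:
        return "php7.3-fpm"
    return service

print([replace_alias(s) for s in ["nginx", "php7.0-fpm", "mysql"]])
# -> ['nginx', 'php7.3-fpm', 'mysql']
```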
@@ -2854,7 +2849,7 @@ def _assert_system_is_sane_for_app(manifest, when):
     # We only check those, mostly to ignore "custom" services
     # (added by apps) and because those are the most popular
     # services
-    service_filter = ["nginx", "php7.0-fpm", "mysql", "postfix"]
+    service_filter = ["nginx", "php7.3-fpm", "mysql", "postfix"]
     services = [str(s) for s in services if s in service_filter]
 
     if "nginx" not in services:
@@ -2879,11 +2874,24 @@ def _assert_system_is_sane_for_app(manifest, when):
         raise YunohostError("this_action_broke_dpkg")
 
 
-def _patch_php5(app_folder):
+LEGACY_PHP_VERSION_REPLACEMENTS = [
+    ("/etc/php5", "/etc/php/7.3"),
+    ("/etc/php/7.0", "/etc/php/7.3"),
+    ("/var/run/php5-fpm", "/var/run/php/php7.3-fpm"),
+    ("/var/run/php/php7.0-fpm", "/var/run/php/php7.3-fpm"),
+    ("php5", "php7.3"),
+    ("php7.0", "php7.3"),
+    ('phpversion="${phpversion:-7.0}"', 'phpversion="${phpversion:-7.3}"'), # Many helpers like the composer ones use 7.0 by default ...
+    ('"$phpversion" == "7.0"', '$(bc <<< "$phpversion >= 7.3") -eq 1') # patch ynh_install_php to refuse installing/removing php <= 7.3
+]
+
+
+def _patch_legacy_php_versions(app_folder):
+
     files_to_patch = []
     files_to_patch.extend(glob.glob("%s/conf/*" % app_folder))
     files_to_patch.extend(glob.glob("%s/scripts/*" % app_folder))
     files_to_patch.extend(glob.glob("%s/scripts/*/*" % app_folder))
     files_to_patch.extend(glob.glob("%s/scripts/.*" % app_folder))
     files_to_patch.append("%s/manifest.json" % app_folder)
     files_to_patch.append("%s/manifest.toml" % app_folder)
@ -2894,12 +2902,32 @@ def _patch_php5(app_folder):
|
|||
if not os.path.isfile(filename):
|
||||
continue
|
||||
|
||||
c = "sed -i -e 's@/etc/php5@/etc/php/7.0@g' " \
|
||||
"-e 's@/var/run/php5-fpm@/var/run/php/php7.0-fpm@g' " \
|
||||
"-e 's@php5@php7.0@g' " \
|
||||
"%s" % filename
|
||||
c = "sed -i " \
|
||||
+ "".join("-e 's@{pattern}@{replace}@g' ".format(pattern=p, replace=r) for p, r in LEGACY_PHP_VERSION_REPLACEMENTS) \
|
||||
+ "%s" % filename
|
||||
os.system(c)
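As an illustration of what the composed command ends up looking like, here is a minimal standalone sketch; the replacement pairs are a shortened stand-in for LEGACY_PHP_VERSION_REPLACEMENTS and the file path is hypothetical:

    # Minimal sketch of how the sed invocation is assembled from (pattern, replace) pairs.
    replacements = [
        ("/etc/php/7.0", "/etc/php/7.3"),
        ("php7.0", "php7.3"),
    ]
    filename = "/tmp/example_app/scripts/install"  # hypothetical path
    command = "sed -i " \
        + "".join("-e 's@{pattern}@{replace}@g' ".format(pattern=p, replace=r)
                  for p, r in replacements) \
        + filename
    print(command)
    # sed -i -e 's@/etc/php/7.0@/etc/php/7.3@g' -e 's@php7.0@php7.3@g' /tmp/example_app/scripts/install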
|
||||
|
||||
|
||||
def _patch_legacy_php_versions_in_settings(app_folder):
|
||||
|
||||
settings = read_yaml(os.path.join(app_folder, 'settings.yml'))
|
||||
|
||||
if settings.get("fpm_config_dir") == "/etc/php/7.0/fpm":
|
||||
settings["fpm_config_dir"] = "/etc/php/7.3/fpm"
|
||||
if settings.get("fpm_service") == "php7.0-fpm":
|
||||
settings["fpm_service"] = "php7.3-fpm"
|
||||
if settings.get("phpversion") == "7.0":
|
||||
settings["phpversion"] = "7.3"
|
||||
|
||||
# We delete these checksums otherwise the file will appear as manually modified
|
||||
list_to_remove = ["checksum__etc_php_7.0_fpm_pool",
|
||||
"checksum__etc_nginx_conf.d"]
|
||||
settings = {k: v for k, v in settings.items()
|
||||
if not any(k.startswith(to_remove) for to_remove in list_to_remove)}
|
||||
|
||||
write_to_yaml(app_folder + '/settings.yml', settings)
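For clarity, a self-contained illustration of the same settings rewrite on a hypothetical settings dict (the checksum key names and values below are made up for the example):

    # Hypothetical legacy settings.yml content for an app still on php 7.0
    settings = {
        "fpm_config_dir": "/etc/php/7.0/fpm",
        "fpm_service": "php7.0-fpm",
        "phpversion": "7.0",
        "checksum__etc_php_7.0_fpm_pool_d_app.conf": "abc123",          # made-up key/value
        "checksum__etc_nginx_conf.d_domain.tld.d_app.conf": "def456",   # made-up key/value
        "domain": "domain.tld",
    }
    settings.update({"fpm_config_dir": "/etc/php/7.3/fpm",
                     "fpm_service": "php7.3-fpm",
                     "phpversion": "7.3"})
    list_to_remove = ["checksum__etc_php_7.0_fpm_pool", "checksum__etc_nginx_conf.d"]
    settings = {k: v for k, v in settings.items()
                if not any(k.startswith(to_remove) for to_remove in list_to_remove)}
    # Only fpm_config_dir, fpm_service, phpversion and domain remain, now pointing to php 7.3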
|
||||
|
||||
|
||||
def _patch_legacy_helpers(app_folder):
|
||||
|
||||
files_to_patch = []
|
||||
|
|
|
@ -43,7 +43,13 @@ from moulinette.utils.log import getActionLogger
|
|||
from moulinette.utils.filesystem import read_file, mkdir, write_to_yaml, read_yaml
|
||||
|
||||
from yunohost.app import (
|
||||
app_info, _is_installed, _parse_app_instance_name, _patch_php5, dump_app_log_extract_for_debugging, _patch_legacy_helpers
|
||||
app_info, _is_installed,
|
||||
_parse_app_instance_name,
|
||||
dump_app_log_extract_for_debugging,
|
||||
_patch_legacy_helpers,
|
||||
_patch_legacy_php_versions,
|
||||
_patch_legacy_php_versions_in_settings,
|
||||
LEGACY_PHP_VERSION_REPLACEMENTS
|
||||
)
|
||||
from yunohost.hook import (
|
||||
hook_list, hook_info, hook_callback, hook_exec, CUSTOM_HOOK_FOLDER
|
||||
|
@ -1141,7 +1147,7 @@ class RestoreManager():
|
|||
self._postinstall_if_needed()
|
||||
|
||||
# Apply dirty patch to redirect php5 and php7.0 files to php7.3
|
||||
self._patch_backup_csv_file()
|
||||
self._patch_legacy_php_versions_in_csv_file()
|
||||
|
||||
self._restore_system()
|
||||
self._restore_apps()
|
||||
|
@ -1150,9 +1156,9 @@ class RestoreManager():
|
|||
finally:
|
||||
self.clean()
|
||||
|
||||
def _patch_backup_csv_file(self):
|
||||
def _patch_legacy_php_versions_in_csv_file(self):
|
||||
"""
|
||||
Apply dirty patch to redirect php5 file on php7
|
||||
Apply dirty patch to redirect php5 and php7.0 files to php7.3
|
||||
"""
|
||||
|
||||
backup_csv = os.path.join(self.work_dir, 'backup.csv')
|
||||
|
@ -1160,32 +1166,27 @@ class RestoreManager():
|
|||
if not os.path.isfile(backup_csv):
|
||||
return
|
||||
|
||||
contains_php5 = False
|
||||
replaced_something = False
|
||||
with open(backup_csv) as csvfile:
|
||||
reader = csv.DictReader(csvfile, fieldnames=['source', 'dest'])
|
||||
newlines = []
|
||||
for row in reader:
|
||||
if 'php5' in row['source']:
|
||||
contains_php5 = True
|
||||
row['source'] = row['source'].replace('/etc/php5', '/etc/php/7.0') \
|
||||
.replace('/var/run/php5-fpm', '/var/run/php/php7.0-fpm') \
|
||||
.replace('php5', 'php7')
|
||||
for pattern, replace in LEGACY_PHP_VERSION_REPLACEMENTS:
|
||||
if pattern in row['source']:
|
||||
replaced_something = True
|
||||
row['source'] = row['source'].replace(pattern, replace)
|
||||
|
||||
newlines.append(row)
|
||||
|
||||
if not contains_php5:
|
||||
if not replaced_something:
|
||||
return
|
||||
|
||||
try:
|
||||
with open(backup_csv, 'w') as csvfile:
|
||||
writer = csv.DictWriter(csvfile,
|
||||
fieldnames=['source', 'dest'],
|
||||
quoting=csv.QUOTE_ALL)
|
||||
for row in newlines:
|
||||
writer.writerow(row)
|
||||
except (IOError, OSError, csv.Error) as e:
|
||||
logger.warning(m18n.n('backup_php5_to_php7_migration_may_fail',
|
||||
error=str(e)))
|
||||
with open(backup_csv, 'w') as csvfile:
|
||||
writer = csv.DictWriter(csvfile,
|
||||
fieldnames=['source', 'dest'],
|
||||
quoting=csv.QUOTE_ALL)
|
||||
for row in newlines:
|
||||
writer.writerow(row)
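A minimal, self-contained sketch (Python 3, simplified replacement list, made-up CSV row) of the per-row rewrite performed above:

    import csv
    import io

    # Simplified stand-in for LEGACY_PHP_VERSION_REPLACEMENTS
    replacements = [("/etc/php5", "/etc/php/7.3"), ("php5", "php7.3")]

    fake_backup_csv = io.StringIO('"/etc/php5/fpm/pool.d/app.conf","conf/php-fpm.conf"\r\n')
    newlines = []
    for row in csv.DictReader(fake_backup_csv, fieldnames=['source', 'dest']):
        for pattern, replace in replacements:
            if pattern in row['source']:
                row['source'] = row['source'].replace(pattern, replace)
        newlines.append(row)
    print(newlines[0]['source'])  # /etc/php/7.3/fpm/pool.d/app.conf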
|
||||
|
||||
def _restore_system(self):
|
||||
""" Restore user and system parts """
|
||||
|
@ -1244,12 +1245,11 @@ class RestoreManager():
|
|||
#
|
||||
# Legacy code
|
||||
if not "all_users" in user_group_list()["groups"].keys():
|
||||
from yunohost.tools import _get_migration_by_name
|
||||
setup_group_permission = _get_migration_by_name("setup_group_permission")
|
||||
from yunohost.utils.legacy import SetupGroupPermissions
|
||||
# Update LDAP schema and restart slapd
|
||||
logger.info(m18n.n("migration_0011_update_LDAP_schema"))
|
||||
regen_conf(names=['slapd'], force=True)
|
||||
setup_group_permission.migrate_LDAP_db()
|
||||
SetupGroupPermissions.migrate_LDAP_db()
|
||||
|
||||
# Remove all permissions for all apps still in the LDAP
|
||||
for permission_name in user_permission_list(ignore_system_perms=True)["permissions"].keys():
|
||||
|
@ -1340,7 +1340,8 @@ class RestoreManager():
|
|||
_patch_legacy_helpers(app_settings_in_archive)
|
||||
|
||||
# Apply dirty patch to make php5 apps compatible with php7
|
||||
_patch_php5(app_settings_in_archive)
|
||||
_patch_legacy_php_versions(app_settings_in_archive)
|
||||
_patch_legacy_php_versions_in_settings(app_settings_in_archive)
|
||||
|
||||
# Delete _common.sh file in backup
|
||||
common_file = os.path.join(app_backup_in_archive, '_common.sh')
|
||||
|
@ -1398,9 +1399,8 @@ class RestoreManager():
|
|||
else:
|
||||
# Otherwise, we need to migrate the legacy permissions of this
|
||||
# app (included in its settings.yml)
|
||||
from yunohost.tools import _get_migration_by_name
|
||||
setup_group_permission = _get_migration_by_name("setup_group_permission")
|
||||
setup_group_permission.migrate_app_permission(app=app_instance_name)
|
||||
from yunohost.utils.legacy import SetupGroupPermissions
|
||||
SetupGroupPermissions.migrate_app_permission(app=app_instance_name)
|
||||
|
||||
# Prepare env. var. to pass to script
|
||||
env_dict = self._get_env_var(app_instance_name)
|
||||
|
|
|
@ -369,12 +369,11 @@ def certificate_renew(domain_list, force=False, no_checks=False, email=False, st
|
|||
if not no_checks:
|
||||
try:
|
||||
_check_domain_is_ready_for_ACME(domain)
|
||||
except:
|
||||
msg = "Certificate renewing for %s failed !" % (domain)
|
||||
logger.error(msg)
|
||||
except Exception as e:
|
||||
logger.error(e)
|
||||
if email:
|
||||
logger.error("Sending email with details to root ...")
|
||||
_email_renewing_failed(domain, msg)
|
||||
_email_renewing_failed(domain, e)
|
||||
continue
|
||||
|
||||
logger.info(
|
||||
|
|
|
@ -1,15 +0,0 @@
|
|||
import subprocess
|
||||
import glob
|
||||
from yunohost.tools import Migration
|
||||
from moulinette.utils.filesystem import chown
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"Change certificates group permissions from 'metronome' to 'ssl-cert'"
|
||||
|
||||
all_certificate_files = glob.glob("/etc/yunohost/certs/*/*.pem")
|
||||
|
||||
def run(self):
|
||||
for filename in self.all_certificate_files:
|
||||
chown(filename, uid="root", gid="ssl-cert")
|
|
@ -1,86 +0,0 @@
|
|||
import glob
|
||||
import os
|
||||
import requests
|
||||
import base64
|
||||
import time
|
||||
import json
|
||||
|
||||
from moulinette import m18n
|
||||
from yunohost.utils.error import YunohostError
|
||||
from moulinette.utils.log import getActionLogger
|
||||
|
||||
from yunohost.tools import Migration
|
||||
from yunohost.dyndns import _guess_current_dyndns_domain
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"Migrate Dyndns stuff from MD5 TSIG to SHA512 TSIG"
|
||||
|
||||
def run(self, dyn_host="dyndns.yunohost.org", domain=None, private_key_path=None):
|
||||
|
||||
if domain is None or private_key_path is None:
|
||||
try:
|
||||
(domain, private_key_path) = _guess_current_dyndns_domain(dyn_host)
|
||||
assert "+157" in private_key_path
|
||||
except (YunohostError, AssertionError):
|
||||
logger.info(m18n.n("migrate_tsig_not_needed"))
|
||||
return
|
||||
|
||||
logger.info(m18n.n('migrate_tsig_start', domain=domain))
|
||||
public_key_path = private_key_path.rsplit(".private", 1)[0] + ".key"
|
||||
public_key_md5 = open(public_key_path).read().strip().split(' ')[-1]
|
||||
|
||||
os.system('cd /etc/yunohost/dyndns && '
|
||||
'dnssec-keygen -a hmac-sha512 -b 512 -r /dev/urandom -n USER %s' % domain)
|
||||
os.system('chmod 600 /etc/yunohost/dyndns/*.key /etc/yunohost/dyndns/*.private')
|
||||
|
||||
# +165 means that this file stores an hmac-sha512 key
|
||||
new_key_path = glob.glob('/etc/yunohost/dyndns/*+165*.key')[0]
|
||||
public_key_sha512 = open(new_key_path).read().strip().split(' ', 6)[-1]
|
||||
|
||||
try:
|
||||
r = requests.put('https://%s/migrate_key_to_sha512/' % (dyn_host),
|
||||
data={
|
||||
'public_key_md5': base64.b64encode(public_key_md5),
|
||||
'public_key_sha512': base64.b64encode(public_key_sha512),
|
||||
}, timeout=30)
|
||||
except requests.ConnectionError:
|
||||
raise YunohostError('no_internet_connection')
|
||||
|
||||
if r.status_code != 201:
|
||||
try:
|
||||
error = json.loads(r.text)['error']
|
||||
except Exception:
|
||||
# failed to decode json
|
||||
error = r.text
|
||||
|
||||
import traceback
|
||||
from StringIO import StringIO
|
||||
stack = StringIO()
|
||||
traceback.print_stack(file=stack)
|
||||
logger.error(stack.getvalue())
|
||||
|
||||
# Migration didn't succeed, so we rollback and raise an exception
|
||||
os.system("mv /etc/yunohost/dyndns/*+165* /tmp")
|
||||
|
||||
raise YunohostError('migrate_tsig_failed', domain=domain,
|
||||
error_code=str(r.status_code), error=error)
|
||||
|
||||
# move the old md5 (+157) key files out of the way
|
||||
os.system("mv /etc/yunohost/dyndns/*+157* /tmp")
|
||||
|
||||
# sleep to wait for dyndns cache invalidation
|
||||
logger.info(m18n.n('migrate_tsig_wait'))
|
||||
time.sleep(60)
|
||||
logger.info(m18n.n('migrate_tsig_wait_2'))
|
||||
time.sleep(60)
|
||||
logger.info(m18n.n('migrate_tsig_wait_3'))
|
||||
time.sleep(30)
|
||||
logger.info(m18n.n('migrate_tsig_wait_4'))
|
||||
time.sleep(30)
|
||||
|
||||
logger.info(m18n.n('migrate_tsig_end'))
|
||||
return
|
|
@ -1,379 +0,0 @@
|
|||
import glob
|
||||
import os
|
||||
from shutil import copy2
|
||||
|
||||
from moulinette import m18n, msettings
|
||||
from yunohost.utils.error import YunohostError
|
||||
from moulinette.utils.log import getActionLogger
|
||||
from moulinette.utils.process import check_output, call_async_output
|
||||
from moulinette.utils.filesystem import read_file
|
||||
|
||||
from yunohost.tools import Migration
|
||||
from yunohost.app import unstable_apps
|
||||
from yunohost.service import _run_service_command
|
||||
from yunohost.regenconf import (manually_modified_files,
|
||||
manually_modified_files_compared_to_debian_default)
|
||||
from yunohost.utils.filesystem import free_space_in_directory
|
||||
from yunohost.utils.packages import get_ynh_package_version
|
||||
from yunohost.utils.network import get_network_interfaces
|
||||
from yunohost.firewall import firewall_allow, firewall_disallow
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
YUNOHOST_PACKAGES = ["yunohost", "yunohost-admin", "moulinette", "ssowat"]
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"Upgrade the system to Debian Stretch and Yunohost 3.0"
|
||||
|
||||
mode = "manual"
|
||||
|
||||
def run(self):
|
||||
|
||||
self.logfile = "/var/log/yunohost/{}.log".format(self.name)
|
||||
|
||||
self.check_assertions()
|
||||
|
||||
logger.info(m18n.n("migration_0003_start", logfile=self.logfile))
|
||||
|
||||
# Preparing the upgrade
|
||||
self.restore_original_nginx_conf_if_needed()
|
||||
|
||||
logger.info(m18n.n("migration_0003_patching_sources_list"))
|
||||
self.patch_apt_sources_list()
|
||||
self.backup_files_to_keep()
|
||||
self.apt_update()
|
||||
apps_packages = self.get_apps_equivs_packages()
|
||||
self.unhold(["metronome"])
|
||||
self.hold(YUNOHOST_PACKAGES + apps_packages + ["fail2ban"])
|
||||
|
||||
# Main dist-upgrade
|
||||
logger.info(m18n.n("migration_0003_main_upgrade"))
|
||||
_run_service_command("stop", "mysql")
|
||||
self.apt_dist_upgrade(conf_flags=["old", "miss", "def"])
|
||||
_run_service_command("start", "mysql")
|
||||
if self.debian_major_version() == 8:
|
||||
raise YunohostError("migration_0003_still_on_jessie_after_main_upgrade", log=self.logfile)
|
||||
|
||||
# Specific upgrade for fail2ban...
|
||||
logger.info(m18n.n("migration_0003_fail2ban_upgrade"))
|
||||
self.unhold(["fail2ban"])
|
||||
# Don't move this if folder already exists. If it does, we probably are
|
||||
# running this script a 2nd, 3rd, ... time but /etc/fail2ban will
|
||||
# be re-created only for the first dist-upgrade of fail2ban
|
||||
if not os.path.exists("/etc/fail2ban.old"):
|
||||
os.system("mv /etc/fail2ban /etc/fail2ban.old")
|
||||
self.apt_dist_upgrade(conf_flags=["new", "miss", "def"])
|
||||
_run_service_command("restart", "fail2ban")
|
||||
|
||||
self.disable_predicable_interface_names()
|
||||
|
||||
# Clean the mess
|
||||
os.system("apt autoremove --assume-yes")
|
||||
os.system("apt clean --assume-yes")
|
||||
|
||||
# We moved to port 587 for SMTP
|
||||
# https://busylog.net/smtp-tls-ssl-25-465-587/
|
||||
firewall_allow("Both", 587)
|
||||
firewall_disallow("Both", 465)
|
||||
|
||||
# Upgrade yunohost packages
|
||||
logger.info(m18n.n("migration_0003_yunohost_upgrade"))
|
||||
self.restore_files_to_keep()
|
||||
self.unhold(YUNOHOST_PACKAGES + apps_packages)
|
||||
self.upgrade_yunohost_packages()
|
||||
|
||||
def debian_major_version(self):
|
||||
# The python module "platform" and lsb_release are not reliable because
|
||||
# on some setups, they still return Release=8 even after upgrading to
|
||||
# stretch ... (Apparently this is related to OVH overriding some stuff
|
||||
# with /etc/lsb-release for instance -_-)
|
||||
# Instead, we rely on /etc/os-release which should be the raw info from
|
||||
# the distribution...
|
||||
return int(check_output("grep VERSION_ID /etc/os-release | head -n 1 | tr '\"' ' ' | cut -d ' ' -f2"))
|
||||
|
||||
def yunohost_major_version(self):
|
||||
return int(get_ynh_package_version("yunohost")["version"].split('.')[0])
|
||||
|
||||
def check_assertions(self):
|
||||
|
||||
# Be on jessie (8.x) and yunohost 2.x
|
||||
# NB : we do both checks to cover situations where the upgrade crashed
|
||||
# in the middle and debian version could be >= 9.x but yunohost package
|
||||
# would still be in 2.x...
|
||||
if not self.debian_major_version() == 8 \
|
||||
and not self.yunohost_major_version() == 2:
|
||||
raise YunohostError("migration_0003_not_jessie")
|
||||
|
||||
# Have > 1 Go free space on /var/ ?
|
||||
if free_space_in_directory("/var/") / (1024**3) < 1.0:
|
||||
raise YunohostError("There is not enough free space in /var/ to run the migration. You need at least 1GB free space")
|
||||
|
||||
# Check system is up to date
|
||||
# (but we skip this check if 'stretch' is already in the sources.list ...
|
||||
# which means maybe a previous upgrade crashed and we're re-running it)
|
||||
if " stretch " not in read_file("/etc/apt/sources.list"):
|
||||
self.apt_update()
|
||||
apt_list_upgradable = check_output("apt list --upgradable -a")
|
||||
if "upgradable" in apt_list_upgradable:
|
||||
raise YunohostError("migration_0003_system_not_fully_up_to_date")
|
||||
|
||||
@property
|
||||
def disclaimer(self):
|
||||
|
||||
# Avoid having a super long disclaimer + unnecessary check if we ain't
|
||||
# on jessie / yunohost 2.x anymore
|
||||
# NB : we do both checks to cover situations where the upgrade crashed
|
||||
# in the middle and debian version could be >= 9.x but yunohost package
|
||||
# would still be in 2.x...
|
||||
if not self.debian_major_version() == 8 \
|
||||
and not self.yunohost_major_version() == 2:
|
||||
return None
|
||||
|
||||
# Get list of problematic apps ? I.e. not official or community+working
|
||||
problematic_apps = unstable_apps()
|
||||
problematic_apps = "".join(["\n - " + app for app in problematic_apps])
|
||||
|
||||
# Manually modified files ? (c.f. yunohost service regen-conf)
|
||||
modified_files = manually_modified_files()
|
||||
# We also have a specific check for nginx.conf which some people
|
||||
# modified and needs to be upgraded...
|
||||
if "/etc/nginx/nginx.conf" in manually_modified_files_compared_to_debian_default():
|
||||
modified_files.append("/etc/nginx/nginx.conf")
|
||||
modified_files = "".join(["\n - " + f for f in modified_files])
|
||||
|
||||
message = m18n.n("migration_0003_general_warning")
|
||||
|
||||
if problematic_apps:
|
||||
message += "\n\n" + m18n.n("migration_0003_problematic_apps_warning", problematic_apps=problematic_apps)
|
||||
|
||||
if modified_files:
|
||||
message += "\n\n" + m18n.n("migration_0003_modified_files", manually_modified_files=modified_files)
|
||||
|
||||
return message
|
||||
|
||||
def patch_apt_sources_list(self):
|
||||
|
||||
sources_list = glob.glob("/etc/apt/sources.list.d/*.list")
|
||||
sources_list.append("/etc/apt/sources.list")
|
||||
|
||||
# This :
|
||||
# - replace single 'jessie' occurrence by 'stretch'
|
||||
# - comments lines containing "backports"
|
||||
# - replace 'jessie/updates' by 'stretch/updates' (or same with a -)
|
||||
# - switch yunohost's repo to forge
|
||||
for f in sources_list:
|
||||
command = "sed -i -e 's@ jessie @ stretch @g' " \
|
||||
"-e '/backports/ s@^#*@#@' " \
|
||||
"-e 's@ jessie/updates @ stretch/updates @g' " \
|
||||
"-e 's@ jessie-updates @ stretch-updates @g' " \
|
||||
"-e 's@repo.yunohost@forge.yunohost@g' " \
|
||||
"{}".format(f)
|
||||
os.system(command)
|
||||
|
||||
def get_apps_equivs_packages(self):
|
||||
|
||||
command = "dpkg --get-selections" \
|
||||
" | grep -v deinstall" \
|
||||
" | awk '{print $1}'" \
|
||||
" | { grep 'ynh-deps$' || true; }"
|
||||
|
||||
output = check_output(command).strip()
|
||||
|
||||
return output.split('\n') if output else []
|
||||
|
||||
def hold(self, packages):
|
||||
for package in packages:
|
||||
os.system("apt-mark hold {}".format(package))
|
||||
|
||||
def unhold(self, packages):
|
||||
for package in packages:
|
||||
os.system("apt-mark unhold {}".format(package))
|
||||
|
||||
def apt_update(self):
|
||||
|
||||
command = "apt-get update"
|
||||
logger.debug("Running apt command :\n{}".format(command))
|
||||
command += " 2>&1 | tee -a {}".format(self.logfile)
|
||||
|
||||
os.system(command)
|
||||
|
||||
def upgrade_yunohost_packages(self):
|
||||
|
||||
#
|
||||
# Here we use a dirty hack to run a command after the current
|
||||
# "yunohost tools migrations migrate", because the upgrade of
|
||||
# yunohost will also trigger another "yunohost tools migrations migrate"
|
||||
# (also the upgrade of the package, if executed from the webadmin, is
|
||||
# likely to kill/restart the api which is in turn likely to kill this
|
||||
# command before it ends...)
|
||||
#
|
||||
|
||||
MOULINETTE_LOCK = "/var/run/moulinette_yunohost.lock"
|
||||
|
||||
upgrade_command = ""
|
||||
upgrade_command += " DEBIAN_FRONTEND=noninteractive"
|
||||
upgrade_command += " APT_LISTCHANGES_FRONTEND=none"
|
||||
upgrade_command += " apt-get install"
|
||||
upgrade_command += " --assume-yes "
|
||||
upgrade_command += " ".join(YUNOHOST_PACKAGES)
|
||||
# We also install php-zip and php7.0-apcu to fix an issue with
|
||||
# nextcloud and kanboard that need it when on stretch.
|
||||
upgrade_command += " php-zip php7.0-apcu"
|
||||
upgrade_command += " 2>&1 | tee -a {}".format(self.logfile)
|
||||
|
||||
wait_until_end_of_yunohost_command = "(while [ -f {} ]; do sleep 2; done)".format(MOULINETTE_LOCK)
|
||||
|
||||
command = "({} && {}; echo 'Migration complete!') &".format(wait_until_end_of_yunohost_command,
|
||||
upgrade_command)
|
||||
|
||||
logger.debug("Running command :\n{}".format(command))
|
||||
|
||||
os.system(command)
|
||||
|
||||
def apt_dist_upgrade(self, conf_flags):
|
||||
|
||||
# Make apt-get happy
|
||||
os.system("echo 'libc6 libraries/restart-without-asking boolean true' | debconf-set-selections")
|
||||
# Don't send an email to root about the postgresql migration. It should be handled automatically after.
|
||||
os.system("echo 'postgresql-common postgresql-common/obsolete-major seen true' | debconf-set-selections")
|
||||
|
||||
command = ""
|
||||
command += " DEBIAN_FRONTEND=noninteractive"
|
||||
command += " APT_LISTCHANGES_FRONTEND=none"
|
||||
command += " apt-get"
|
||||
command += " --fix-broken --show-upgraded --assume-yes"
|
||||
for conf_flag in conf_flags:
|
||||
command += ' -o Dpkg::Options::="--force-conf{}"'.format(conf_flag)
|
||||
command += " dist-upgrade"
|
||||
|
||||
logger.debug("Running apt command :\n{}".format(command))
|
||||
|
||||
command += " 2>&1 | tee -a {}".format(self.logfile)
|
||||
|
||||
is_api = msettings.get('interface') == 'api'
|
||||
if is_api:
|
||||
callbacks = (
|
||||
lambda l: logger.info(l.rstrip()),
|
||||
lambda l: logger.warning(l.rstrip()),
|
||||
)
|
||||
call_async_output(command, callbacks, shell=True)
|
||||
else:
|
||||
# We do this when running from the cli to have the output of the
|
||||
# command showing in the terminal, since 'info' channel is only
|
||||
# enabled if the user explicitly add --verbose ...
|
||||
os.system(command)
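For reference, a tiny sketch of the command string this method ends up building for the flags used in run() (log redirection omitted):

    # Sketch: the dist-upgrade command assembled for conf_flags=["old", "miss", "def"]
    conf_flags = ["old", "miss", "def"]
    command = (" DEBIAN_FRONTEND=noninteractive"
               " APT_LISTCHANGES_FRONTEND=none"
               " apt-get --fix-broken --show-upgraded --assume-yes")
    for conf_flag in conf_flags:
        command += ' -o Dpkg::Options::="--force-conf{}"'.format(conf_flag)
    command += " dist-upgrade"
    print(command)
    # ... apt-get --fix-broken --show-upgraded --assume-yes
    #     -o Dpkg::Options::="--force-confold" -o Dpkg::Options::="--force-confmiss"
    #     -o Dpkg::Options::="--force-confdef" dist-upgrade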
|
||||
|
||||
# Those are files that should be kept and restored before the final switch
|
||||
# to yunohost 3.x... They end up being modified by the various dist-upgrades
|
||||
# (or need to be taken out momentarily), which then blocks the regen-conf
|
||||
# as they are flagged as "manually modified"...
|
||||
files_to_keep = [
|
||||
"/etc/mysql/my.cnf",
|
||||
"/etc/nslcd.conf",
|
||||
"/etc/postfix/master.cf",
|
||||
"/etc/fail2ban/filter.d/yunohost.conf"
|
||||
]
|
||||
|
||||
def backup_files_to_keep(self):
|
||||
|
||||
logger.debug("Backuping specific files to keep ...")
|
||||
|
||||
# Create tmp directory if it does not exist
|
||||
tmp_dir = os.path.join("/tmp/", self.name)
|
||||
if not os.path.exists(tmp_dir):
|
||||
os.mkdir(tmp_dir, 0o700)
|
||||
|
||||
for f in self.files_to_keep:
|
||||
dest_file = f.strip('/').replace("/", "_")
|
||||
|
||||
# If the file is already there, we might be re-running the migration
|
||||
# because it previously crashed. Hence we keep the existing file.
|
||||
if os.path.exists(os.path.join(tmp_dir, dest_file)):
|
||||
continue
|
||||
|
||||
copy2(f, os.path.join(tmp_dir, dest_file))
|
||||
|
||||
def restore_files_to_keep(self):
|
||||
|
||||
logger.debug("Restoring specific files to keep ...")
|
||||
|
||||
tmp_dir = os.path.join("/tmp/", self.name)
|
||||
|
||||
for f in self.files_to_keep:
|
||||
dest_file = f.strip('/').replace("/", "_")
|
||||
copy2(os.path.join(tmp_dir, dest_file), f)
|
||||
|
||||
# On some setups, /etc/nginx/nginx.conf got edited. But this file needs
|
||||
# to be upgraded because of the way the new module system works for nginx.
|
||||
# (in particular, having the line that include the modules at the top)
|
||||
#
|
||||
# So here, if it got edited, we force the restore of the original conf
|
||||
# *before* starting the actual upgrade...
|
||||
#
|
||||
# An alternative strategy that was attempted was to hold the nginx-common
|
||||
# package and have a specific upgrade for it like for fail2ban, but that
|
||||
# leads to apt complaining about not being able to upgrade for shitty
|
||||
# reasons >.>
|
||||
def restore_original_nginx_conf_if_needed(self):
|
||||
if "/etc/nginx/nginx.conf" not in manually_modified_files_compared_to_debian_default():
|
||||
return
|
||||
|
||||
if not os.path.exists("/etc/nginx/nginx.conf"):
|
||||
return
|
||||
|
||||
# If stretch is in the sources.list, we already started migrating on
|
||||
# stretch so we don't re-do this
|
||||
if " stretch " in read_file("/etc/apt/sources.list"):
|
||||
return
|
||||
|
||||
backup_dest = "/home/yunohost.conf/backup/nginx.conf.bkp_before_stretch"
|
||||
|
||||
logger.warning(m18n.n("migration_0003_restoring_origin_nginx_conf",
|
||||
backup_dest=backup_dest))
|
||||
|
||||
os.system("mv /etc/nginx/nginx.conf %s" % backup_dest)
|
||||
|
||||
command = ""
|
||||
command += " DEBIAN_FRONTEND=noninteractive"
|
||||
command += " APT_LISTCHANGES_FRONTEND=none"
|
||||
command += " apt-get"
|
||||
command += " --fix-broken --show-upgraded --assume-yes"
|
||||
command += ' -o Dpkg::Options::="--force-confmiss"'
|
||||
command += " install --reinstall"
|
||||
command += " nginx-common"
|
||||
|
||||
logger.debug("Running apt command :\n{}".format(command))
|
||||
|
||||
command += " 2>&1 | tee -a {}".format(self.logfile)
|
||||
|
||||
is_api = msettings.get('interface') == 'api'
|
||||
if is_api:
|
||||
callbacks = (
|
||||
lambda l: logger.info(l.rstrip()),
|
||||
lambda l: logger.warning(l.rstrip()),
|
||||
)
|
||||
call_async_output(command, callbacks, shell=True)
|
||||
else:
|
||||
# We do this when running from the cli to have the output of the
|
||||
# command showing in the terminal, since 'info' channel is only
|
||||
# enabled if the user explicitly add --verbose ...
|
||||
os.system(command)
|
||||
|
||||
def disable_predicable_interface_names(self):
|
||||
|
||||
# Try to see if currently used interface names are predictable ones or not...
|
||||
# If we ain't using "eth0" or "wlan0", assume we are using predictable interface
|
||||
# names and therefore they shouldn't be disabled
|
||||
network_interfaces = get_network_interfaces().keys()
|
||||
if "eth0" not in network_interfaces and "wlan0" not in network_interfaces:
|
||||
return
|
||||
|
||||
interfaces_config = read_file("/etc/network/interfaces")
|
||||
if "eth0" not in interfaces_config and "wlan0" not in interfaces_config:
|
||||
return
|
||||
|
||||
# Disable predictable interface names
|
||||
# c.f. https://unix.stackexchange.com/a/338730
|
||||
os.system("ln -s /dev/null /etc/systemd/network/99-default.link")
|
|
@ -1,99 +0,0 @@
|
|||
import os
|
||||
import glob
|
||||
from shutil import copy2
|
||||
|
||||
from moulinette.utils.log import getActionLogger
|
||||
|
||||
from yunohost.tools import Migration
|
||||
from yunohost.service import _run_service_command
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
PHP5_POOLS = "/etc/php5/fpm/pool.d"
|
||||
PHP7_POOLS = "/etc/php/7.0/fpm/pool.d"
|
||||
|
||||
PHP5_SOCKETS_PREFIX = "/var/run/php5-fpm"
|
||||
PHP7_SOCKETS_PREFIX = "/run/php/php7.0-fpm"
|
||||
|
||||
MIGRATION_COMMENT = "; YunoHost note : this file was automatically moved from {}".format(PHP5_POOLS)
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"Migrate php5-fpm 'pool' conf files to php7 stuff"
|
||||
|
||||
dependencies = ["migrate_to_stretch"]
|
||||
|
||||
def run(self):
|
||||
# Get list of php5 pool files
|
||||
php5_pool_files = glob.glob("{}/*.conf".format(PHP5_POOLS))
|
||||
|
||||
# Keep only basenames
|
||||
php5_pool_files = [os.path.basename(f) for f in php5_pool_files]
|
||||
|
||||
# Ignore the "www.conf" (default stuff, probably don't want to touch it ?)
|
||||
php5_pool_files = [f for f in php5_pool_files if f != "www.conf"]
|
||||
|
||||
for f in php5_pool_files:
|
||||
|
||||
# Copy the files to the php7 pool
|
||||
src = "{}/{}".format(PHP5_POOLS, f)
|
||||
dest = "{}/{}".format(PHP7_POOLS, f)
|
||||
copy2(src, dest)
|
||||
|
||||
# Replace the socket prefix if it's found
|
||||
c = "sed -i -e 's@{}@{}@g' {}".format(PHP5_SOCKETS_PREFIX, PHP7_SOCKETS_PREFIX, dest)
|
||||
os.system(c)
|
||||
|
||||
# Also add a comment that it was automatically moved from php5
|
||||
# (for human traceability and backward migration)
|
||||
c = "sed -i '1i {}' {}".format(MIGRATION_COMMENT, dest)
|
||||
os.system(c)
|
||||
|
||||
# Some old comments starting with '#' instead of ';' are not
|
||||
# compatible in php7
|
||||
c = "sed -i 's/^#/;#/g' {}".format(dest)
|
||||
os.system(c)
|
||||
|
||||
# Reload/restart the php pools
|
||||
_run_service_command("restart", "php7.0-fpm")
|
||||
_run_service_command("enable", "php7.0-fpm")
|
||||
os.system("systemctl stop php5-fpm")
|
||||
os.system("systemctl disable php5-fpm")
|
||||
os.system("rm /etc/logrotate.d/php5-fpm") # We remove this otherwise the logrotate cron will be unhappy
|
||||
|
||||
# Get list of nginx conf file
|
||||
nginx_conf_files = glob.glob("/etc/nginx/conf.d/*.d/*.conf")
|
||||
for f in nginx_conf_files:
|
||||
# Replace the socket prefix if it's found
|
||||
c = "sed -i -e 's@{}@{}@g' {}".format(PHP5_SOCKETS_PREFIX, PHP7_SOCKETS_PREFIX, f)
|
||||
os.system(c)
|
||||
|
||||
# Reload nginx
|
||||
_run_service_command("reload", "nginx")
|
||||
|
||||
def backward(self):
|
||||
|
||||
# Get list of php7 pool files
|
||||
php7_pool_files = glob.glob("{}/*.conf".format(PHP7_POOLS))
|
||||
|
||||
# Keep only files which have the migration comment
|
||||
php7_pool_files = [f for f in php7_pool_files if open(f).readline().strip() == MIGRATION_COMMENT]
|
||||
|
||||
# Delete those files
|
||||
for f in php7_pool_files:
|
||||
os.remove(f)
|
||||
|
||||
# Reload/restart the php pools
|
||||
_run_service_command("stop", "php7.0-fpm")
|
||||
os.system("systemctl start php5-fpm")
|
||||
|
||||
# Get list of nginx conf file
|
||||
nginx_conf_files = glob.glob("/etc/nginx/conf.d/*.d/*.conf")
|
||||
for f in nginx_conf_files:
|
||||
# Replace the socket prefix if it's found
|
||||
c = "sed -i -e 's@{}@{}@g' {}".format(PHP7_SOCKETS_PREFIX, PHP5_SOCKETS_PREFIX, f)
|
||||
os.system(c)
|
||||
|
||||
# Reload nginx
|
||||
_run_service_command("reload", "nginx")
|
|
@ -1,41 +0,0 @@
|
|||
import subprocess
|
||||
|
||||
from moulinette import m18n
|
||||
from yunohost.utils.error import YunohostError
|
||||
from moulinette.utils.log import getActionLogger
|
||||
|
||||
from yunohost.tools import Migration
|
||||
from yunohost.utils.filesystem import free_space_in_directory, space_used_by_directory
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"Migrate DBs from Postgresql 9.4 to 9.6 after migrating to Stretch"
|
||||
|
||||
dependencies = ["migrate_to_stretch"]
|
||||
|
||||
def run(self):
|
||||
|
||||
if not self.package_is_installed("postgresql-9.4"):
|
||||
logger.warning(m18n.n("migration_0005_postgresql_94_not_installed"))
|
||||
return
|
||||
|
||||
if not self.package_is_installed("postgresql-9.6"):
|
||||
raise YunohostError("migration_0005_postgresql_96_not_installed")
|
||||
|
||||
if not space_used_by_directory("/var/lib/postgresql/9.4") > free_space_in_directory("/var/lib/postgresql"):
|
||||
raise YunohostError("migration_0005_not_enough_space", path="/var/lib/postgresql/")
|
||||
|
||||
subprocess.check_call("service postgresql stop", shell=True)
|
||||
subprocess.check_call("pg_dropcluster --stop 9.6 main", shell=True)
|
||||
subprocess.check_call("pg_upgradecluster -m upgrade 9.4 main", shell=True)
|
||||
subprocess.check_call("pg_dropcluster --stop 9.4 main", shell=True)
|
||||
subprocess.check_call("service postgresql start", shell=True)
|
||||
|
||||
def package_is_installed(self, package_name):
|
||||
|
||||
p = subprocess.Popen("dpkg --list | grep '^ii ' | grep -q -w {}".format(package_name), shell=True)
|
||||
p.communicate()
|
||||
return p.returncode == 0
|
|
@ -1,78 +0,0 @@
|
|||
import spwd
|
||||
import crypt
|
||||
import random
|
||||
import string
|
||||
import subprocess
|
||||
|
||||
from moulinette import m18n
|
||||
from yunohost.utils.error import YunohostError
|
||||
from moulinette.utils.log import getActionLogger
|
||||
from moulinette.utils.process import run_commands, check_output
|
||||
from moulinette.utils.filesystem import append_to_file
|
||||
from moulinette.authenticators.ldap import Authenticator
|
||||
from yunohost.tools import Migration
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
SMALL_PWD_LIST = ["yunohost", "olinuxino", "olinux", "raspberry", "admin", "root", "test", "rpi"]
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"Synchronize admin and root passwords"
|
||||
|
||||
def run(self):
|
||||
|
||||
new_hash = self._get_admin_hash()
|
||||
self._replace_root_hash(new_hash)
|
||||
|
||||
logger.info(m18n.n("root_password_replaced_by_admin_password"))
|
||||
|
||||
@property
|
||||
def mode(self):
|
||||
|
||||
# If the root password is still a "default" value,
|
||||
# then this is an emergency and migration shall
|
||||
# be applied automatically
|
||||
#
|
||||
# Otherwise, as playing with root password is touchy,
|
||||
# we set this as a manual migration.
|
||||
return "auto" if self._is_root_pwd_listed(SMALL_PWD_LIST) else "manual"
|
||||
|
||||
@property
|
||||
def disclaimer(self):
|
||||
if self._is_root_pwd_listed(SMALL_PWD_LIST):
|
||||
return None
|
||||
|
||||
return m18n.n("migration_0006_disclaimer")
|
||||
|
||||
def _get_admin_hash(self):
|
||||
"""
|
||||
Fetch the admin hash from the LDAP db using slapcat
|
||||
"""
|
||||
admin_hash = check_output("slapcat \
|
||||
| grep 'dn: cn=admin,dc=yunohost,dc=org' -A20 \
|
||||
| grep userPassword -A2 \
|
||||
| tr -d '\n ' \
|
||||
| tr ':' ' ' \
|
||||
| awk '{print $2}' \
|
||||
| base64 -d \
|
||||
| sed 's/{CRYPT}//g'")
|
||||
return admin_hash
|
||||
|
||||
def _replace_root_hash(self, new_hash):
|
||||
hash_root = spwd.getspnam("root").sp_pwd
|
||||
|
||||
with open('/etc/shadow', 'r') as before_file:
|
||||
before = before_file.read()
|
||||
|
||||
with open('/etc/shadow', 'w') as after_file:
|
||||
after_file.write(before.replace("root:" + hash_root,
|
||||
"root:" + new_hash))
|
||||
|
||||
def _is_root_pwd_listed(self, pwd_list):
|
||||
hash_root = spwd.getspnam("root").sp_pwd
|
||||
|
||||
for password in pwd_list:
|
||||
if hash_root == crypt.crypt(password, hash_root):
|
||||
return True
|
||||
return False
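The listed-password check above relies on crypt(3) reproducing the stored hash when fed that same hash as salt; a minimal sketch (hypothetical hash, Unix-only since it uses the crypt module):

    import crypt

    # Hashing a candidate with the stored hash as salt yields the stored hash
    # exactly when the candidate is the original password.
    stored_hash = crypt.crypt("yunohost", "$6$examplesalt$")  # hypothetical stored root hash

    def matches(candidate, stored):
        return crypt.crypt(candidate, stored) == stored

    print(matches("yunohost", stored_hash))             # True
    print(matches("some-other-password", stored_hash))  # False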
|
|
@ -1,70 +0,0 @@
|
|||
import os
|
||||
import re
|
||||
|
||||
from shutil import copyfile
|
||||
|
||||
from moulinette.utils.log import getActionLogger
|
||||
from moulinette.utils.filesystem import mkdir, rm
|
||||
|
||||
from yunohost.tools import Migration
|
||||
from yunohost.service import _run_service_command
|
||||
from yunohost.regenconf import regen_conf
|
||||
from yunohost.settings import settings_set
|
||||
from yunohost.utils.error import YunohostError
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
SSHD_CONF = '/etc/ssh/sshd_config'
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"""
|
||||
This is the first step of a couple of migrations that ensure SSH conf is
|
||||
managed by YunoHost (even if the "from_script" flag is present, which was
|
||||
previously preventing it from being managed by YunoHost)
|
||||
|
||||
The goal of this first (automatic) migration is to make sure that the
|
||||
sshd_config is managed by the regen-conf mechanism.
|
||||
|
||||
If the from_script flag exists, then we keep the current SSH conf such that it
|
||||
will appear as "manually modified" to the regenconf.
|
||||
|
||||
In step 2 (manual), the admin will be able to choose whether or not to actually
|
||||
use the recommended configuration, with an appropriate disclaimer.
|
||||
"""
|
||||
|
||||
def run(self):
|
||||
|
||||
# Check if deprecated DSA Host Key is in config
|
||||
dsa_rgx = r'^[ \t]*HostKey[ \t]+/etc/ssh/ssh_host_dsa_key[ \t]*(?:#.*)?$'
|
||||
dsa = False
|
||||
for line in open(SSHD_CONF):
|
||||
if re.match(dsa_rgx, line) is not None:
|
||||
dsa = True
|
||||
break
|
||||
if dsa:
|
||||
settings_set("service.ssh.allow_deprecated_dsa_hostkey", True)
|
||||
|
||||
# Here, we make it so that /etc/ssh/sshd_config is managed
|
||||
# by the regen conf (in particular in the case where the
|
||||
# from_script flag is present - in which case it was *not*
|
||||
# managed by the regenconf)
|
||||
# But because we can't be sure the user wants to use the
|
||||
# recommended conf, we backup then restore the /etc/ssh/sshd_config
|
||||
# right after the regenconf, such that it will appear as
|
||||
# "manually modified".
|
||||
if os.path.exists('/etc/yunohost/from_script'):
|
||||
rm('/etc/yunohost/from_script')
|
||||
copyfile(SSHD_CONF, '/etc/ssh/sshd_config.bkp')
|
||||
regen_conf(names=['ssh'], force=True)
|
||||
copyfile('/etc/ssh/sshd_config.bkp', SSHD_CONF)
|
||||
|
||||
# Restart ssh and rollback if it failed
|
||||
if not _run_service_command('restart', 'ssh'):
|
||||
# We don't rollback completely but it should be enough
|
||||
copyfile('/etc/ssh/sshd_config.bkp', SSHD_CONF)
|
||||
if not _run_service_command('restart', 'ssh'):
|
||||
raise YunohostError("migration_0007_cannot_restart")
|
||||
else:
|
||||
raise YunohostError("migration_0007_cancelled")
|
|
@ -1,105 +0,0 @@
|
|||
import os
|
||||
import re
|
||||
|
||||
from moulinette import m18n
|
||||
from moulinette.utils.log import getActionLogger
|
||||
from moulinette.utils.filesystem import chown
|
||||
|
||||
from yunohost.tools import Migration
|
||||
from yunohost.regenconf import _get_conf_hashes, _calculate_hash
|
||||
from yunohost.regenconf import regen_conf
|
||||
from yunohost.settings import settings_set, settings_get
|
||||
from yunohost.utils.error import YunohostError
|
||||
from yunohost.backup import ARCHIVES_PATH
|
||||
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
SSHD_CONF = '/etc/ssh/sshd_config'
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"""
|
||||
In this second step, the admin is asked if it's okay to use
|
||||
the recommended SSH configuration - which also implies
|
||||
disabling deprecated DSA key.
|
||||
|
||||
This has important implications in the way the user may connect
|
||||
to its server (key change, and a spooky warning might be given
|
||||
by SSH later)
|
||||
|
||||
A disclaimer explaining the various things to be aware of is
|
||||
shown - and the user may also choose to skip this migration.
|
||||
"""
|
||||
|
||||
dependencies = ["ssh_conf_managed_by_yunohost_step1"]
|
||||
|
||||
def run(self):
|
||||
settings_set("service.ssh.allow_deprecated_dsa_hostkey", False)
|
||||
regen_conf(names=['ssh'], force=True)
|
||||
|
||||
# Update local archives folder permissions, so that
|
||||
# admin can scp archives out of the server
|
||||
if os.path.isdir(ARCHIVES_PATH):
|
||||
chown(ARCHIVES_PATH, uid="admin", gid="root")
|
||||
|
||||
@property
|
||||
def mode(self):
|
||||
|
||||
# If the conf is already up to date
|
||||
# and no DSA key is used, then we're good to go
|
||||
# and the migration can be done automatically
|
||||
# (basically nothing shall change)
|
||||
ynh_hash = _get_conf_hashes('ssh').get(SSHD_CONF, None)
|
||||
current_hash = _calculate_hash(SSHD_CONF)
|
||||
dsa = settings_get("service.ssh.allow_deprecated_dsa_hostkey")
|
||||
if ynh_hash == current_hash and not dsa:
|
||||
return "auto"
|
||||
|
||||
return "manual"
|
||||
|
||||
@property
|
||||
def disclaimer(self):
|
||||
|
||||
if self.mode == "auto":
|
||||
return None
|
||||
|
||||
# Detect key things to be aware of before enabling the
|
||||
# recommended configuration
|
||||
dsa_key_enabled = False
|
||||
ports = []
|
||||
root_login = []
|
||||
port_rgx = r'^[ \t]*Port[ \t]+(\d+)[ \t]*(?:#.*)?$'
|
||||
root_rgx = r'^[ \t]*PermitRootLogin[ \t]([^# \t]*)[ \t]*(?:#.*)?$'
|
||||
dsa_rgx = r'^[ \t]*HostKey[ \t]+/etc/ssh/ssh_host_dsa_key[ \t]*(?:#.*)?$'
|
||||
for line in open(SSHD_CONF):
|
||||
|
||||
ports = ports + re.findall(port_rgx, line)
|
||||
|
||||
root_login = root_login + re.findall(root_rgx, line)
|
||||
|
||||
if not dsa_key_enabled and re.match(dsa_rgx, line) is not None:
|
||||
dsa_key_enabled = True
|
||||
|
||||
custom_port = ports != ['22'] and ports != []
|
||||
root_login_enabled = root_login and root_login[-1] != 'no'
|
||||
|
||||
# Build message
|
||||
message = m18n.n("migration_0008_general_disclaimer")
|
||||
|
||||
if custom_port:
|
||||
message += "\n\n" + m18n.n("migration_0008_port")
|
||||
|
||||
if root_login_enabled:
|
||||
message += "\n\n" + m18n.n("migration_0008_root")
|
||||
|
||||
if dsa_key_enabled:
|
||||
message += "\n\n" + m18n.n("migration_0008_dsa")
|
||||
|
||||
if custom_port or root_login_enabled or dsa_key_enabled:
|
||||
message += "\n\n" + m18n.n("migration_0008_warning")
|
||||
else:
|
||||
message += "\n\n" + m18n.n("migration_0008_no_warning")
|
||||
|
||||
return message
|
|
@ -1,39 +0,0 @@
|
|||
import os
|
||||
|
||||
from moulinette import m18n
|
||||
from moulinette.utils.log import getActionLogger
|
||||
|
||||
from moulinette.utils.filesystem import read_file
|
||||
from yunohost.service import _get_services, _save_services
|
||||
from yunohost.regenconf import _update_conf_hashes, REGEN_CONF_FILE
|
||||
|
||||
from yunohost.tools import Migration
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
"""
|
||||
Decouple the regen conf mechanism from the concept of services
|
||||
"""
|
||||
|
||||
def run(self):
|
||||
|
||||
if "conffiles" not in read_file("/etc/yunohost/services.yml") \
|
||||
or os.path.exists(REGEN_CONF_FILE):
|
||||
logger.warning(m18n.n("migration_0009_not_needed"))
|
||||
return
|
||||
|
||||
# For all services
|
||||
services = _get_services()
|
||||
for service, infos in services.items():
|
||||
# If there are some conffiles (file hashes)
|
||||
if "conffiles" in infos.keys():
|
||||
# Save them using the new regen conf thingy
|
||||
_update_conf_hashes(service, infos["conffiles"])
|
||||
# And delete the old conffile key from the service infos
|
||||
del services[service]["conffiles"]
|
||||
|
||||
# (Actually save the modification of services)
|
||||
_save_services(services)
|
||||
|
|
@ -1,13 +0,0 @@
|
|||
from moulinette.utils.log import getActionLogger
|
||||
from yunohost.tools import Migration
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"Migrate from official.json to apps.json (outdated, replaced by migration 13)"
|
||||
|
||||
def run(self):
|
||||
logger.info("This migration is oudated and doesn't do anything anymore. The migration 13 will handle this instead.")
|
||||
pass
|
|
@ -1,181 +0,0 @@
|
|||
import time
|
||||
import os
|
||||
|
||||
from moulinette import m18n
|
||||
from yunohost.utils.error import YunohostError
|
||||
from moulinette.utils.log import getActionLogger
|
||||
from moulinette.utils.filesystem import read_yaml
|
||||
|
||||
from yunohost.tools import Migration
|
||||
from yunohost.user import user_list, user_group_create, user_group_update
|
||||
from yunohost.app import app_setting, _installed_apps
|
||||
from yunohost.regenconf import regen_conf, BACKUP_CONF_DIR
|
||||
from yunohost.permission import permission_create, user_permission_update, permission_sync_to_user
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
###################################################
|
||||
# Tools used also for restoration
|
||||
###################################################
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
"""
|
||||
Update the LDAP DB to be able to store permissions
|
||||
Create a group for each yunohost user
|
||||
Migrate app permissions from app settings to LDAP
|
||||
"""
|
||||
|
||||
required = True
|
||||
|
||||
def remove_if_exists(self, target):
|
||||
|
||||
from yunohost.utils.ldap import _get_ldap_interface
|
||||
ldap = _get_ldap_interface()
|
||||
|
||||
try:
|
||||
objects = ldap.search(target + ",dc=yunohost,dc=org")
|
||||
# ldap search will raise an exception if no corresponding object is found >.> ...
|
||||
except Exception as e:
|
||||
logger.debug("%s does not exist, no need to delete it" % target)
|
||||
return
|
||||
|
||||
objects.reverse()
|
||||
for o in objects:
|
||||
for dn in o["dn"]:
|
||||
dn = dn.replace(",dc=yunohost,dc=org", "")
|
||||
logger.debug("Deleting old object %s ..." % dn)
|
||||
try:
|
||||
ldap.remove(dn)
|
||||
except Exception as e:
|
||||
raise YunohostError("migration_0011_failed_to_remove_stale_object", dn=dn, error=e)
|
||||
|
||||
def migrate_LDAP_db(self):
|
||||
|
||||
logger.info(m18n.n("migration_0011_update_LDAP_database"))
|
||||
|
||||
from yunohost.utils.ldap import _get_ldap_interface
|
||||
ldap = _get_ldap_interface()
|
||||
|
||||
ldap_map = read_yaml('/usr/share/yunohost/yunohost-config/moulinette/ldap_scheme.yml')
|
||||
|
||||
try:
|
||||
self.remove_if_exists("ou=permission")
|
||||
self.remove_if_exists('ou=groups')
|
||||
|
||||
attr_dict = ldap_map['parents']['ou=permission']
|
||||
ldap.add('ou=permission', attr_dict)
|
||||
|
||||
attr_dict = ldap_map['parents']['ou=groups']
|
||||
ldap.add('ou=groups', attr_dict)
|
||||
|
||||
attr_dict = ldap_map['children']['cn=all_users,ou=groups']
|
||||
ldap.add('cn=all_users,ou=groups', attr_dict)
|
||||
|
||||
attr_dict = ldap_map['children']['cn=visitors,ou=groups']
|
||||
ldap.add('cn=visitors,ou=groups', attr_dict)
|
||||
|
||||
for rdn, attr_dict in ldap_map['depends_children'].items():
|
||||
ldap.add(rdn, attr_dict)
|
||||
except Exception as e:
|
||||
raise YunohostError("migration_0011_LDAP_update_failed", error=e)
|
||||
|
||||
logger.info(m18n.n("migration_0011_create_group"))
|
||||
|
||||
# Create a group for each yunohost user
|
||||
user_list = ldap.search('ou=users,dc=yunohost,dc=org',
|
||||
'(&(objectclass=person)(!(uid=root))(!(uid=nobody)))',
|
||||
['uid', 'uidNumber'])
|
||||
for user_info in user_list:
|
||||
username = user_info['uid'][0]
|
||||
ldap.update('uid=%s,ou=users' % username,
|
||||
{'objectClass': ['mailAccount', 'inetOrgPerson', 'posixAccount', 'userPermissionYnh']})
|
||||
user_group_create(username, gid=user_info['uidNumber'][0], primary_group=True, sync_perm=False)
|
||||
user_group_update(groupname='all_users', add=username, force=True, sync_perm=False)
|
||||
|
||||
def migrate_app_permission(self, app=None):
|
||||
logger.info(m18n.n("migration_0011_migrate_permission"))
|
||||
|
||||
apps = _installed_apps()
|
||||
|
||||
if app:
|
||||
if app not in apps:
|
||||
logger.error("Can't migrate permission for app %s because it ain't installed..." % app)
|
||||
apps = []
|
||||
else:
|
||||
apps = [app]
|
||||
|
||||
for app in apps:
|
||||
permission = app_setting(app, 'allowed_users')
|
||||
path = app_setting(app, 'path')
|
||||
domain = app_setting(app, 'domain')
|
||||
|
||||
url = "/" if domain and path else None
|
||||
if permission:
|
||||
known_users = user_list()["users"].keys()
|
||||
allowed = [user for user in permission.split(',') if user in known_users]
|
||||
else:
|
||||
allowed = ["all_users"]
|
||||
permission_create(app+".main", url=url, allowed=allowed, protected=False, sync_perm=False)
|
||||
|
||||
app_setting(app, 'allowed_users', delete=True)
|
||||
|
||||
# Migrate classic public app still using the legacy unprotected_uris
|
||||
if app_setting(app, "unprotected_uris") == "/" or app_setting(app, "skipped_uris") == "/":
|
||||
user_permission_update(app+".main", add="visitors", sync_perm=False)
|
||||
|
||||
permission_sync_to_user()
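A short illustration (hypothetical app and users) of how a legacy comma-separated allowed_users setting is turned into the allowed list passed to permission_create above:

    # Hypothetical legacy setting and user base
    legacy_allowed_users = "alice,bob,ghost_user"      # old 'allowed_users' app setting
    known_users = ["alice", "bob", "carol"]            # users actually present in LDAP

    if legacy_allowed_users:
        allowed = [user for user in legacy_allowed_users.split(',') if user in known_users]
    else:
        allowed = ["all_users"]
    print(allowed)  # ['alice', 'bob']  -> permission_create("someapp.main", allowed=allowed, ...)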
|
||||
|
||||
def run(self):
|
||||
|
||||
# FIXME : what do we really want to do here ...
|
||||
# Imho we should just force-regen the conf in all cases, and maybe
|
||||
# just display a warning if we detect that the conf was manually modified
|
||||
|
||||
# Check if the migration can be processed
|
||||
ldap_regen_conf_status = regen_conf(names=['slapd'], dry_run=True)
|
||||
# This is how we check whether the slapd conf has been customized
|
||||
if ldap_regen_conf_status and ldap_regen_conf_status['slapd']['pending']:
|
||||
logger.warning(m18n.n("migration_0011_slapd_config_will_be_overwritten", conf_backup_folder=BACKUP_CONF_DIR))
|
||||
|
||||
# Back up LDAP and the apps settings before doing the migration
|
||||
logger.info(m18n.n("migration_0011_backup_before_migration"))
|
||||
try:
|
||||
backup_folder = "/home/yunohost.backup/premigration/" + time.strftime('%Y%m%d-%H%M%S', time.gmtime())
|
||||
os.makedirs(backup_folder, 0o750)
|
||||
os.system("systemctl stop slapd")
|
||||
os.system("cp -r --preserve /etc/ldap %s/ldap_config" % backup_folder)
|
||||
os.system("cp -r --preserve /var/lib/ldap %s/ldap_db" % backup_folder)
|
||||
os.system("cp -r --preserve /etc/yunohost/apps %s/apps_settings" % backup_folder)
|
||||
except Exception as e:
|
||||
raise YunohostError("migration_0011_can_not_backup_before_migration", error=e)
|
||||
finally:
|
||||
os.system("systemctl start slapd")
|
||||
|
||||
try:
|
||||
# Update LDAP schema and restart slapd
|
||||
logger.info(m18n.n("migration_0011_update_LDAP_schema"))
|
||||
regen_conf(names=['slapd'], force=True)
|
||||
|
||||
# Update LDAP database
|
||||
self.migrate_LDAP_db()
|
||||
|
||||
# Migrate permission
|
||||
self.migrate_app_permission()
|
||||
|
||||
permission_sync_to_user()
|
||||
except Exception as e:
|
||||
logger.warn(m18n.n("migration_0011_migration_failed_trying_to_rollback"))
|
||||
os.system("systemctl stop slapd")
|
||||
os.system("rm -r /etc/ldap/slapd.d") # To be sure that we don't keep some part of the old config
|
||||
os.system("cp -r --preserve %s/ldap_config/. /etc/ldap/" % backup_folder)
|
||||
os.system("cp -r --preserve %s/ldap_db/. /var/lib/ldap/" % backup_folder)
|
||||
os.system("cp -r --preserve %s/apps_settings/. /etc/yunohost/apps/" % backup_folder)
|
||||
os.system("systemctl start slapd")
|
||||
os.system("rm -r " + backup_folder)
|
||||
logger.info(m18n.n("migration_0011_rollback_success"))
|
||||
raise
|
||||
else:
|
||||
os.system("rm -r " + backup_folder)
|
||||
|
||||
logger.info(m18n.n("migration_0011_done"))
|
|
@ -1,16 +0,0 @@
|
|||
import glob
|
||||
import re
|
||||
from yunohost.tools import Migration
|
||||
from moulinette.utils.filesystem import read_file, write_to_file
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"Force authentication in md5 for local connexions"
|
||||
|
||||
all_hba_files = glob.glob("/etc/postgresql/*/*/pg_hba.conf")
|
||||
|
||||
def run(self):
|
||||
for filename in self.all_hba_files:
|
||||
pg_hba_in = read_file(filename)
|
||||
write_to_file(filename, re.sub(r"local(\s*)all(\s*)all(\s*)password", "local\\1all\\2all\\3md5", pg_hba_in))
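For example, on a typical pg_hba.conf line the substitution behaves like this (sample line only):

    import re

    line = "local   all             all                                     password"
    print(re.sub(r"local(\s*)all(\s*)all(\s*)password",
                 "local\\1all\\2all\\3md5", line))
    # local   all             all                                     md5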
|
|
@ -1,51 +0,0 @@
|
|||
|
||||
import os
|
||||
import shutil
|
||||
|
||||
from moulinette.utils.log import getActionLogger
|
||||
from moulinette.utils.filesystem import read_json
|
||||
|
||||
from yunohost.tools import Migration
|
||||
from yunohost.app import (_initialize_apps_catalog_system,
|
||||
_update_apps_catalog,
|
||||
APPS_CATALOG_CACHE,
|
||||
APPS_CATALOG_CONF)
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
LEGACY_APPS_CATALOG_CONF = '/etc/yunohost/appslists.json'
|
||||
LEGACY_APPS_CATALOG_CONF_BACKUP = LEGACY_APPS_CATALOG_CONF + ".old"
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"Migrate to the new future-proof apps catalog system"
|
||||
|
||||
def run(self):
|
||||
|
||||
if not os.path.exists(LEGACY_APPS_CATALOG_CONF):
|
||||
logger.info("No need to do anything")
|
||||
|
||||
# Destroy old legacy cache
|
||||
if os.path.exists(APPS_CATALOG_CACHE):
|
||||
shutil.rmtree(APPS_CATALOG_CACHE)
|
||||
|
||||
# and legacy cron
|
||||
if os.path.exists("/etc/cron.daily/yunohost-fetch-appslists"):
|
||||
os.remove("/etc/cron.daily/yunohost-fetch-appslists")
|
||||
|
||||
# Backup the legacy file
|
||||
try:
|
||||
legacy_catalogs = read_json(LEGACY_APPS_CATALOG_CONF)
|
||||
# If there's only one catalog, we assume it's just the old official catalog
|
||||
# Otherwise, warn the (power-?)users that they should migrate their old catalogs manually
|
||||
if len(legacy_catalogs) > 1:
|
||||
logger.warning("It looks like you had additional apps_catalog in the configuration file %s! YunoHost now uses %s instead, but it won't migrate your custom apps_catalog. You should do this manually. The old file has been backuped in %s." % (LEGACY_APPS_CATALOG_CONF, APPS_CATALOG_CONF, LEGACY_APPS_CATALOG_CONF_BACKUP))
|
||||
except Exception as e:
|
||||
logger.warning("Unable to parse the legacy conf %s (error : %s) ... migrating anyway" % (LEGACY_APPS_CATALOG_CONF, str(e)))
|
||||
|
||||
if os.path.exists(LEGACY_APPS_CATALOG_CONF):
|
||||
os.rename(LEGACY_APPS_CATALOG_CONF, LEGACY_APPS_CATALOG_CONF_BACKUP)
|
||||
|
||||
_initialize_apps_catalog_system()
|
||||
_update_apps_catalog()
|
|
@ -1,31 +0,0 @@
|
|||
import os
|
||||
|
||||
from moulinette.utils.log import getActionLogger
|
||||
from moulinette.utils.filesystem import read_json
|
||||
|
||||
from yunohost.tools import Migration
|
||||
from yunohost.app import app_setting, APPS_SETTING_PATH
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"""Remove legacy app status.json files"""
|
||||
|
||||
def run(self):
|
||||
|
||||
apps = os.listdir(APPS_SETTING_PATH)
|
||||
|
||||
for app in apps:
|
||||
status_file = os.path.join(APPS_SETTING_PATH, app, "status.json")
|
||||
if not os.path.exists(status_file):
|
||||
continue
|
||||
|
||||
try:
|
||||
status = read_json(status_file)
|
||||
current_revision = status.get("remote", {}).get("revision", "?")
|
||||
app_setting(app, 'current_revision', current_revision)
|
||||
except Exception as e:
|
||||
logger.warning("Could not migrate status.json from app %s: %s", (app, str(e)))
|
||||
else:
|
||||
os.system("rm %s" % status_file)
|
244
src/yunohost/data_migrations/0015_migrate_to_buster.py
Normal file
|
@ -0,0 +1,244 @@
|
|||
|
||||
import glob
|
||||
import os
|
||||
|
||||
from moulinette import m18n
|
||||
from yunohost.utils.error import YunohostError
|
||||
from moulinette.utils.log import getActionLogger
|
||||
from moulinette.utils.process import check_output, call_async_output
|
||||
from moulinette.utils.filesystem import read_file
|
||||
|
||||
from yunohost.tools import Migration, tools_update, tools_upgrade
|
||||
from yunohost.app import unstable_apps
|
||||
from yunohost.regenconf import manually_modified_files
|
||||
from yunohost.utils.filesystem import free_space_in_directory
|
||||
from yunohost.utils.packages import get_ynh_package_version, _list_upgradable_apt_packages
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"Upgrade the system to Debian Buster and Yunohost 4.x"
|
||||
|
||||
mode = "manual"
|
||||
|
||||
def run(self):
|
||||
|
||||
self.check_assertions()
|
||||
|
||||
logger.info(m18n.n("migration_0015_start"))
|
||||
|
||||
#
|
||||
# Make sure certificates do not use weak signature hash algorithms (md5, sha1)
|
||||
# otherwise nginx will later refuse to start, which results in a
|
||||
# catastrophic situation
|
||||
#
|
||||
self.validate_and_upgrade_cert_if_necessary()
|
||||
|
||||
#
|
||||
# Patch sources.list
|
||||
#
|
||||
logger.info(m18n.n("migration_0015_patching_sources_list"))
|
||||
self.patch_apt_sources_list()
|
||||
tools_update(system=True)
|
||||
|
||||
# Tell libc6 it's okay to restart system stuff during the upgrade
|
||||
os.system("echo 'libc6 libraries/restart-without-asking boolean true' | debconf-set-selections")
|
||||
|
||||
# Don't send an email to root about the postgresql migration. It should be handled automatically after.
|
||||
os.system("echo 'postgresql-common postgresql-common/obsolete-major seen true' | debconf-set-selections")
|
||||
|
||||
#
|
||||
# Specific packages upgrades
|
||||
#
|
||||
logger.info(m18n.n("migration_0015_specific_upgrade"))
|
||||
|
||||
# Update unscd independently, was 0.53-1+yunohost on stretch (custom build of ours) but now it's 0.53-1+b1 on vanilla buster,
|
||||
# which for apt appears as a lower version (hence the --allow-downgrades and the hardcoded version number)
|
||||
unscd_version = check_output('dpkg -s unscd | grep "^Version: " | cut -d " " -f 2')
|
||||
if "yunohost" in unscd_version:
|
||||
new_version = check_output("LC_ALL=C apt policy unscd 2>/dev/null | grep -v '\\*\\*\\*' | grep http -B1 | head -n 1 | awk '{print $1}'").strip()
|
||||
if new_version:
|
||||
self.apt_install('unscd=%s --allow-downgrades' % new_version)
|
||||
else:
|
||||
logger.warning("Could not identify which version of unscd to install")
|
||||
|
||||
# Upgrade libpam-modules independently, to work around a small issue where it wants to overwrite a file previously provided by Yunohost
|
||||
libpammodules_version = check_output('dpkg -s libpam-modules | grep "^Version: " | cut -d " " -f 2')
|
||||
if not libpammodules_version.startswith("1.3"):
|
||||
self.apt_install('libpam-modules -o Dpkg::Options::="--force-overwrite"')
|
||||
|
||||
#
|
||||
# Main upgrade
|
||||
#
|
||||
logger.info(m18n.n("migration_0015_main_upgrade"))
|
||||
|
||||
apps_packages = self.get_apps_equivs_packages()
|
||||
self.hold(apps_packages)
|
||||
tools_upgrade(system=True, allow_yunohost_upgrade=False)
|
||||
|
||||
if self.debian_major_version() == 9:
|
||||
raise YunohostError("migration_0015_still_on_stretch_after_main_upgrade")
|
||||
|
||||
# Clean the mess
|
||||
logger.info(m18n.n("migration_0015_cleaning_up"))
|
||||
os.system("apt autoremove --assume-yes")
|
||||
os.system("apt clean --assume-yes")
|
||||
|
||||
#
|
||||
# Yunohost upgrade
|
||||
#
|
||||
logger.info(m18n.n("migration_0015_yunohost_upgrade"))
|
||||
self.unhold(apps_packages)
|
||||
tools_upgrade(system=True)
|
||||
|
||||
def debian_major_version(self):
|
||||
# The python module "platform" and lsb_release are not reliable because
|
||||
# on some setup, they may still return Release=9 even after upgrading to
|
||||
# buster ... (Apparently this is related to OVH overriding some stuff
|
||||
# with /etc/lsb-release for instance -_-)
|
||||
# Instead, we rely on /etc/os-release which should be the raw info from
|
||||
# the distribution...
|
||||
return int(check_output("grep VERSION_ID /etc/os-release | head -n 1 | tr '\"' ' ' | cut -d ' ' -f2"))
|
||||
|
||||
def yunohost_major_version(self):
|
||||
return int(get_ynh_package_version("yunohost")["version"].split('.')[0])
|
||||
|
||||
def check_assertions(self):
|
||||
|
||||
# Be on stretch (9.x) and yunohost 3.x
|
||||
# NB : we do both check to cover situations where the upgrade crashed
|
||||
# in the middle and debian version could be > 9.x but yunohost package
|
||||
# would still be in 3.x...
|
||||
if not self.debian_major_version() == 9 \
|
||||
and not self.yunohost_major_version() == 3:
|
||||
raise YunohostError("migration_0015_not_stretch")
|
||||
|
||||
# Have > 1 GB free space on /var/ ?
|
||||
if free_space_in_directory("/var/") / (1024**3) < 1.0:
|
||||
raise YunohostError("migration_0015_not_enough_free_space")
|
||||
|
||||
# Check system is up to date
|
||||
# (but we don't if 'buster' is already in the sources.list ...
|
||||
# which means maybe a previous upgrade crashed and we're re-running it)
|
||||
if " buster " not in read_file("/etc/apt/sources.list"):
|
||||
tools_update(system=True)
|
||||
upgradable_system_packages = list(_list_upgradable_apt_packages())
|
||||
if upgradable_system_packages:
|
||||
raise YunohostError("migration_0015_system_not_fully_up_to_date")
|
||||
|
||||
@property
|
||||
def disclaimer(self):
|
||||
|
||||
# Avoid having a super long disclaimer + unnecessary check if we ain't
|
||||
# on stretch / yunohost 3.x anymore
|
||||
# NB : we do both checks to cover situations where the upgrade crashed
|
||||
# in the middle and debian version could be >= 10.x but yunohost package
|
||||
# would still be in 3.x...
|
||||
if not self.debian_major_version() == 9 \
|
||||
and not self.yunohost_major_version() == 3:
|
||||
return None
|
||||
|
||||
# Get list of problematic apps ? I.e. not official or community+working
|
||||
problematic_apps = unstable_apps()
|
||||
problematic_apps = "".join(["\n - " + app for app in problematic_apps])
|
||||
|
||||
# Manually modified files ? (c.f. yunohost service regen-conf)
|
||||
modified_files = manually_modified_files()
|
||||
modified_files = "".join(["\n - " + f for f in modified_files])
|
||||
|
||||
message = m18n.n("migration_0015_general_warning")
|
||||
|
||||
message = "N.B.: This migration has been tested by the community over the last few months but has only been declared stable recently. If your server hosts critical services and if you are not too confident with debugging possible issues, we recommend you to wait a little bit more while we gather more feedback and polish things up. If on the other hand you are relatively confident with debugging small issues that may arise, you are encouraged to run this migration ;)! You can read about remaining known issues and feedback from the community here: https://forum.yunohost.org/t/12195\n\n" + message
|
||||
|
||||
if problematic_apps:
|
||||
message += "\n\n" + m18n.n("migration_0015_problematic_apps_warning", problematic_apps=problematic_apps)
|
||||
|
||||
if modified_files:
|
||||
message += "\n\n" + m18n.n("migration_0015_modified_files", manually_modified_files=modified_files)
|
||||
|
||||
return message
|
||||
|
||||
def patch_apt_sources_list(self):
|
||||
|
||||
sources_list = glob.glob("/etc/apt/sources.list.d/*.list")
|
||||
sources_list.append("/etc/apt/sources.list")
|
||||
|
||||
# This :
|
||||
# - replace single 'stretch' occurrence by 'buster'
|
||||
# - comments lines containing "backports"
|
||||
# - replace 'stretch/updates' by 'buster/updates' (and 'stretch-' by 'buster-')
|
||||
for f in sources_list:
|
||||
command = "sed -i -e 's@ stretch @ buster @g' " \
|
||||
"-e '/backports/ s@^#*@#@' " \
|
||||
"-e 's@ stretch/updates @ buster/updates @g' " \
|
||||
"-e 's@ stretch-@ buster-@g' " \
|
||||
"{}".format(f)
|
||||
os.system(command)
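# Editor's sketch (illustrative, hypothetical entry): the effect of the sed
# above on a typical sources.list line.
example = "deb http://deb.debian.org/debian stretch main contrib"
assert example.replace(" stretch ", " buster ") == \
    "deb http://deb.debian.org/debian buster main contrib"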
|
||||
|
||||
def get_apps_equivs_packages(self):
|
||||
|
||||
command = "dpkg --get-selections" \
|
||||
" | grep -v deinstall" \
|
||||
" | awk '{print $1}'" \
|
||||
" | { grep 'ynh-deps$' || true; }"
|
||||
|
||||
output = check_output(command).strip()
|
||||
|
||||
return output.split('\n') if output else []
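# Editor's note (illustrative, hypothetical names): this typically returns the
# per-app equivs packages such as ['nextcloud-ynh-deps', 'wordpress-ynh-deps'],
# which are then apt-mark held during the main dist-upgrade and unheld after.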
|
||||
|
||||
def hold(self, packages):
|
||||
for package in packages:
|
||||
os.system("apt-mark hold {}".format(package))
|
||||
|
||||
def unhold(self, packages):
|
||||
for package in packages:
|
||||
os.system("apt-mark unhold {}".format(package))
|
||||
|
||||
def apt_install(self, cmd):
|
||||
|
||||
def is_relevant(l):
|
||||
return "Reading database ..." not in l.rstrip()
|
||||
|
||||
callbacks = (
|
||||
lambda l: logger.info("+ " + l.rstrip() + "\r") if is_relevant(l) else logger.debug(l.rstrip() + "\r"),
|
||||
lambda l: logger.warning(l.rstrip()),
|
||||
)
|
||||
|
||||
cmd = "LC_ALL=C DEBIAN_FRONTEND=noninteractive APT_LISTCHANGES_FRONTEND=none apt install --quiet -o=Dpkg::Use-Pty=0 --fix-broken --assume-yes " + cmd
|
||||
|
||||
logger.debug("Running: %s" % cmd)
|
||||
|
||||
call_async_output(cmd, callbacks, shell=True)
|
||||
|
||||
def validate_and_upgrade_cert_if_necessary(self):
|
||||
|
||||
active_certs = set(check_output("grep -roh '/.*crt.pem' /etc/nginx/").strip().split("\n"))
|
||||
|
||||
cmd = "LC_ALL=C openssl x509 -in %s -text -noout | grep -i 'Signature Algorithm:' | awk '{print $3}' | uniq"
|
||||
|
||||
default_crt = '/etc/yunohost/certs/yunohost.org/crt.pem'
|
||||
default_key = '/etc/yunohost/certs/yunohost.org/key.pem'
|
||||
default_signature = check_output(cmd % default_crt).strip() if default_crt in active_certs else None
|
||||
if default_signature is not None and (default_signature.startswith("md5") or default_signature.startswith("sha1")):
|
||||
logger.warning("%s is using a pretty old certificate incompatible with newer versions of nginx ... attempting to regenerate a fresh one" % default_crt)
|
||||
|
||||
os.system("mv %s %s.old" % (default_crt, default_crt))
|
||||
os.system("mv %s %s.old" % (default_key, default_key))
|
||||
ret = os.system("/usr/share/yunohost/hooks/conf_regen/02-ssl init")
|
||||
|
||||
if ret != 0 or not os.path.exists(default_crt):
|
||||
logger.error("Upgrading the certificate failed ... reverting")
|
||||
os.system("mv %s.old %s" % (default_crt, default_crt))
|
||||
os.system("mv %s.old %s" % (default_key, default_key))
|
||||
|
||||
signatures = {cert: check_output(cmd % cert).strip() for cert in active_certs}
|
||||
|
||||
def cert_is_weak(cert):
|
||||
sig = signatures[cert]
|
||||
return sig.startswith("md5") or sig.startswith("sha1")
|
||||
|
||||
weak_certs = [cert for cert in signatures.keys() if cert_is_weak(cert)]
|
||||
if weak_certs:
|
||||
raise YunohostError("migration_0015_weak_certs", certs=", ".join(weak_certs))
|
73
src/yunohost/data_migrations/0016_php70_to_php73_pools.py
Normal file
|
@ -0,0 +1,73 @@
|
|||
import os
|
||||
import glob
|
||||
from shutil import copy2
|
||||
|
||||
from moulinette.utils.log import getActionLogger
|
||||
|
||||
from yunohost.app import _is_installed, _get_app_settings, _set_app_settings, _patch_legacy_php_versions_in_settings
|
||||
from yunohost.tools import Migration
|
||||
from yunohost.service import _run_service_command
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
PHP70_POOLS = "/etc/php/7.0/fpm/pool.d"
|
||||
PHP73_POOLS = "/etc/php/7.3/fpm/pool.d"
|
||||
|
||||
PHP70_SOCKETS_PREFIX = "/run/php/php7.0-fpm"
|
||||
PHP73_SOCKETS_PREFIX = "/run/php/php7.3-fpm"
|
||||
|
||||
MIGRATION_COMMENT = "; YunoHost note : this file was automatically moved from {}".format(PHP70_POOLS)
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"Migrate php7.0-fpm 'pool' conf files to php7.3"
|
||||
|
||||
dependencies = ["migrate_to_buster"]
|
||||
|
||||
def run(self):
|
||||
# Get list of php7.0 pool files
|
||||
php70_pool_files = glob.glob("{}/*.conf".format(PHP70_POOLS))
|
||||
|
||||
# Keep only basenames
|
||||
php70_pool_files = [os.path.basename(f) for f in php70_pool_files]
|
||||
|
||||
# Ignore the "www.conf" (default stuff, probably don't want to touch it ?)
|
||||
php70_pool_files = [f for f in php70_pool_files if f != "www.conf"]
|
||||
|
||||
for f in php70_pool_files:
|
||||
|
||||
# Copy the files to the php7.3 pool
|
||||
src = "{}/{}".format(PHP70_POOLS, f)
|
||||
dest = "{}/{}".format(PHP73_POOLS, f)
|
||||
copy2(src, dest)
|
||||
|
||||
# Replace the socket prefix if it's found
|
||||
c = "sed -i -e 's@{}@{}@g' {}".format(PHP70_SOCKETS_PREFIX, PHP73_SOCKETS_PREFIX, dest)
|
||||
os.system(c)
|
||||
|
||||
# Also add a comment that it was automatically moved from php7.0
|
||||
# (for human traceability and backward migration)
|
||||
c = "sed -i '1i {}' {}".format(MIGRATION_COMMENT, dest)
|
||||
os.system(c)
|
||||
|
||||
app_id = os.path.basename(f)[:-len(".conf")]
|
||||
if _is_installed(app_id):
|
||||
_patch_legacy_php_versions_in_settings("/etc/yunohost/apps/%s/" % app_id)
|
||||
|
||||
nginx_conf_files = glob.glob("/etc/nginx/conf.d/*.d/%s.conf" % app_id)
|
||||
for f in nginx_conf_files:
|
||||
# Replace the socket prefix if it's found
|
||||
c = "sed -i -e 's@{}@{}@g' {}".format(PHP70_SOCKETS_PREFIX, PHP73_SOCKETS_PREFIX, f)
|
||||
os.system(c)
|
||||
|
||||
os.system("rm /etc/logrotate.d/php7.0-fpm") # We remove this otherwise the logrotate cron will be unhappy
|
||||
|
||||
# Reload/restart the php pools
|
||||
_run_service_command("restart", "php7.3-fpm")
|
||||
_run_service_command("enable", "php7.3-fpm")
|
||||
os.system("systemctl stop php7.0-fpm")
|
||||
os.system("systemctl disable php7.0-fpm")
|
||||
|
||||
# Reload nginx
|
||||
_run_service_command("reload", "nginx")
|
66
src/yunohost/data_migrations/0017_postgresql_9p6_to_11.py
Normal file
|
@ -0,0 +1,66 @@
|
|||
import subprocess
|
||||
|
||||
from moulinette import m18n
|
||||
from yunohost.utils.error import YunohostError
|
||||
from moulinette.utils.log import getActionLogger
|
||||
|
||||
from yunohost.tools import Migration
|
||||
from yunohost.utils.filesystem import free_space_in_directory, space_used_by_directory
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"Migrate DBs from Postgresql 9.6 to 11 after migrating to Buster"
|
||||
|
||||
dependencies = ["migrate_to_buster"]
|
||||
|
||||
def run(self):
|
||||
|
||||
if not self.package_is_installed("postgresql-9.6"):
|
||||
logger.warning(m18n.n("migration_0017_postgresql_96_not_installed"))
|
||||
return
|
||||
|
||||
if not self.package_is_installed("postgresql-11"):
|
||||
raise YunohostError("migration_0017_postgresql_11_not_installed")
|
||||
|
||||
# Make sure there's a 9.6 cluster
|
||||
try:
|
||||
self.runcmd("pg_lsclusters | grep -q '^9.6 '")
|
||||
except Exception as e:
|
||||
logger.warning("It looks like there's not active 9.6 cluster, so probably don't need to run this migration")
|
||||
return
|
||||
|
||||
if not space_used_by_directory("/var/lib/postgresql/9.6") > free_space_in_directory("/var/lib/postgresql"):
|
||||
raise YunohostError("migration_0017_not_enough_space", path="/var/lib/postgresql/")
|
||||
|
||||
self.runcmd("systemctl stop postgresql")
|
||||
self.runcmd("pg_dropcluster --stop 11 main || true") # We do not trigger an exception if the command fails because that probably means cluster 11 doesn't exists, which is fine because it's created during the pg_upgradecluster)
|
||||
self.runcmd("pg_upgradecluster -m upgrade 9.6 main")
|
||||
self.runcmd("pg_dropcluster --stop 9.6 main")
|
||||
self.runcmd("systemctl start postgresql")
|
||||
|
||||
def package_is_installed(self, package_name):
|
||||
|
||||
(returncode, out, err) = self.runcmd("dpkg --list | grep '^ii ' | grep -q -w {}".format(package_name), raise_on_errors=False)
|
||||
return returncode == 0
|
||||
|
||||
def runcmd(self, cmd, raise_on_errors=True):
|
||||
|
||||
logger.debug("Running command: " + cmd)
|
||||
|
||||
p = subprocess.Popen(cmd,
|
||||
shell=True,
|
||||
executable='/bin/bash',
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE)
|
||||
|
||||
out, err = p.communicate()
|
||||
returncode = p.returncode
|
||||
if raise_on_errors and returncode != 0:
|
||||
raise YunohostError("Failed to run command '{}'.\nreturncode: {}\nstdout:\n{}\nstderr:\n{}\n".format(cmd, returncode, out, err))
|
||||
|
||||
out = out.strip().split("\n")
|
||||
return (returncode, out, err)
|
||||
|
109
src/yunohost/data_migrations/0018_xtable_to_nftable.py
Normal file
|
@ -0,0 +1,109 @@
|
|||
import os
|
||||
import subprocess
|
||||
|
||||
from moulinette import m18n
|
||||
from yunohost.utils.error import YunohostError
|
||||
from moulinette.utils.log import getActionLogger
|
||||
|
||||
from yunohost.firewall import firewall_reload
|
||||
from yunohost.service import service_restart
|
||||
from yunohost.tools import Migration
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"Migrate legacy iptables rules from stretch that relied on xtable and should now rely on nftable"
|
||||
|
||||
dependencies = ["migrate_to_buster"]
|
||||
|
||||
def run(self):
|
||||
|
||||
self.do_ipv4 = os.system("iptables -w -L >/dev/null") == 0
|
||||
self.do_ipv6 = os.system("ip6tables -w -L >/dev/null") == 0
|
||||
|
||||
if not self.do_ipv4:
|
||||
logger.warning(m18n.n('iptables_unavailable'))
|
||||
if not self.do_ipv6:
|
||||
logger.warning(m18n.n('ip6tables_unavailable'))
|
||||
|
||||
backup_folder = "/home/yunohost.backup/premigration/xtable_to_nftable/"
|
||||
if not os.path.exists(backup_folder):
|
||||
os.makedirs(backup_folder, 0o750)
|
||||
self.backup_rules_ipv4 = os.path.join(backup_folder, "legacy_rules_ipv4")
|
||||
self.backup_rules_ipv6 = os.path.join(backup_folder, "legacy_rules_ipv6")
|
||||
|
||||
# Backup existing legacy rules to be able to rollback
|
||||
if self.do_ipv4 and not os.path.exists(self.backup_rules_ipv4):
|
||||
self.runcmd("iptables-legacy -L >/dev/null") # For some reason if we don't do this, iptables-legacy-save is empty ?
|
||||
self.runcmd("iptables-legacy-save > %s" % self.backup_rules_ipv4)
|
||||
assert open(self.backup_rules_ipv4).read().strip(), "Uhoh backup of legacy ipv4 rules is empty !?"
|
||||
if self.do_ipv6 and not os.path.exists(self.backup_rules_ipv6):
|
||||
self.runcmd("ip6tables-legacy -L >/dev/null") # For some reason if we don't do this, iptables-legacy-save is empty ?
|
||||
self.runcmd("ip6tables-legacy-save > %s" % self.backup_rules_ipv6)
|
||||
assert open(self.backup_rules_ipv6).read().strip(), "Uhoh backup of legacy ipv6 rules is empty !?"
|
||||
|
||||
# We inject the legacy rules (iptables-legacy) into the new iptable (just "iptables")
|
||||
try:
|
||||
if self.do_ipv4:
|
||||
self.runcmd("iptables-legacy-save | iptables-restore")
|
||||
if self.do_ipv6:
|
||||
self.runcmd("ip6tables-legacy-save | ip6tables-restore")
|
||||
except Exception as e:
|
||||
self.rollback()
|
||||
raise YunohostError("migration_0018_failed_to_migrate_iptables_rules", error=e)
|
||||
|
||||
# Reset everything in iptables-legacy
|
||||
# Stolen from https://serverfault.com/a/200642
|
||||
try:
|
||||
if self.do_ipv4:
|
||||
self.runcmd(
|
||||
"iptables-legacy-save | awk '/^[*]/ { print $1 }" # Keep lines like *raw, *filter and *nat
|
||||
" /^:[A-Z]+ [^-]/ { print $1 \" ACCEPT\" ; }" # Turn all policies to accept
|
||||
" /COMMIT/ { print $0; }'" # Keep the line COMMIT
|
||||
" | iptables-legacy-restore")
|
||||
if self.do_ipv6:
|
||||
self.runcmd(
|
||||
"ip6tables-legacy-save | awk '/^[*]/ { print $1 }" # Keep lines like *raw, *filter and *nat
|
||||
" /^:[A-Z]+ [^-]/ { print $1 \" ACCEPT\" ; }" # Turn all policies to accept
|
||||
" /COMMIT/ { print $0; }'" # Keep the line COMMIT
|
||||
" | ip6tables-legacy-restore")
|
||||
except Exception as e:
|
||||
self.rollback()
|
||||
raise YunohostError("migration_0018_failed_to_reset_legacy_rules", error=e)
|
||||
|
||||
# You might be wondering "uh but is it really useful to
|
||||
# iptables-legacy-save | iptables-restore considering firewall_reload()
|
||||
# flush/resets everything anyway ?"
|
||||
# But the answer is : firewall_reload() only resets the *filter table.
|
||||
# On more complex setups (e.g. internet cube or docker) you will also
|
||||
# have rules in the *nat (or maybe *raw?) sections of iptables.
|
||||
firewall_reload()
|
||||
service_restart("fail2ban")
|
||||
|
||||
def rollback(self):
|
||||
|
||||
if self.do_ipv4:
|
||||
self.runcmd("iptables-legacy-restore < %s" % self.backup_rules_ipv4)
|
||||
if self.do_ipv6:
|
||||
self.runcmd("iptables-legacy-restore < %s" % self.backup_rules_ipv6)
|
||||
|
||||
def runcmd(self, cmd, raise_on_errors=True):
|
||||
|
||||
logger.debug("Running command: " + cmd)
|
||||
|
||||
p = subprocess.Popen(cmd,
|
||||
shell=True,
|
||||
executable='/bin/bash',
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE)
|
||||
|
||||
out, err = p.communicate()
|
||||
returncode = p.returncode
|
||||
if raise_on_errors and returncode != 0:
|
||||
raise YunohostError("Failed to run command '{}'.\nreturncode: {}\nstdout:\n{}\nstderr:\n{}\n".format(cmd, returncode, out, err))
|
||||
|
||||
out = out.strip().split("\n")
|
||||
return (returncode, out, err)
|
||||
|
|
@ -27,13 +27,13 @@ class MyMigration(Migration):
|
|||
ldap_regen_conf_status = regen_conf(names=['slapd'], dry_run=True)
|
||||
# By this we check if they have been customized
|
||||
if ldap_regen_conf_status and ldap_regen_conf_status['slapd']['pending']:
|
||||
logger.warning(m18n.n("migration_0011_slapd_config_will_be_overwritten", conf_backup_folder=BACKUP_CONF_DIR))
|
||||
logger.warning(m18n.n("migration_0019_slapd_config_will_be_overwritten", conf_backup_folder=BACKUP_CONF_DIR))
|
||||
|
||||
# Update LDAP schema restart slapd
|
||||
logger.info(m18n.n("migration_0011_update_LDAP_schema"))
|
||||
logger.info(m18n.n("migration_0019_update_LDAP_schema"))
|
||||
regen_conf(names=['slapd'], force=True)
|
||||
|
||||
logger.info(m18n.n("migration_0015_add_new_attributes_in_ldap"))
|
||||
logger.info(m18n.n("migration_0019_add_new_attributes_in_ldap"))
|
||||
ldap = _get_ldap_interface()
|
||||
permission_list = user_permission_list(short=True, full_path=False)["permissions"]
|
||||
|
||||
|
@ -83,7 +83,7 @@ class MyMigration(Migration):
|
|||
# just display a warning if we detect that the conf was manually modified
|
||||
|
||||
# Backup LDAP and the apps settings before to do the migration
|
||||
logger.info(m18n.n("migration_0011_backup_before_migration"))
|
||||
logger.info(m18n.n("migration_0019_backup_before_migration"))
|
||||
try:
|
||||
backup_folder = "/home/yunohost.backup/premigration/" + time.strftime('%Y%m%d-%H%M%S', time.gmtime())
|
||||
os.makedirs(backup_folder, 0o750)
|
||||
|
@ -92,7 +92,7 @@ class MyMigration(Migration):
|
|||
os.system("cp -r --preserve /var/lib/ldap %s/ldap_db" % backup_folder)
|
||||
os.system("cp -r --preserve /etc/yunohost/apps %s/apps_settings" % backup_folder)
|
||||
except Exception as e:
|
||||
raise YunohostError("migration_0011_can_not_backup_before_migration", error=e)
|
||||
raise YunohostError("migration_0019_can_not_backup_before_migration", error=e)
|
||||
finally:
|
||||
os.system("systemctl start slapd")
|
||||
|
||||
|
@ -103,7 +103,7 @@ class MyMigration(Migration):
|
|||
app_ssowatconf()
|
||||
|
||||
except Exception as e:
|
||||
logger.warn(m18n.n("migration_0011_migration_failed_trying_to_rollback"))
|
||||
logger.warn(m18n.n("migration_0019_migration_failed_trying_to_rollback"))
|
||||
os.system("systemctl stop slapd")
|
||||
os.system("rm -r /etc/ldap/slapd.d") # To be sure that we don't keep some part of the old config
|
||||
os.system("cp -r --preserve %s/ldap_config/. /etc/ldap/" % backup_folder)
|
||||
|
@ -111,7 +111,7 @@ class MyMigration(Migration):
|
|||
os.system("cp -r --preserve %s/apps_settings/. /etc/yunohost/apps/" % backup_folder)
|
||||
os.system("systemctl start slapd")
|
||||
os.system("rm -r " + backup_folder)
|
||||
logger.info(m18n.n("migration_0011_rollback_success"))
|
||||
logger.info(m18n.n("migration_0019_rollback_success"))
|
||||
raise
|
||||
else:
|
||||
os.system("rm -r " + backup_folder)
|
|
@ -89,6 +89,10 @@ def domain_add(operation_logger, domain, dyndns=False):
|
|||
raise YunohostError('domain_exists')
|
||||
|
||||
operation_logger.start()
|
||||
|
||||
# Lowercase the domain to avoid some edge case issues
|
||||
# See: https://forum.yunohost.org/t/invalid-domain-causes-diagnosis-web-to-fail-fr-on-demand/11765
|
||||
domain = domain.lower()
|
||||
|
||||
# DynDNS domain
|
||||
if dyndns:
|
||||
|
|
|
@ -205,22 +205,6 @@ def dyndns_update(operation_logger, dyn_host="dyndns.yunohost.org", domain=None,
|
|||
|
||||
key = keys[0]
|
||||
|
||||
# This means that hmac-md5 is used
|
||||
# (Re?)Trigger the migration to sha256 and return immediately.
|
||||
# The actual update will be done in next run.
|
||||
if "+157" in key:
|
||||
from yunohost.tools import _get_migration_by_name
|
||||
migration = _get_migration_by_name("migrate_to_tsig_sha256")
|
||||
try:
|
||||
migration.run(dyn_host, domain, key)
|
||||
except Exception as e:
|
||||
logger.error(m18n.n('migrations_migration_has_failed',
|
||||
exception=e,
|
||||
number=migration.number,
|
||||
name=migration.name),
|
||||
exc_info=1)
|
||||
return
|
||||
|
||||
# Extract 'host', e.g. 'nohost.me' from 'foo.nohost.me'
|
||||
host = domain.split('.')[1:]
|
||||
host = '.'.join(host)
|
||||
|
|
|
@ -26,11 +26,7 @@
|
|||
import os
|
||||
import sys
|
||||
import yaml
|
||||
try:
|
||||
import miniupnpc
|
||||
except ImportError:
|
||||
sys.stderr.write('Error: Yunohost CLI Require miniupnpc lib\n')
|
||||
sys.exit(1)
|
||||
import miniupnpc
|
||||
|
||||
from moulinette import m18n
|
||||
from yunohost.utils.error import YunohostError
|
||||
|
|
|
@ -35,6 +35,7 @@ from logging import FileHandler, getLogger, Formatter
|
|||
from moulinette import m18n, msettings
|
||||
from moulinette.core import MoulinetteError
|
||||
from yunohost.utils.error import YunohostError
|
||||
from yunohost.utils.packages import get_ynh_package_version
|
||||
from moulinette.utils.log import getActionLogger
|
||||
from moulinette.utils.filesystem import read_file, read_yaml
|
||||
|
||||
|
@ -135,6 +136,29 @@ def log_display(path, number=None, share=False, filter_irrelevant=False):
|
|||
share
|
||||
"""
|
||||
|
||||
if share:
|
||||
filter_irrelevant = True
|
||||
|
||||
if filter_irrelevant:
|
||||
filters = [
|
||||
r"set [+-]x$",
|
||||
r"set [+-]o xtrace$",
|
||||
r"local \w+$",
|
||||
r"local legacy_args=.*$",
|
||||
r".*Helper used in legacy mode.*",
|
||||
r"args_array=.*$",
|
||||
r"local -A args_array$",
|
||||
r"ynh_handle_getopts_args",
|
||||
r"ynh_script_progression"
|
||||
]
|
||||
else:
|
||||
filters = []
|
||||
|
||||
def _filter_lines(lines, filters=[]):
|
||||
|
||||
filters = [re.compile(f) for f in filters]
|
||||
return [l for l in lines if not any(f.search(l.strip()) for f in filters)]
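# Editor's sketch (illustrative, hypothetical lines):
# >>> _filter_lines(["+ set -x", "Installing foo ..."], [r"set [+-]x$"])
# ['Installing foo ...']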
|
||||
|
||||
# Normalize log/metadata paths and filenames
|
||||
abs_path = path
|
||||
log_path = None
|
||||
|
@ -173,7 +197,8 @@ def log_display(path, number=None, share=False, filter_irrelevant=False):
|
|||
content += read_file(md_path)
|
||||
content += "\n============\n\n"
|
||||
if os.path.exists(log_path):
|
||||
content += read_file(log_path)
|
||||
actual_log = read_file(log_path)
|
||||
content += "\n".join(_filter_lines(actual_log.split("\n"), filters))
|
||||
|
||||
url = yunopaste(content)
|
||||
|
||||
|
@ -202,27 +227,12 @@ def log_display(path, number=None, share=False, filter_irrelevant=False):
|
|||
|
||||
# Display logs if exist
|
||||
if os.path.exists(log_path):
|
||||
|
||||
if filter_irrelevant:
|
||||
filters = [
|
||||
r"set [+-]x$",
|
||||
r"set [+-]o xtrace$",
|
||||
r"local \w+$",
|
||||
r"local legacy_args=.*$",
|
||||
r".*Helper used in legacy mode.*",
|
||||
r"args_array=.*$",
|
||||
r"local -A args_array$",
|
||||
r"ynh_handle_getopts_args",
|
||||
r"ynh_script_progression"
|
||||
]
|
||||
else:
|
||||
filters = []
|
||||
|
||||
from yunohost.service import _tail
|
||||
if number:
|
||||
logs = _tail(log_path, int(number), filters=filters)
|
||||
logs = _tail(log_path, int(number))
|
||||
else:
|
||||
logs = read_file(log_path)
|
||||
logs = _filter_lines(logs, filters)
|
||||
infos['log_path'] = log_path
|
||||
infos['logs'] = logs
|
||||
|
||||
|
@ -456,6 +466,7 @@ class OperationLogger(object):
|
|||
data = {
|
||||
'started_at': self.started_at,
|
||||
'operation': self.operation,
|
||||
'yunohost_version': get_ynh_package_version("yunohost")["version"],
|
||||
}
|
||||
if self.related_to is not None:
|
||||
data['related_to'] = self.related_to
|
||||
|
|
|
@ -62,16 +62,6 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run
|
|||
|
||||
"""
|
||||
|
||||
# Legacy code to automatically run the migration
|
||||
# This is required because regen_conf is called before the migration call
|
||||
# in debian's postinst script
|
||||
if os.path.exists("/etc/yunohost/installed") \
|
||||
and ("conffiles" in read_file("/etc/yunohost/services.yml") \
|
||||
or not os.path.exists(REGEN_CONF_FILE)):
|
||||
from yunohost.tools import _get_migration_by_name
|
||||
migration = _get_migration_by_name("decouple_regenconf_from_services")
|
||||
migration.run()
|
||||
|
||||
result = {}
|
||||
|
||||
# Return the list of pending conf
|
||||
|
@ -125,11 +115,12 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run
|
|||
# return the arguments to pass to the script
|
||||
return pre_args + [category_pending_path, ]
|
||||
|
||||
# Don't regen SSH if not specifically specified
|
||||
ssh_explicitly_specified = isinstance(names, list) and "ssh" in names
|
||||
|
||||
# By default, we regen everything
|
||||
if not names:
|
||||
names = hook_list('conf_regen', list_by='name',
|
||||
show_info=False)['hooks']
|
||||
names.remove('ssh')
|
||||
|
||||
# Dirty hack for legacy code : avoid attempting to regen the conf for
|
||||
# glances because it got removed ... This is only needed *once*
|
||||
|
@ -185,6 +176,58 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run
|
|||
succeed_regen = {}
|
||||
failed_regen = {}
|
||||
|
||||
# Here we are doing some weird legacy shit
|
||||
# The thing is, on some very old or specific setup, the sshd_config file
|
||||
# was absolutely not managed by the regenconf ...
|
||||
# But we now want to make sure that this file is managed.
|
||||
# However, we don't want to overwrite a specific custom sshd_config
|
||||
# which may make the admin unhappy ...
|
||||
# So : if the hash for this file does not exist, we set the hash as the
|
||||
# hash of the pending configuration ...
|
||||
# That way, the file will later appear as manually modified.
|
||||
sshd_config = "/etc/ssh/sshd_config"
|
||||
if category == "ssh" and sshd_config not in conf_hashes and sshd_config in conf_files:
|
||||
conf_hashes[sshd_config] = _calculate_hash(conf_files[sshd_config])
|
||||
_update_conf_hashes(category, conf_hashes)
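# Editor's note (illustrative): seeding the hash with the *pending* conf means
# the custom sshd_config on disk will not match the recorded hash on the next
# comparison, so it gets reported as manually modified instead of being
# silently overwritten.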
|
||||
|
||||
# Consider the following scenario:
|
||||
# - you add a domain foo.bar
|
||||
# - the regen-conf creates file /etc/dnsmasq.d/foo.bar
|
||||
# - the admin manually *deletes* /etc/dnsmasq.d/foo.bar
|
||||
# - the file is now understood as manually deleted because there's the old file hash in regenconf.yml
|
||||
#
|
||||
# ... so far so good, that's the expected behavior.
|
||||
#
|
||||
# But then:
|
||||
# - the admin remove domain foo.bar entirely
|
||||
# - but now the hash for /etc/dnsmasq.d/foo.bar is *still* in
|
||||
# regenconf.yml and the file is still flagged as manually
|
||||
# modified/deleted... And the user cannot even do anything about it
|
||||
# except removing the hash in regenconf.yml...
|
||||
#
|
||||
# Expected behavior: it should forget about that
|
||||
# hash because dnsmasq's regen-conf doesn't say anything about what's
|
||||
# the state of that file so it should assume that it should be deleted.
|
||||
#
|
||||
# - then the admin tries to *re-add* foo.bar !
|
||||
# - ... but because the file is still flagged as manually modified
|
||||
# the regen-conf refuses to re-create the file.
|
||||
#
|
||||
# Expected behavior : the regen-conf should have forgotten about the hash
|
||||
# from earlier and this wouldn't happen.
|
||||
# ------
|
||||
# conf_files contain files explicitly set by the current regen conf run
|
||||
# conf_hashes contain all files known from the past runs
|
||||
# we compare these to get the list of stale hashes and flag the file as
|
||||
# "should be removed"
|
||||
stale_files = set(conf_hashes.keys()) - set(conf_files.keys())
|
||||
stale_files_with_non_empty_hash = [f for f in stale_files if conf_hashes.get(f)]
|
||||
for f in stale_files_with_non_empty_hash:
|
||||
conf_files[f] = None
|
||||
# </> End discussion about stale file hashes
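# Editor's toy illustration (hypothetical paths) of the computation above:
# >>> conf_hashes = {"/etc/dnsmasq.d/foo.bar": "abc123", "/etc/dnsmasq.d/kept": "def456"}
# >>> conf_files = {"/etc/dnsmasq.d/kept": "some/pending/path"}
# >>> set(conf_hashes) - set(conf_files)
# {'/etc/dnsmasq.d/foo.bar'}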
|
||||
|
||||
force_update_hashes_for_this_category = False
|
||||
|
||||
for system_path, pending_path in conf_files.items():
|
||||
logger.debug("processing pending conf '%s' to system conf '%s'",
|
||||
pending_path, system_path)
|
||||
|
@ -196,20 +239,46 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run
|
|||
system_path, pending_path, True) if with_diff else None
|
||||
|
||||
# Check if the conf must be removed
|
||||
to_remove = True if os.path.getsize(pending_path) == 0 else False
|
||||
to_remove = True if pending_path and os.path.getsize(pending_path) == 0 else False
|
||||
|
||||
# Retrieve and calculate hashes
|
||||
system_hash = _calculate_hash(system_path)
|
||||
saved_hash = conf_hashes.get(system_path, None)
|
||||
new_hash = None if to_remove else _calculate_hash(pending_path)
|
||||
|
||||
# -> configuration was previously managed by yunohost but should now
|
||||
# be removed / unmanaged
|
||||
if system_path in stale_files_with_non_empty_hash:
|
||||
# File is already deleted, so let's just silently forget about this hash entirely
|
||||
if not system_hash:
|
||||
logger.debug("> forgetting about stale file/hash")
|
||||
conf_hashes[system_path] = None
|
||||
conf_status = 'forget-about-it'
|
||||
regenerated = True
|
||||
# Otherwise there's still a file on the system but it's not managed by
|
||||
# Yunohost anymore... But if user requested --force we shall
|
||||
# force-erase it
|
||||
elif force:
|
||||
logger.debug("> force-remove stale file")
|
||||
regenerated = _regen(system_path)
|
||||
conf_status = 'force-removed'
|
||||
# Otherwise, flag the file as manually modified
|
||||
else:
|
||||
logger.warning(m18n.n(
|
||||
'regenconf_file_manually_modified',
|
||||
conf=system_path))
|
||||
conf_status = 'modified'
|
||||
|
||||
# -> system conf does not exists
|
||||
if not system_hash:
|
||||
elif not system_hash:
|
||||
if to_remove:
|
||||
logger.debug("> system conf is already removed")
|
||||
os.remove(pending_path)
|
||||
conf_hashes[system_path] = None
|
||||
conf_status = 'forget-about-it'
|
||||
force_update_hashes_for_this_category = True
|
||||
continue
|
||||
if not saved_hash or force:
|
||||
elif not saved_hash or force:
|
||||
if force:
|
||||
logger.debug("> system conf has been manually removed")
|
||||
conf_status = 'force-created'
|
||||
|
@ -267,6 +336,9 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run
|
|||
logger.debug("> new conf is as current system conf")
|
||||
conf_status = 'managed'
|
||||
regenerated = True
|
||||
elif force and system_path == sshd_config and not ssh_explicitly_specified:
|
||||
logger.warning(m18n.n('regenconf_need_to_explicitly_specify_ssh'))
|
||||
conf_status = 'modified'
|
||||
elif force:
|
||||
regenerated = _regen(system_path, pending_path)
|
||||
conf_status = 'force-updated'
|
||||
|
@ -283,7 +355,7 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run
|
|||
if regenerated:
|
||||
succeed_regen[system_path] = conf_result
|
||||
conf_hashes[system_path] = new_hash
|
||||
if os.path.isfile(pending_path):
|
||||
if pending_path and os.path.isfile(pending_path):
|
||||
os.remove(pending_path)
|
||||
else:
|
||||
failed_regen[system_path] = conf_result
|
||||
|
@ -298,7 +370,7 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run
|
|||
else:
|
||||
logger.success(m18n.n('regenconf_would_be_updated', category=category))
|
||||
|
||||
if succeed_regen and not dry_run:
|
||||
if (succeed_regen or force_update_hashes_for_this_category) and not dry_run:
|
||||
_update_conf_hashes(category, conf_hashes)
|
||||
|
||||
# Append the category results
|
||||
|
@ -368,13 +440,13 @@ def _get_files_diff(orig_file, new_file, as_string=False, skip_header=True):
|
|||
|
||||
"""
|
||||
|
||||
if os.path.exists(orig_file):
|
||||
if orig_file and os.path.exists(orig_file):
|
||||
with open(orig_file, 'r') as orig_file:
|
||||
orig_file = orig_file.readlines()
|
||||
else:
|
||||
orig_file = []
|
||||
|
||||
if os.path.exists(new_file):
|
||||
if new_file and os.path.exists(new_file):
|
||||
with open(new_file, 'r') as new_file:
|
||||
new_file = new_file.readlines()
|
||||
else:
|
||||
|
@ -399,7 +471,7 @@ def _get_files_diff(orig_file, new_file, as_string=False, skip_header=True):
|
|||
def _calculate_hash(path):
|
||||
"""Calculate the MD5 hash of a file"""
|
||||
|
||||
if not os.path.exists(path):
|
||||
if not path or not os.path.exists(path):
|
||||
return None
|
||||
|
||||
hasher = hashlib.md5()
|
||||
|
@ -485,6 +557,12 @@ def _update_conf_hashes(category, hashes):
|
|||
if category_conf is None:
|
||||
category_conf = {}
|
||||
|
||||
# If a file shall be removed and is indeed removed, forget entirely about
|
||||
# that path.
|
||||
# It avoids keeping weird old entries like
|
||||
# /etc/nginx/conf.d/some.domain.that.got.removed.conf
|
||||
hashes = {path: hash_ for path, hash_ in hashes.items() if hash_ is not None or os.path.exists(path)}
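# Editor's toy illustration (hypothetical entries, assuming /etc/gone.conf was
# really deleted): only the stale None entry is dropped.
# >>> hashes = {"/etc/foo.conf": "abc", "/etc/gone.conf": None}
# >>> {p: h for p, h in hashes.items() if h is not None or os.path.exists(p)}
# {'/etc/foo.conf': 'abc'}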
|
||||
|
||||
category_conf['conffiles'] = hashes
|
||||
categories[category] = category_conf
|
||||
_save_regenconf_infos(categories)
|
||||
|
|
|
@ -97,8 +97,8 @@ def service_add(name, description=None, log=None, log_type=None, test_status=Non
|
|||
service["test_status"] = test_status
|
||||
else:
|
||||
# Try to get the description from systemd service
|
||||
_, service = _get_service_information_from_systemd(name)
|
||||
type_ = service.get("Type") if service is not None else ""
|
||||
_, systemd_info = _get_service_information_from_systemd(name)
|
||||
type_ = systemd_info.get("Type") if systemd_info is not None else ""
|
||||
if type_ == "oneshot":
|
||||
logger.warning("/!\\ Packagers! Please provide a --test_status when adding oneshot-type services in Yunohost, such that it has a reliable way to check if the service is running or not.")
|
||||
|
||||
|
@ -618,7 +618,7 @@ def _get_services():
|
|||
if "postgresql" in services:
|
||||
if "description" in services["postgresql"]:
|
||||
del services["postgresql"]["description"]
|
||||
services["postgresql"]["actual_systemd_service"] = "postgresql@9.6-main"
|
||||
services["postgresql"]["actual_systemd_service"] = "postgresql@11-main"
|
||||
|
||||
return services
|
||||
|
||||
|
@ -639,7 +639,7 @@ def _save_services(services):
|
|||
raise
|
||||
|
||||
|
||||
def _tail(file, n, filters=[]):
|
||||
def _tail(file, n):
|
||||
"""
|
||||
Reads a n lines from f with an offset of offset lines. The return
|
||||
value is a tuple in the form ``(lines, has_more)`` where `has_more` is
|
||||
|
@ -650,8 +650,6 @@ def _tail(file, n, filters=[]):
|
|||
avg_line_length = 74
|
||||
to_read = n
|
||||
|
||||
if filters:
|
||||
filters = [re.compile(f) for f in filters]
|
||||
|
||||
try:
|
||||
if file.endswith(".gz"):
|
||||
|
@ -673,9 +671,6 @@ def _tail(file, n, filters=[]):
|
|||
pos = f.tell()
|
||||
lines = f.read().splitlines()
|
||||
|
||||
for filter_ in filters:
|
||||
lines = [l for l in lines if not filter_.search(l)]
|
||||
|
||||
if len(lines) >= to_read:
|
||||
return lines[-to_read:]
|
||||
|
||||
|
|
|
@ -25,7 +25,7 @@ def user_ssh_allow(username):
|
|||
|
||||
from yunohost.utils.ldap import _get_ldap_interface
|
||||
ldap = _get_ldap_interface()
|
||||
ldap.update('uid=%s,ou=users' % username, {'loginShell': '/bin/bash'})
|
||||
ldap.update('uid=%s,ou=users' % username, {'loginShell': ['/bin/bash']})
|
||||
|
||||
# Somehow this is needed otherwise the PAM thing doesn't forget about the
|
||||
# old loginShell value ?
|
||||
|
@ -46,7 +46,7 @@ def user_ssh_disallow(username):
|
|||
|
||||
from yunohost.utils.ldap import _get_ldap_interface
|
||||
ldap = _get_ldap_interface()
|
||||
ldap.update('uid=%s,ou=users' % username, {'loginShell': '/bin/false'})
|
||||
ldap.update('uid=%s,ou=users' % username, {'loginShell': ['/bin/false']})
|
||||
|
||||
# Somehow this is needed otherwise the PAM thing doesn't forget about the
|
||||
# old loginShell value ?
|
||||
|
|
|
@ -315,58 +315,3 @@ def test_apps_catalog_load_with_oudated_api_version(mocker):
|
|||
for cache_file in glob.glob(APPS_CATALOG_CACHE + "/*"):
|
||||
cache_json = read_json(cache_file)
|
||||
assert cache_json["from_api_version"] == APPS_CATALOG_API_VERSION
|
||||
|
||||
|
||||
|
||||
def test_apps_catalog_migrate_legacy_explicitly():
|
||||
|
||||
open("/etc/yunohost/appslists.json", "w").write('{"yunohost": {"yolo":"swag"}}')
|
||||
mkdir(APPS_CATALOG_CACHE, 0o750, parents=True)
|
||||
open(APPS_CATALOG_CACHE+"/yunohost_old.json", "w").write('{"foo":{}, "bar": {}}')
|
||||
open(APPS_CATALOG_CRON_PATH, "w").write("# Some old cron")
|
||||
|
||||
from yunohost.tools import _get_migration_by_name
|
||||
migration = _get_migration_by_name("futureproof_apps_catalog_system")
|
||||
|
||||
with requests_mock.Mocker() as m:
|
||||
|
||||
# Mock the server response with a dummy apps catalog
|
||||
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, text=DUMMY_APP_CATALOG)
|
||||
migration.run()
|
||||
|
||||
# Old conf shouldnt be there anymore (got renamed to .old)
|
||||
assert not os.path.exists("/etc/yunohost/appslists.json")
|
||||
# Old cache should have been removed
|
||||
assert not os.path.exists(APPS_CATALOG_CACHE+"/yunohost_old.json")
|
||||
# Cron should have been changed
|
||||
assert "/bin/bash" in open(APPS_CATALOG_CRON_PATH, "r").read()
|
||||
assert cron_job_is_there()
|
||||
|
||||
# Reading the apps_catalog should work
|
||||
app_dict = _load_apps_catalog()["apps"]
|
||||
assert "foo" in app_dict.keys()
|
||||
assert "bar" in app_dict.keys()
|
||||
|
||||
|
||||
def test_apps_catalog_migrate_legacy_implicitly():
|
||||
|
||||
open("/etc/yunohost/appslists.json", "w").write('{"yunohost": {"yolo":"swag"}}')
|
||||
mkdir(APPS_CATALOG_CACHE, 0o750, parents=True)
|
||||
open(APPS_CATALOG_CACHE+"/yunohost_old.json", "w").write('{"old_foo":{}, "old_bar": {}}')
|
||||
open(APPS_CATALOG_CRON_PATH, "w").write("# Some old cron")
|
||||
|
||||
with requests_mock.Mocker() as m:
|
||||
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, text=DUMMY_APP_CATALOG)
|
||||
app_dict = _load_apps_catalog()["apps"]
|
||||
|
||||
assert "foo" in app_dict.keys()
|
||||
assert "bar" in app_dict.keys()
|
||||
|
||||
# Old conf shouldnt be there anymore (got renamed to .old)
|
||||
assert not os.path.exists("/etc/yunohost/appslists.json")
|
||||
# Old cache should have been removed
|
||||
assert not os.path.exists(APPS_CATALOG_CACHE+"/yunohost_old.json")
|
||||
# Cron should have been changed
|
||||
assert "/bin/bash" in open(APPS_CATALOG_CRON_PATH, "r").read()
|
||||
assert cron_job_is_there()
|
||||
|
||||
|
|
|
@ -1,31 +1,26 @@
|
|||
import glob
|
||||
import os
|
||||
import pytest
|
||||
import shutil
|
||||
import requests
|
||||
|
||||
from conftest import message, raiseYunohostError
|
||||
|
||||
from moulinette import m18n
|
||||
from moulinette.utils.filesystem import mkdir
|
||||
|
||||
from yunohost.domain import _get_maindomain, domain_add, domain_remove, domain_list
|
||||
from yunohost.utils.error import YunohostError
|
||||
from yunohost.regenconf import manually_modified_files, _get_conf_hashes, _force_clear_hashes
|
||||
from conftest import message
|
||||
from yunohost.domain import domain_add, domain_remove, domain_list
|
||||
from yunohost.regenconf import regen_conf, manually_modified_files, _get_conf_hashes, _force_clear_hashes
|
||||
|
||||
TEST_DOMAIN = "secondarydomain.test"
|
||||
TEST_DOMAIN_NGINX_CONFIG = "/etc/nginx/conf.d/secondarydomain.test.conf"
|
||||
TEST_DOMAIN_NGINX_CONFIG = "/etc/nginx/conf.d/%s.conf" % TEST_DOMAIN
|
||||
TEST_DOMAIN_DNSMASQ_CONFIG = "/etc/dnsmasq.d/%s" % TEST_DOMAIN
|
||||
SSHD_CONFIG = "/etc/ssh/sshd_config"
|
||||
|
||||
def setup_function(function):
|
||||
|
||||
_force_clear_hashes([TEST_DOMAIN_NGINX_CONFIG])
|
||||
clean()
|
||||
|
||||
|
||||
def teardown_function(function):
|
||||
|
||||
clean()
|
||||
_force_clear_hashes([TEST_DOMAIN_NGINX_CONFIG])
|
||||
|
||||
|
||||
def clean():
|
||||
|
||||
assert os.system("pgrep slapd >/dev/null") == 0
|
||||
|
@ -43,6 +38,8 @@ def clean():
|
|||
assert TEST_DOMAIN_NGINX_CONFIG not in _get_conf_hashes("nginx")
|
||||
assert TEST_DOMAIN_NGINX_CONFIG not in manually_modified_files()
|
||||
|
||||
regen_conf(['ssh'], force=True)
|
||||
|
||||
|
||||
def test_add_domain():
|
||||
|
||||
|
@ -78,3 +75,129 @@ def test_add_domain_conf_already_exists():
|
|||
assert os.path.exists(TEST_DOMAIN_NGINX_CONFIG)
|
||||
assert TEST_DOMAIN_NGINX_CONFIG in _get_conf_hashes("nginx")
|
||||
assert TEST_DOMAIN_NGINX_CONFIG not in manually_modified_files()
|
||||
|
||||
|
||||
def test_ssh_conf_unmanaged():
|
||||
|
||||
_force_clear_hashes([SSHD_CONFIG])
|
||||
|
||||
assert SSHD_CONFIG not in _get_conf_hashes("ssh")
|
||||
|
||||
regen_conf()
|
||||
|
||||
assert SSHD_CONFIG in _get_conf_hashes("ssh")
|
||||
|
||||
|
||||
def test_ssh_conf_unmanaged_and_manually_modified(mocker):
|
||||
|
||||
_force_clear_hashes([SSHD_CONFIG])
|
||||
os.system("echo ' ' >> %s" % SSHD_CONFIG)
|
||||
|
||||
assert SSHD_CONFIG not in _get_conf_hashes("ssh")
|
||||
|
||||
regen_conf()
|
||||
|
||||
assert SSHD_CONFIG in _get_conf_hashes("ssh")
|
||||
assert SSHD_CONFIG in manually_modified_files()
|
||||
|
||||
with message(mocker, "regenconf_need_to_explicitly_specify_ssh"):
|
||||
regen_conf(force=True)
|
||||
|
||||
assert SSHD_CONFIG in _get_conf_hashes("ssh")
|
||||
assert SSHD_CONFIG in manually_modified_files()
|
||||
|
||||
regen_conf(['ssh'], force=True)
|
||||
|
||||
assert SSHD_CONFIG in _get_conf_hashes("ssh")
|
||||
assert SSHD_CONFIG not in manually_modified_files()
|
||||
|
||||
|
||||
def test_stale_hashes_get_removed_if_empty():
|
||||
"""
|
||||
This is intended to test that if a file gets removed and is indeed removed,
|
||||
we don't keep a useless empty hash corresponding to an old file.
|
||||
In this case, we test this using the dnsmasq conf file (we don't do this
|
||||
using the nginx conf file because it's already force-removed during
|
||||
domain_remove())
|
||||
"""
|
||||
|
||||
domain_add(TEST_DOMAIN)
|
||||
|
||||
assert os.path.exists(TEST_DOMAIN_DNSMASQ_CONFIG)
|
||||
assert TEST_DOMAIN_DNSMASQ_CONFIG in _get_conf_hashes("dnsmasq")
|
||||
|
||||
domain_remove(TEST_DOMAIN)
|
||||
|
||||
assert not os.path.exists(TEST_DOMAIN_DNSMASQ_CONFIG)
|
||||
assert TEST_DOMAIN_DNSMASQ_CONFIG not in _get_conf_hashes("dnsmasq")
|
||||
|
||||
|
||||
def test_stale_hashes_if_file_manually_deleted():
|
||||
"""
|
||||
Same as other test, but manually delete the file in between and check
|
||||
behavior
|
||||
"""
|
||||
|
||||
domain_add(TEST_DOMAIN)
|
||||
|
||||
assert os.path.exists(TEST_DOMAIN_DNSMASQ_CONFIG)
|
||||
assert TEST_DOMAIN_DNSMASQ_CONFIG in _get_conf_hashes("dnsmasq")
|
||||
|
||||
os.remove(TEST_DOMAIN_DNSMASQ_CONFIG)
|
||||
|
||||
assert not os.path.exists(TEST_DOMAIN_DNSMASQ_CONFIG)
|
||||
|
||||
regen_conf(names=["dnsmasq"])
|
||||
|
||||
assert not os.path.exists(TEST_DOMAIN_DNSMASQ_CONFIG)
|
||||
assert TEST_DOMAIN_DNSMASQ_CONFIG in _get_conf_hashes("dnsmasq")
|
||||
|
||||
domain_remove(TEST_DOMAIN)
|
||||
|
||||
assert not os.path.exists(TEST_DOMAIN_DNSMASQ_CONFIG)
|
||||
assert TEST_DOMAIN_DNSMASQ_CONFIG not in _get_conf_hashes("dnsmasq")
|
||||
|
||||
# This test only works if you comment the part at the end of the regen-conf in
|
||||
# dnsmasq that auto-flags /etc/dnsmasq.d/foo.bar as "to be removed" (using touch)
|
||||
# ... But we want to keep it because it also possibly flags files that were
|
||||
# never known by the regen-conf (e.g. if somebody adds a
|
||||
# /etc/dnsmasq.d/my.custom.extension)
|
||||
# Ideally we could use a system that's able to properly state 'no file in this
|
||||
# folder should exist except the ones explicitly defined by regen-conf' but
|
||||
# that's too much work for the scope of this commit.
|
||||
#
|
||||
# ... Anyway, the proper way to write these tests would be to use a dummy
|
||||
# regen-conf hook just for tests but meh I'm lazy
|
||||
#
|
||||
#def test_stale_hashes_if_file_manually_modified():
|
||||
# """
|
||||
# Same as other test, but manually delete the file in between and check
|
||||
# behavior
|
||||
# """
|
||||
#
|
||||
# domain_add(TEST_DOMAIN)
|
||||
#
|
||||
# assert os.path.exists(TEST_DOMAIN_DNSMASQ_CONFIG)
|
||||
# assert TEST_DOMAIN_DNSMASQ_CONFIG in _get_conf_hashes("dnsmasq")
|
||||
#
|
||||
# os.system("echo '#pwet' > %s" % TEST_DOMAIN_DNSMASQ_CONFIG)
|
||||
#
|
||||
# assert os.path.exists(TEST_DOMAIN_DNSMASQ_CONFIG)
|
||||
# assert open(TEST_DOMAIN_DNSMASQ_CONFIG).read().strip() == "#pwet"
|
||||
#
|
||||
# regen_conf(names=["dnsmasq"])
|
||||
#
|
||||
# assert os.path.exists(TEST_DOMAIN_DNSMASQ_CONFIG)
|
||||
# assert open(TEST_DOMAIN_DNSMASQ_CONFIG).read().strip() == "#pwet"
|
||||
# assert TEST_DOMAIN_DNSMASQ_CONFIG in _get_conf_hashes("dnsmasq")
|
||||
#
|
||||
# domain_remove(TEST_DOMAIN)
|
||||
#
|
||||
# assert os.path.exists(TEST_DOMAIN_DNSMASQ_CONFIG)
|
||||
# assert open(TEST_DOMAIN_DNSMASQ_CONFIG).read().strip() == "#pwet"
|
||||
# assert TEST_DOMAIN_DNSMASQ_CONFIG in _get_conf_hashes("dnsmasq")
|
||||
#
|
||||
# regen_conf(names=["dnsmasq"], force=True)
|
||||
#
|
||||
# assert not os.path.exists(TEST_DOMAIN_DNSMASQ_CONFIG)
|
||||
# assert TEST_DOMAIN_DNSMASQ_CONFIG not in _get_conf_hashes("dnsmasq")
|
||||
|
|
|
@ -88,15 +88,15 @@ def tools_ldapinit():
|
|||
logger.warn("Error when trying to inject '%s' -> '%s' into ldap: %s" % (rdn, attr_dict, e))
|
||||
|
||||
admin_dict = {
|
||||
'cn': 'admin',
|
||||
'uid': 'admin',
|
||||
'description': 'LDAP Administrator',
|
||||
'gidNumber': '1007',
|
||||
'uidNumber': '1007',
|
||||
'homeDirectory': '/home/admin',
|
||||
'loginShell': '/bin/bash',
|
||||
'cn': ['admin'],
|
||||
'uid': ['admin'],
|
||||
'description': ['LDAP Administrator'],
|
||||
'gidNumber': ['1007'],
|
||||
'uidNumber': ['1007'],
|
||||
'homeDirectory': ['/home/admin'],
|
||||
'loginShell': ['/bin/bash'],
|
||||
'objectClass': ['organizationalRole', 'posixAccount', 'simpleSecurityObject'],
|
||||
'userPassword': 'yunohost'
|
||||
'userPassword': ['yunohost']
|
||||
}
|
||||
|
||||
ldap.update('cn=admin', admin_dict)
|
||||
|
@ -111,6 +111,14 @@ def tools_ldapinit():
|
|||
logger.error(m18n.n('ldap_init_failed_to_create_admin'))
|
||||
raise YunohostError('installation_failed')
|
||||
|
||||
try:
|
||||
# Attempt to create user home folder
|
||||
subprocess.check_call(["mkhomedir_helper", "admin"])
|
||||
except subprocess.CalledProcessError:
|
||||
if not os.path.isdir('/home/{0}'.format("admin")):
|
||||
logger.warning(m18n.n('user_home_creation_failed'),
|
||||
exc_info=1)
|
||||
|
||||
logger.success(m18n.n('ldap_initialized'))
|
||||
|
||||
|
||||
|
@ -140,7 +148,7 @@ def tools_adminpw(new_password, check_strength=True):
|
|||
ldap = _get_ldap_interface()
|
||||
|
||||
try:
|
||||
ldap.update("cn=admin", {"userPassword": new_hash, })
|
||||
ldap.update("cn=admin", {"userPassword": [ new_hash ], })
|
||||
except:
|
||||
logger.exception('unable to change admin password')
|
||||
raise YunohostError('admin_password_change_failed')
|
||||
|
@ -359,6 +367,12 @@ def tools_postinstall(operation_logger, domain, password, ignore_dyndns=False,
|
|||
except Exception as e:
|
||||
logger.warning(str(e))
|
||||
|
||||
# Create the archive directory (makes it easier for people to upload backup
|
||||
# archives, otherwise it's only created after running `yunohost backup
|
||||
# create` once.)
|
||||
from yunohost.backup import _create_archive_dir
|
||||
_create_archive_dir()
|
||||
|
||||
# Init migrations (skip them, no need to run them on a fresh system)
|
||||
_skip_all_migrations()
|
||||
|
||||
|
@ -368,7 +382,7 @@ def tools_postinstall(operation_logger, domain, password, ignore_dyndns=False,
|
|||
service_enable("yunohost-firewall")
|
||||
service_start("yunohost-firewall")
|
||||
|
||||
regen_conf(force=True)
|
||||
regen_conf(names=["ssh"], force=True)
|
||||
|
||||
# Restore original ssh conf, as chosen by the
|
||||
# admin during the initial install
|
||||
|
@ -382,10 +396,8 @@ def tools_postinstall(operation_logger, domain, password, ignore_dyndns=False,
|
|||
original_sshd_conf = '/etc/ssh/sshd_config.before_yunohost'
|
||||
if os.path.exists(original_sshd_conf):
|
||||
os.rename(original_sshd_conf, '/etc/ssh/sshd_config')
|
||||
else:
|
||||
# We need to explicitly ask the regen conf to regen ssh
|
||||
# (by default, i.e. first argument = None, it won't because it's too touchy)
|
||||
regen_conf(names=["ssh"], force=True)
|
||||
|
||||
regen_conf(force=True)
|
||||
|
||||
logger.success(m18n.n('yunohost_configured'))
|
||||
|
||||
|
@ -416,7 +428,7 @@ def tools_update(apps=False, system=False):
|
|||
|
||||
# Update APT cache
|
||||
# LC_ALL=C is here to make sure the results are in English
|
||||
command = "LC_ALL=C apt update"
|
||||
command = "LC_ALL=C apt-get update -o Acquire::Retries=3"
|
||||
|
||||
# Filter boring message about "apt not having a stable CLI interface"
|
||||
# Also keep track of whether or not we encountered a warning...
|
||||
|
@ -492,7 +504,7 @@ def _list_upgradable_apps():
|
|||
|
||||
|
||||
@is_unit_operation()
|
||||
def tools_upgrade(operation_logger, apps=None, system=False):
|
||||
def tools_upgrade(operation_logger, apps=None, system=False, allow_yunohost_upgrade=True):
|
||||
"""
|
||||
Update apps & package cache, then display changelog
|
||||
|
||||
|
@ -555,7 +567,7 @@ def tools_upgrade(operation_logger, apps=None, system=False):
|
|||
|
||||
# Critical packages are packages that we can't just upgrade
|
||||
# randomly from yunohost itself... upgrading them is likely to
|
||||
critical_packages = ("moulinette", "yunohost", "yunohost-admin", "ssowat", "python")
|
||||
critical_packages = ["moulinette", "yunohost", "yunohost-admin", "ssowat"]
|
||||
|
||||
critical_packages_upgradable = [p["name"] for p in upgradables if p["name"] in critical_packages]
|
||||
noncritical_packages_upgradable = [p["name"] for p in upgradables if p["name"] not in critical_packages]
|
||||
|
@ -589,12 +601,17 @@ def tools_upgrade(operation_logger, apps=None, system=False):
|
|||
|
||||
logger.debug("Running apt command :\n{}".format(dist_upgrade))
|
||||
|
||||
|
||||
def is_relevant(l):
|
||||
return "Reading database ..." not in l.rstrip()
|
||||
irrelevants = [
|
||||
"service sudo-ldap already provided",
|
||||
"Reading database ..."
|
||||
]
|
||||
return all(i not in l.rstrip() for i in irrelevants)
|
||||
|
||||
callbacks = (
|
||||
lambda l: logger.info("+ " + l.rstrip() + "\r") if is_relevant(l) else logger.debug(l.rstrip() + "\r"),
|
||||
lambda l: logger.warning(l.rstrip()),
|
||||
lambda l: logger.warning(l.rstrip()) if is_relevant(l) else logger.debug(l.rstrip()),
|
||||
)
|
||||
returncode = call_async_output(dist_upgrade, callbacks, shell=True)
|
||||
if returncode != 0:
|
||||
|
@ -608,7 +625,7 @@ def tools_upgrade(operation_logger, apps=None, system=False):
|
|||
#
|
||||
# Critical packages upgrade
|
||||
#
|
||||
if critical_packages_upgradable:
|
||||
if critical_packages_upgradable and allow_yunohost_upgrade:
|
||||
|
||||
logger.info(m18n.n("tools_upgrade_special_packages"))
|
||||
|
||||
|
|
|
@ -178,19 +178,19 @@ def user_create(operation_logger, username, firstname, lastname, mail, password,
|
|||
fullname = '%s %s' % (firstname, lastname)
|
||||
attr_dict = {
|
||||
'objectClass': ['mailAccount', 'inetOrgPerson', 'posixAccount', 'userPermissionYnh'],
|
||||
'givenName': firstname,
|
||||
'sn': lastname,
|
||||
'displayName': fullname,
|
||||
'cn': fullname,
|
||||
'uid': username,
|
||||
'mail': mail,
|
||||
'maildrop': username,
|
||||
'mailuserquota': mailbox_quota,
|
||||
'userPassword': _hash_user_password(password),
|
||||
'gidNumber': uid,
|
||||
'uidNumber': uid,
|
||||
'homeDirectory': '/home/' + username,
|
||||
'loginShell': '/bin/false'
|
||||
'givenName': [firstname],
|
||||
'sn': [lastname],
|
||||
'displayName': [fullname],
|
||||
'cn': [fullname],
|
||||
'uid': [username],
|
||||
'mail': mail, # NOTE: this one seems to be already a list
|
||||
'maildrop': [username],
|
||||
'mailuserquota': [mailbox_quota],
|
||||
'userPassword': [_hash_user_password(password)],
|
||||
'gidNumber': [uid],
|
||||
'uidNumber': [uid],
|
||||
'homeDirectory': ['/home/' + username],
|
||||
'loginShell': ['/bin/false']
|
||||
}
|
||||
|
||||
# If it is the first user, add some aliases
|
||||
|
@ -208,8 +208,7 @@ def user_create(operation_logger, username, firstname, lastname, mail, password,
|
|||
|
||||
try:
|
||||
# Attempt to create user home folder
|
||||
subprocess.check_call(
|
||||
['su', '-', username, '-c', "''"])
|
||||
subprocess.check_call(["mkhomedir_helper", username])
|
||||
except subprocess.CalledProcessError:
|
||||
if not os.path.isdir('/home/{0}'.format(username)):
|
||||
logger.warning(m18n.n('user_home_creation_failed'),
|
||||
|
@@ -317,21 +316,21 @@ def user_update(operation_logger, username, firstname=None, lastname=None, mail=
    # Get modifications from arguments
    new_attr_dict = {}
    if firstname:
-       new_attr_dict['givenName'] = firstname  # TODO: Validate
-       new_attr_dict['cn'] = new_attr_dict['displayName'] = firstname + ' ' + user['sn'][0]
+       new_attr_dict['givenName'] = [firstname]  # TODO: Validate
+       new_attr_dict['cn'] = new_attr_dict['displayName'] = [firstname + ' ' + user['sn'][0]]

    if lastname:
-       new_attr_dict['sn'] = lastname  # TODO: Validate
-       new_attr_dict['cn'] = new_attr_dict['displayName'] = user['givenName'][0] + ' ' + lastname
+       new_attr_dict['sn'] = [lastname]  # TODO: Validate
+       new_attr_dict['cn'] = new_attr_dict['displayName'] = [user['givenName'][0] + ' ' + lastname]

    if lastname and firstname:
-       new_attr_dict['cn'] = new_attr_dict['displayName'] = firstname + ' ' + lastname
+       new_attr_dict['cn'] = new_attr_dict['displayName'] = [firstname + ' ' + lastname]

    if change_password:
        # Ensure sufficiently complex password
        assert_password_is_strong_enough("user", change_password)

-       new_attr_dict['userPassword'] = _hash_user_password(change_password)
+       new_attr_dict['userPassword'] = [_hash_user_password(change_password)]

    if mail:
        main_domain = _get_maindomain()

@@ -396,7 +395,7 @@ def user_update(operation_logger, username, firstname=None, lastname=None, mail=
            new_attr_dict['maildrop'] = user['maildrop']

    if mailbox_quota is not None:
-       new_attr_dict['mailuserquota'] = mailbox_quota
+       new_attr_dict['mailuserquota'] = [mailbox_quota]

    operation_logger.start()
114 src/yunohost/utils/legacy.py Normal file

@@ -0,0 +1,114 @@
from moulinette import m18n
from yunohost.utils.error import YunohostError
from moulinette.utils.log import getActionLogger
from moulinette.utils.filesystem import read_yaml

from yunohost.user import user_list, user_group_create, user_group_update
from yunohost.app import app_setting, _installed_apps
from yunohost.permission import permission_create, user_permission_update, permission_sync_to_user

logger = getActionLogger('yunohost.legacy')


class SetupGroupPermissions():

    @staticmethod
    def remove_if_exists(target):

        from yunohost.utils.ldap import _get_ldap_interface
        ldap = _get_ldap_interface()

        try:
            objects = ldap.search(target + ",dc=yunohost,dc=org")
        # ldap search will raise an exception if no corresponding object is found >.> ...
        except Exception as e:
            logger.debug("%s does not exist, no need to delete it" % target)
            return

        objects.reverse()
        for o in objects:
            for dn in o["dn"]:
                dn = dn.replace(",dc=yunohost,dc=org", "")
                logger.debug("Deleting old object %s ..." % dn)
                try:
                    ldap.remove(dn)
                except Exception as e:
                    raise YunohostError("migration_0019_failed_to_remove_stale_object", dn=dn, error=e)

    @staticmethod
    def migrate_LDAP_db():

        logger.info(m18n.n("migration_0019_update_LDAP_database"))

        from yunohost.utils.ldap import _get_ldap_interface
        ldap = _get_ldap_interface()

        ldap_map = read_yaml('/usr/share/yunohost/yunohost-config/moulinette/ldap_scheme.yml')

        try:
            SetupGroupPermissions.remove_if_exists("ou=permission")
            SetupGroupPermissions.remove_if_exists('ou=groups')

            attr_dict = ldap_map['parents']['ou=permission']
            ldap.add('ou=permission', attr_dict)

            attr_dict = ldap_map['parents']['ou=groups']
            ldap.add('ou=groups', attr_dict)

            attr_dict = ldap_map['children']['cn=all_users,ou=groups']
            ldap.add('cn=all_users,ou=groups', attr_dict)

            attr_dict = ldap_map['children']['cn=visitors,ou=groups']
            ldap.add('cn=visitors,ou=groups', attr_dict)

            for rdn, attr_dict in ldap_map['depends_children'].items():
                ldap.add(rdn, attr_dict)
        except Exception as e:
            raise YunohostError("migration_0019_LDAP_update_failed", error=e)

        logger.info(m18n.n("migration_0019_create_group"))

        # Create a group for each yunohost user
        user_list = ldap.search('ou=users,dc=yunohost,dc=org',
                                '(&(objectclass=person)(!(uid=root))(!(uid=nobody)))',
                                ['uid', 'uidNumber'])
        for user_info in user_list:
            username = user_info['uid'][0]
            ldap.update('uid=%s,ou=users' % username,
                        {'objectClass': ['mailAccount', 'inetOrgPerson', 'posixAccount', 'userPermissionYnh']})
            user_group_create(username, gid=user_info['uidNumber'][0], primary_group=True, sync_perm=False)
            user_group_update(groupname='all_users', add=username, force=True, sync_perm=False)

    @staticmethod
    def migrate_app_permission(app=None):
        logger.info(m18n.n("migration_0019_migrate_permission"))

        apps = _installed_apps()

        if app:
            if app not in apps:
                logger.error("Can't migrate permission for app %s because it ain't installed..." % app)
                apps = []
            else:
                apps = [app]

        for app in apps:
            permission = app_setting(app, 'allowed_users')
            path = app_setting(app, 'path')
            domain = app_setting(app, 'domain')

            url = "/" if domain and path else None
            if permission:
                known_users = user_list()["users"].keys()
                allowed = [user for user in permission.split(',') if user in known_users]
            else:
                allowed = ["all_users"]
            permission_create(app + ".main", url=url, allowed=allowed, protected=False, sync_perm=False)

            app_setting(app, 'allowed_users', delete=True)

            # Migrate classic public app still using the legacy unprotected_uris
            if app_setting(app, "unprotected_uris") == "/" or app_setting(app, "skipped_uris") == "/":
                user_permission_update(app + ".main", add="visitors", sync_perm=False)

        permission_sync_to_user()
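A hypothetical caller, to show how this class is intended to be driven from a migration — the wrapper function name and the call ordering are assumptions for illustration, not taken from this patch:

    from yunohost.utils.legacy import SetupGroupPermissions

    def run_migration():
        # Rebuild the LDAP groups/permission tree first, then port each
        # installed app's legacy 'allowed_users' setting to a permission.
        SetupGroupPermissions.migrate_LDAP_db()
        SetupGroupPermissions.migrate_app_permission()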
25 tox.ini

@@ -1,25 +1,12 @@
 [tox]
-envlist =
-    py27
-    lint
 skipdist = True
+envlist = py{27,37}-{lint,invalidcode},py37-black

 [testenv]
 skip_install=True
 deps =
-    pytest >= 4.6.3, < 5.0
-    pyyaml >= 5.1.2, < 6.0
-    flake8 >= 3.7.9, < 3.8
-    urllib3
+    py{27,37}-{lint,invalidcode}: flake8
+    py37-black: black
 commands =
-    pytest {posargs}
-
-[testenv:lint]
-skip_install=True
-commands = flake8 src doc data tests
-deps = flake8
-
-[testenv:invalidcode]
-skip_install=True
-commands = flake8 src data --exclude src/yunohost/tests --select F --ignore F401,F841
-deps = flake8
+    py{27,37}-lint: flake8 src doc data tests
+    py{27,37}-invalidcode: flake8 src data --exclude src/yunohost/tests --select F --ignore F401,F841
+    py37-black: black --check --diff src doc data tests
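With the factorized envlist above, individual environments can still be run locally (assuming tox is installed), e.g. `tox -e py27-invalidcode`, `tox -e py37-lint` or `tox -e py37-black`, which matches the per-job `tox -e ...` calls used in the CI configuration.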