Mirror of https://github.com/YunoHost/yunohost.git (synced 2024-09-03 20:06:10 +02:00)
Commit 8a5914c2e7: Merge branch '4.1' into default-value-for-user-args

125 changed files with 4732 additions and 3644 deletions

.github/PULL_REQUEST_TEMPLATE.md (vendored): 7 changed lines

@@ -13,10 +13,3 @@

## How to test

...

## Validation

- [ ] Principle agreement 0/2 :
- [ ] Quick review 0/1 :
- [ ] Simple test 0/1 :
- [ ] Deep review 0/1 :

.gitlab-ci.yml: 134 changed lines

@@ -1,124 +1,22 @@

stages:
  - postinstall
  - build
  - install
  - tests
  - lint
  - doc

########################################
# POSTINSTALL
########################################
default:
  tags:
    - yunohost-ci
  # All jobs are interruptible by default
  interruptible: true

postinstall:
  image: before-postinstall
  stage: postinstall
  script:
    - yunohost tools postinstall -d domain.tld -p the_password --ignore-dyndns
  variables:
    YNH_BUILD_DIR: "ynh-build"

########################################
# TESTS
########################################

.test-stage:
  image: after-postinstall
  stage: tests
  before_script:
    - apt-get install python-pip -y
    - mkdir -p .pip
    - pip install -U pip
    - hash -d pip
    - pip --cache-dir=.pip install pytest pytest-sugar pytest-mock requests-mock mock
    - export PYTEST_ADDOPTS="--color=yes"
  cache:
    paths:
      - .pip
      - src/yunohost/tests/apps
    key: "$CI_JOB_STAGE-$CI_COMMIT_REF_SLUG"

root-tests:
  extends: .test-stage
  script:
    - py.test tests

test-apps:
  extends: .test-stage
  script:
    - cd src/yunohost
    - py.test tests/test_apps.py

test-appscatalog:
  extends: .test-stage
  script:
    - cd src/yunohost
    - py.test tests/test_appscatalog.py

test-appurl:
  extends: .test-stage
  script:
    - cd src/yunohost
    - py.test tests/test_appurl.py

test-backuprestore:
  extends: .test-stage
  script:
    - cd src/yunohost
    - py.test tests/test_backuprestore.py

test-changeurl:
  extends: .test-stage
  script:
    - cd src/yunohost
    - py.test tests/test_changeurl.py

test-permission:
  extends: .test-stage
  script:
    - cd src/yunohost
    - py.test tests/test_permission.py

test-settings:
  extends: .test-stage
  script:
    - cd src/yunohost
    - py.test tests/test_settings.py

test-user-group:
  extends: .test-stage
  script:
    - cd src/yunohost
    - py.test tests/test_user-group.py

test-regenconf:
  extends: .test-stage
  script:
    - cd src/yunohost
    - py.test tests/test_regenconf.py

########################################
# LINTER
########################################

.lint-stage:
  image: before-postinstall
  stage: lint
  before_script:
    - apt-get install python-pip -y
    - mkdir -p .pip
    - pip install -U pip
    - hash -d pip
    - pip --cache-dir=.pip install tox
  cache:
    paths:
      - .pip
      - .tox
    key: "$CI_JOB_STAGE-$CI_COMMIT_REF_SLUG"

lint:
  extends: .lint-stage
  allow_failure: true
  script:
    - tox -e lint

# Disabled, waiting for buster
#format-check:
# extends: .lint-stage
# script:
# - black --check --diff
include:
  - local: .gitlab/ci/build.gitlab-ci.yml
  - local: .gitlab/ci/install.gitlab-ci.yml
  - local: .gitlab/ci/test.gitlab-ci.yml
  - local: .gitlab/ci/lint.gitlab-ci.yml
  - local: .gitlab/ci/doc.gitlab-ci.yml
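
A note on a GitLab CI mechanism these files lean on: job names starting with a dot (.test-stage, .lint-stage, .build-stage, .install-stage) are hidden template jobs that never run on their own, and extends: merges their keys into each concrete job. As a rough sketch of standard GitLab CI behaviour (not part of this diff, and abridged), root-tests above effectively expands to:

    root-tests:
      image: after-postinstall
      stage: tests
      before_script:
        # (abridged) same pip/pytest setup as declared in .test-stage
        - apt-get install python-pip -y
        - pip --cache-dir=.pip install pytest pytest-sugar pytest-mock requests-mock mock
      cache:
        paths:
          - .pip
          - src/yunohost/tests/apps
        key: "$CI_JOB_STAGE-$CI_COMMIT_REF_SLUG"
      script:
        - py.test tests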

.gitlab/ci/build.gitlab-ci.yml (new file): 52 lines

@@ -0,0 +1,52 @@

.build-stage:
  stage: build
  image: "before-install"
  variables:
    YNH_SOURCE: "https://github.com/yunohost"
  before_script:
    - mkdir -p $YNH_BUILD_DIR
  artifacts:
    paths:
      - $YNH_BUILD_DIR/*.deb

.build_script: &build_script
  - cd $YNH_BUILD_DIR/$PACKAGE
  - VERSION=$(dpkg-parsechangelog -S Version 2>/dev/null)
  - VERSION_NIGHTLY="${VERSION}+$(date +%Y%m%d%H%M)"
  - dch --package "${PACKAGE}" --force-bad-version -v "${VERSION_NIGHTLY}" -D "unstable" --force-distribution "Daily build."
  - debuild --no-lintian -us -uc

########################################
# BUILD DEB
########################################

build-yunohost:
  extends: .build-stage
  variables:
    PACKAGE: "yunohost"
  script:
    - git ls-files | xargs tar -czf archive.tar.gz
    - mkdir -p $YNH_BUILD_DIR/$PACKAGE
    - cat archive.tar.gz | tar -xz -C $YNH_BUILD_DIR/$PACKAGE
    - rm archive.tar.gz
    - DEBIAN_FRONTEND=noninteractive apt --assume-yes -o Dpkg::Options::="--force-confold" build-dep $(pwd)/$YNH_BUILD_DIR/$PACKAGE
    - *build_script


build-ssowat:
  extends: .build-stage
  variables:
    PACKAGE: "ssowat"
  script:
    - git clone $YNH_SOURCE/$PACKAGE -b $CI_COMMIT_REF_NAME $YNH_BUILD_DIR/$PACKAGE || git clone $YNH_SOURCE/$PACKAGE $YNH_BUILD_DIR/$PACKAGE
    - DEBIAN_FRONTEND=noninteractive apt --assume-yes -o Dpkg::Options::="--force-confold" build-dep $(pwd)/$YNH_BUILD_DIR/$PACKAGE
    - *build_script

build-moulinette:
  extends: .build-stage
  variables:
    PACKAGE: "moulinette"
  script:
    - git clone $YNH_SOURCE/$PACKAGE -b $CI_COMMIT_REF_NAME $YNH_BUILD_DIR/$PACKAGE || git clone $YNH_SOURCE/$PACKAGE $YNH_BUILD_DIR/$PACKAGE
    - DEBIAN_FRONTEND=noninteractive apt --assume-yes -o Dpkg::Options::="--force-confold" build-dep $(pwd)/$YNH_BUILD_DIR/$PACKAGE
    - *build_script
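
The shared build steps are spliced in with a plain YAML anchor rather than extends:. The line ".build_script: &build_script" defines a named list of commands, and "- *build_script" at the end of each job's script references that list, so the dch/debuild sequence is written only once. A minimal sketch of the same anchor/alias idiom, with made-up names purely for illustration:

    .shared-steps: &shared_steps        # hypothetical anchor, not in this diff
      - echo "step shared by several jobs"

    some-job:
      script:
        - echo "job-specific step"
        - *shared_steps                 # splices the shared list in here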

.gitlab/ci/doc.gitlab-ci.yml (new file): 14 lines

@@ -0,0 +1,14 @@

########################################
# DOC
########################################

generate-helpers-doc:
  stage: doc
  image: "before-install"
  needs: []
  script:
    - cd doc
    - python generate_helper_doc.py
  artifacts:
    paths:
      - doc/helpers.html
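
"needs: []" declares that generate-helpers-doc depends on nothing, so with GitLab's DAG scheduling it can start as soon as the pipeline is created instead of waiting for the build, install and tests stages. The same empty-list idiom works for any standalone job; a hypothetical example (job name made up for illustration):

    changelog-check:
      stage: doc
      needs: []
      script:
        - head debian/changelog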

.gitlab/ci/install.gitlab-ci.yml (new file): 37 lines

@@ -0,0 +1,37 @@

.install-stage:
  stage: install
  needs:
    - job: build-yunohost
      artifacts: true
    - job: build-ssowat
      artifacts: true
    - job: build-moulinette
      artifacts: true

########################################
# INSTALL DEB
########################################

upgrade:
  extends: .install-stage
  image: "after-install"
  script:
    - apt-get update -o Acquire::Retries=3
    - DEBIAN_FRONTEND=noninteractive SUDO_FORCE_REMOVE=yes apt --assume-yes -o Dpkg::Options::="--force-confold" --allow-downgrades install ./$YNH_BUILD_DIR/*.deb


install-postinstall:
  extends: .install-stage
  image: "before-install"
  script:
    - apt-get update -o Acquire::Retries=3
    - DEBIAN_FRONTEND=noninteractive SUDO_FORCE_REMOVE=yes apt --assume-yes -o Dpkg::Options::="--force-confold" --allow-downgrades install ./$YNH_BUILD_DIR/*.deb
    - systemctl -q stop apt-daily.timer
    - systemctl -q stop apt-daily-upgrade.timer
    - systemctl -q stop apt-daily.service
    - systemctl -q stop apt-daily-upgrade.service
    - systemctl -q disable apt-daily.timer
    - systemctl -q disable apt-daily-upgrade.timer
    - systemctl -q disable apt-daily.service
    - systemctl -q disable apt-daily-upgrade.service
    - yunohost tools postinstall -d domain.tld -p the_password --ignore-dyndns
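
The "needs:" entries with "artifacts: true" are what wire the pipeline together: each install job waits only for the three build jobs it lists (not for the whole build stage) and downloads their .deb artifacts into the working directory before its script runs, which is why "install ./$YNH_BUILD_DIR/*.deb" finds packages to install. A minimal sketch of the same pattern, with a made-up job name:

    smoke-check:            # hypothetical job, not part of this diff
      stage: install
      needs:
        - job: build-yunohost
          artifacts: true   # pulls the $YNH_BUILD_DIR/*.deb artifact produced by build-yunohost
      script:
        - ls ./$YNH_BUILD_DIR/*.deb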

.gitlab/ci/lint.gitlab-ci.yml (new file): 24 lines

@@ -0,0 +1,24 @@

########################################
# LINTER
########################################

lint:
  stage: lint
  image: "before-install"
  needs: []
  allow_failure: true
  script:
    - tox -e lint

invalidcode:
  stage: lint
  image: "before-install"
  needs: []
  script:
    - tox -e invalidcode

# Disabled, waiting for buster
#format-check:
# extends: .lint-stage
# script:
# - black --check --diff

.gitlab/ci/test.gitlab-ci.yml (new file): 128 lines

@@ -0,0 +1,128 @@

.install_debs: &install_debs
  - systemctl -q stop apt-daily.timer
  - systemctl -q stop apt-daily-upgrade.timer
  - systemctl -q stop apt-daily.service
  - systemctl -q stop apt-daily-upgrade.service
  - systemctl -q disable apt-daily.timer
  - systemctl -q disable apt-daily-upgrade.timer
  - systemctl -q disable apt-daily.service
  - systemctl -q disable apt-daily-upgrade.service
  - apt-get update -o Acquire::Retries=3
  - DEBIAN_FRONTEND=noninteractive SUDO_FORCE_REMOVE=yes apt --assume-yes -o Dpkg::Options::="--force-confold" --allow-downgrades install ./$YNH_BUILD_DIR/*.deb

.test-stage:
  stage: tests
  image: "after-install"
  variables:
    PYTEST_ADDOPTS: "--color=yes"
  before_script:
    - *install_debs
  cache:
    paths:
      - src/yunohost/tests/apps
    key: "$CI_JOB_STAGE-$CI_COMMIT_REF_SLUG"
  needs:
    - job: build-yunohost
      artifacts: true
    - job: build-ssowat
      artifacts: true
    - job: build-moulinette
      artifacts: true
    - job: upgrade


########################################
# TESTS
########################################

full-tests:
  stage: tests
  image: "before-install"
  variables:
    PYTEST_ADDOPTS: "--color=yes"
  before_script:
    - *install_debs
    - yunohost tools postinstall -d domain.tld -p the_password --ignore-dyndns
  script:
    - pytest --cov=yunohost tests/ src/yunohost/tests/ --junitxml=report.xml
  needs:
    - job: build-yunohost
      artifacts: true
    - job: build-ssowat
      artifacts: true
    - job: build-moulinette
      artifacts: true
  artifacts:
    reports:
      junit: report.xml

root-tests:
  extends: .test-stage
  script:
    - py.test tests

test-apps:
  extends: .test-stage
  script:
    - cd src/yunohost
    - py.test tests/test_apps.py

test-appscatalog:
  extends: .test-stage
  script:
    - cd src/yunohost
    - py.test tests/test_appscatalog.py

test-appurl:
  extends: .test-stage
  script:
    - cd src/yunohost
    - py.test tests/test_appurl.py

test-apps-arguments-parsing:
  extends: .test-stage
  script:
    - cd src/yunohost
    - py.test tests/test_apps_arguments_parsing.py

test-backuprestore:
  extends: .test-stage
  script:
    - cd src/yunohost
    - py.test tests/test_backuprestore.py

test-changeurl:
  extends: .test-stage
  script:
    - cd src/yunohost
    - py.test tests/test_changeurl.py

test-permission:
  extends: .test-stage
  script:
    - cd src/yunohost
    - py.test tests/test_permission.py

test-settings:
  extends: .test-stage
  script:
    - cd src/yunohost
    - py.test tests/test_settings.py

test-user-group:
  extends: .test-stage
  script:
    - cd src/yunohost
    - py.test tests/test_user-group.py

test-regenconf:
  extends: .test-stage
  script:
    - cd src/yunohost
    - py.test tests/test_regenconf.py

test-service:
  extends: .test-stage
  script:
    - cd src/yunohost
    - py.test tests/test_service.py
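
Since every per-topic job only overrides "script:", covering an additional test file should only take a few lines following the same pattern; a hypothetical example (the job name and test file are made up):

    test-example:
      extends: .test-stage
      script:
        - cd src/yunohost
        - py.test tests/test_example.py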

bin/yunohost: 173 changed lines

@@ -5,70 +5,28 @@ import os
|
|||
import sys
|
||||
import argparse
|
||||
|
||||
# Either we are in a development environment or not
|
||||
IN_DEVEL = False
|
||||
sys.path.insert(0, "/usr/lib/moulinette/")
|
||||
import yunohost
|
||||
|
||||
# Level for which loggers will log
|
||||
LOGGERS_LEVEL = 'DEBUG'
|
||||
TTY_LOG_LEVEL = 'INFO'
|
||||
|
||||
# Handlers that will be used by loggers
|
||||
# - file: log to the file LOG_DIR/LOG_FILE
|
||||
# - tty: log to current tty
|
||||
LOGGERS_HANDLERS = ['file', 'tty']
|
||||
|
||||
# Directory and file to be used by logging
|
||||
LOG_DIR = '/var/log/yunohost'
|
||||
LOG_FILE = 'yunohost-cli.log'
|
||||
|
||||
# Check and load - as needed - development environment
|
||||
if not __file__.startswith('/usr/'):
|
||||
IN_DEVEL = True
|
||||
if IN_DEVEL:
|
||||
basedir = os.path.abspath('%s/../' % os.path.dirname(__file__))
|
||||
if os.path.isdir(os.path.join(basedir, 'moulinette')):
|
||||
sys.path.insert(0, basedir)
|
||||
LOG_DIR = os.path.join(basedir, 'log')
|
||||
|
||||
|
||||
import moulinette
|
||||
from moulinette.actionsmap import ActionsMap
|
||||
from moulinette.interfaces.cli import colorize, get_locale
|
||||
|
||||
|
||||
# Initialization & helpers functions -----------------------------------
|
||||
|
||||
def _die(message, title='Error:'):
|
||||
"""Print error message and exit"""
|
||||
print('%s %s' % (colorize(title, 'red'), message))
|
||||
sys.exit(1)
|
||||
|
||||
def _parse_cli_args():
|
||||
"""Parse additional arguments for the cli"""
|
||||
parser = argparse.ArgumentParser(add_help=False)
|
||||
parser.add_argument('--no-cache',
|
||||
action='store_false', default=True, dest='use_cache',
|
||||
help="Don't use actions map cache",
|
||||
)
|
||||
parser.add_argument('--output-as',
|
||||
choices=['json', 'plain', 'none'], default=None,
|
||||
help="Output result in another format",
|
||||
help="Output result in another format"
|
||||
)
|
||||
parser.add_argument('--debug',
|
||||
action='store_true', default=False,
|
||||
help="Log and print debug messages",
|
||||
help="Log and print debug messages"
|
||||
)
|
||||
parser.add_argument('--quiet',
|
||||
action='store_true', default=False,
|
||||
help="Don't produce any output",
|
||||
help="Don't produce any output"
|
||||
)
|
||||
parser.add_argument('--timeout',
|
||||
type=int, default=None,
|
||||
help="Number of seconds before this command will timeout because it can't acquire the lock (meaning that another command is currently running), by default there is no timeout and the command will wait until it can get the lock",
|
||||
)
|
||||
parser.add_argument('--admin-password',
|
||||
default=None, dest='password', metavar='PASSWORD',
|
||||
help="The admin password to use to authenticate",
|
||||
help="Number of seconds before this command will timeout because it can't acquire the lock (meaning that another command is currently running), by default there is no timeout and the command will wait until it can get the lock"
|
||||
)
|
||||
# deprecated arguments
|
||||
parser.add_argument('--plain',
|
||||
|
@ -88,96 +46,6 @@ def _parse_cli_args():
|
|||
|
||||
return (parser, opts, args)
|
||||
|
||||
def _init_moulinette(debug=False, quiet=False):
|
||||
"""Configure logging and initialize the moulinette"""
|
||||
# Define loggers handlers
|
||||
handlers = set(LOGGERS_HANDLERS)
|
||||
if quiet and 'tty' in handlers:
|
||||
handlers.remove('tty')
|
||||
elif 'tty' not in handlers:
|
||||
handlers.append('tty')
|
||||
|
||||
root_handlers = set(handlers)
|
||||
if not debug and 'tty' in root_handlers:
|
||||
root_handlers.remove('tty')
|
||||
|
||||
# Define loggers level
|
||||
level = LOGGERS_LEVEL
|
||||
tty_level = TTY_LOG_LEVEL
|
||||
if debug:
|
||||
tty_level = 'DEBUG'
|
||||
|
||||
# Custom logging configuration
|
||||
logging = {
|
||||
'version': 1,
|
||||
'disable_existing_loggers': True,
|
||||
'formatters': {
|
||||
'tty-debug': {
|
||||
'format': '%(relativeCreated)-4d %(fmessage)s'
|
||||
},
|
||||
'precise': {
|
||||
'format': '%(asctime)-15s %(levelname)-8s %(name)s %(funcName)s - %(fmessage)s'
|
||||
},
|
||||
},
|
||||
'filters': {
|
||||
'action': {
|
||||
'()': 'moulinette.utils.log.ActionFilter',
|
||||
},
|
||||
},
|
||||
'handlers': {
|
||||
'tty': {
|
||||
'level': tty_level,
|
||||
'class': 'moulinette.interfaces.cli.TTYHandler',
|
||||
'formatter': 'tty-debug' if debug else '',
|
||||
},
|
||||
'file': {
|
||||
'class': 'logging.FileHandler',
|
||||
'formatter': 'precise',
|
||||
'filename': '%s/%s' % (LOG_DIR, LOG_FILE),
|
||||
'filters': ['action'],
|
||||
},
|
||||
},
|
||||
'loggers': {
|
||||
'yunohost': {
|
||||
'level': level,
|
||||
'handlers': handlers,
|
||||
'propagate': False,
|
||||
},
|
||||
'moulinette': {
|
||||
'level': level,
|
||||
'handlers': [],
|
||||
'propagate': True,
|
||||
},
|
||||
'moulinette.interface': {
|
||||
'level': level,
|
||||
'handlers': handlers,
|
||||
'propagate': False,
|
||||
},
|
||||
},
|
||||
'root': {
|
||||
'level': level,
|
||||
'handlers': root_handlers,
|
||||
},
|
||||
}
|
||||
|
||||
# Create log directory
|
||||
if not os.path.isdir(LOG_DIR):
|
||||
try:
|
||||
os.makedirs(LOG_DIR, 0750)
|
||||
except os.error as e:
|
||||
_die(str(e))
|
||||
|
||||
# Initialize moulinette
|
||||
moulinette.init(logging_config=logging, _from_source=IN_DEVEL)
|
||||
|
||||
def _retrieve_namespaces():
|
||||
"""Return the list of namespaces to load"""
|
||||
ret = ['yunohost']
|
||||
for n in ActionsMap.get_namespaces():
|
||||
# Append YunoHost modules
|
||||
if n.startswith('ynh_'):
|
||||
ret.append(n)
|
||||
return ret
|
||||
|
||||
# Stupid PATH management because sometimes (e.g. some cron job) PATH is only /usr/bin:/bin ...
|
||||
default_path = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
|
||||
|
@ -188,33 +56,18 @@ if os.environ["PATH"] != default_path:
|
|||
|
||||
if __name__ == '__main__':
|
||||
if os.geteuid() != 0:
|
||||
# since moulinette isn't initialized, we can't use m18n here
|
||||
sys.stderr.write("\033[1;31mError:\033[0m yunohost command must be " \
|
||||
sys.stderr.write("\033[1;31mError:\033[0m yunohost command must be "
|
||||
"run as root or with sudo.\n")
|
||||
sys.exit(1)
|
||||
|
||||
parser, opts, args = _parse_cli_args()
|
||||
_init_moulinette(opts.debug, opts.quiet)
|
||||
|
||||
# Check that YunoHost is installed
|
||||
if not os.path.isfile('/etc/yunohost/installed') and \
|
||||
(len(args) < 2 or (args[0] +' '+ args[1] != 'tools postinstall' and \
|
||||
args[0] +' '+ args[1] != 'backup restore' and \
|
||||
args[0] +' '+ args[1] != 'log display')):
|
||||
|
||||
from moulinette import m18n
|
||||
# Init i18n
|
||||
m18n.load_namespace('yunohost')
|
||||
m18n.set_locale(get_locale())
|
||||
|
||||
# Print error and exit
|
||||
_die(m18n.n('yunohost_not_installed'), m18n.g('error'))
|
||||
|
||||
# Execute the action
|
||||
ret = moulinette.cli(
|
||||
_retrieve_namespaces(), args,
|
||||
use_cache=opts.use_cache, output_as=opts.output_as,
|
||||
password=opts.password, parser_kwargs={'top_parser': parser},
|
||||
yunohost.cli(
|
||||
debug=opts.debug,
|
||||
quiet=opts.quiet,
|
||||
output_as=opts.output_as,
|
||||
timeout=opts.timeout,
|
||||
args=args,
|
||||
parser=parser
|
||||
)
|
||||
sys.exit(ret)
|
||||
|
|

bin/yunohost-api: 172 changed lines

@@ -1,52 +1,16 @@
|
|||
#! /usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import os
|
||||
import sys
|
||||
import argparse
|
||||
|
||||
# Either we are in a development environment or not
|
||||
IN_DEVEL = False
|
||||
sys.path.insert(0, "/usr/lib/moulinette/")
|
||||
import yunohost
|
||||
|
||||
# Default server configuration
|
||||
DEFAULT_HOST = 'localhost'
|
||||
DEFAULT_PORT = 6787
|
||||
|
||||
# Level for which loggers will log
|
||||
LOGGERS_LEVEL = 'DEBUG'
|
||||
API_LOGGER_LEVEL = 'INFO'
|
||||
|
||||
# Handlers that will be used by loggers
|
||||
# - file: log to the file LOG_DIR/LOG_FILE
|
||||
# - api: serve logs through the api
|
||||
# - console: log to stderr
|
||||
LOGGERS_HANDLERS = ['file', 'api']
|
||||
|
||||
# Directory and file to be used by logging
|
||||
LOG_DIR = '/var/log/yunohost'
|
||||
LOG_FILE = 'yunohost-api.log'
|
||||
|
||||
# Check and load - as needed - development environment
|
||||
if not __file__.startswith('/usr/'):
|
||||
IN_DEVEL = True
|
||||
if IN_DEVEL:
|
||||
basedir = os.path.abspath('%s/../' % os.path.dirname(__file__))
|
||||
if os.path.isdir(os.path.join(basedir, 'moulinette')):
|
||||
sys.path.insert(0, basedir)
|
||||
LOG_DIR = os.path.join(basedir, 'log')
|
||||
|
||||
|
||||
import moulinette
|
||||
from moulinette.actionsmap import ActionsMap
|
||||
from moulinette.interfaces.cli import colorize
|
||||
|
||||
|
||||
# Initialization & helpers functions -----------------------------------
|
||||
|
||||
def _die(message, title='Error:'):
|
||||
"""Print error message and exit"""
|
||||
print('%s %s' % (colorize(title, 'red'), message))
|
||||
sys.exit(1)
|
||||
|
||||
def _parse_api_args():
|
||||
"""Parse main arguments for the api"""
|
||||
|
@ -62,149 +26,19 @@ def _parse_api_args():
|
|||
action='store', default=DEFAULT_PORT, type=int,
|
||||
help="Port to listen on (default: %d)" % DEFAULT_PORT,
|
||||
)
|
||||
srv_group.add_argument('--no-websocket',
|
||||
action='store_true', default=True, dest='use_websocket',
|
||||
help="Serve without WebSocket support, used to handle "
|
||||
"asynchronous responses such as the messages",
|
||||
)
|
||||
glob_group = parser.add_argument_group('global arguments')
|
||||
glob_group.add_argument('--no-cache',
|
||||
action='store_false', default=True, dest='use_cache',
|
||||
help="Don't use actions map cache",
|
||||
)
|
||||
glob_group.add_argument('--debug',
|
||||
action='store_true', default=False,
|
||||
help="Set log level to DEBUG",
|
||||
)
|
||||
glob_group.add_argument('--verbose',
|
||||
action='store_true', default=False,
|
||||
help="Be verbose in the output",
|
||||
)
|
||||
glob_group.add_argument('--help',
|
||||
action='help', help="Show this help message and exit",
|
||||
)
|
||||
|
||||
return parser.parse_args()
|
||||
|
||||
def _init_moulinette(use_websocket=True, debug=False, verbose=False):
|
||||
"""Configure logging and initialize the moulinette"""
|
||||
# Define loggers handlers
|
||||
handlers = set(LOGGERS_HANDLERS)
|
||||
if not use_websocket and 'api' in handlers:
|
||||
handlers.remove('api')
|
||||
if verbose and 'console' not in handlers:
|
||||
handlers.add('console')
|
||||
root_handlers = handlers - set(['api'])
|
||||
|
||||
# Define loggers level
|
||||
level = LOGGERS_LEVEL
|
||||
api_level = API_LOGGER_LEVEL
|
||||
if debug:
|
||||
level = 'DEBUG'
|
||||
api_level = 'DEBUG'
|
||||
|
||||
# Custom logging configuration
|
||||
logging = {
|
||||
'version': 1,
|
||||
'disable_existing_loggers': True,
|
||||
'formatters': {
|
||||
'console': {
|
||||
'format': '%(relativeCreated)-5d %(levelname)-8s %(name)s %(funcName)s - %(fmessage)s'
|
||||
},
|
||||
'precise': {
|
||||
'format': '%(asctime)-15s %(levelname)-8s %(name)s %(funcName)s - %(fmessage)s'
|
||||
},
|
||||
},
|
||||
'filters': {
|
||||
'action': {
|
||||
'()': 'moulinette.utils.log.ActionFilter',
|
||||
},
|
||||
},
|
||||
'handlers': {
|
||||
'api': {
|
||||
'level': api_level,
|
||||
'class': 'moulinette.interfaces.api.APIQueueHandler',
|
||||
},
|
||||
'file': {
|
||||
'class': 'logging.handlers.WatchedFileHandler',
|
||||
'formatter': 'precise',
|
||||
'filename': '%s/%s' % (LOG_DIR, LOG_FILE),
|
||||
'filters': ['action'],
|
||||
},
|
||||
'console': {
|
||||
'class': 'logging.StreamHandler',
|
||||
'formatter': 'console',
|
||||
'stream': 'ext://sys.stdout',
|
||||
'filters': ['action'],
|
||||
},
|
||||
},
|
||||
'loggers': {
|
||||
'yunohost': {
|
||||
'level': level,
|
||||
'handlers': handlers,
|
||||
'propagate': False,
|
||||
},
|
||||
'moulinette': {
|
||||
'level': level,
|
||||
'handlers': [],
|
||||
'propagate': True,
|
||||
},
|
||||
'gnupg': {
|
||||
'level': 'INFO',
|
||||
'handlers': [],
|
||||
'propagate': False,
|
||||
},
|
||||
},
|
||||
'root': {
|
||||
'level': level,
|
||||
'handlers': root_handlers,
|
||||
},
|
||||
}
|
||||
|
||||
# Create log directory
|
||||
if not os.path.isdir(LOG_DIR):
|
||||
try:
|
||||
os.makedirs(LOG_DIR, 0750)
|
||||
except os.error as e:
|
||||
_die(str(e))
|
||||
|
||||
# Initialize moulinette
|
||||
moulinette.init(logging_config=logging, _from_source=IN_DEVEL)
|
||||
|
||||
def _retrieve_namespaces():
|
||||
"""Return the list of namespaces to load"""
|
||||
ret = ['yunohost']
|
||||
for n in ActionsMap.get_namespaces():
|
||||
# Append YunoHost modules
|
||||
if n.startswith('ynh_'):
|
||||
ret.append(n)
|
||||
return ret
|
||||
|
||||
|
||||
# Callbacks for additional routes --------------------------------------
|
||||
|
||||
def is_installed():
|
||||
"""
|
||||
Check whether YunoHost is installed or not
|
||||
|
||||
"""
|
||||
installed = False
|
||||
if os.path.isfile('/etc/yunohost/installed'):
|
||||
installed = True
|
||||
return { 'installed': installed }
|
||||
|
||||
|
||||
# Main action ----------------------------------------------------------
|
||||
|
||||
if __name__ == '__main__':
|
||||
opts = _parse_api_args()
|
||||
_init_moulinette(opts.use_websocket, opts.debug, opts.verbose)
|
||||
|
||||
# Run the server
|
||||
ret = moulinette.api(
|
||||
_retrieve_namespaces(),
|
||||
host=opts.host, port=opts.port, routes={
|
||||
('GET', '/installed'): is_installed,
|
||||
}, use_cache=opts.use_cache, use_websocket=opts.use_websocket
|
||||
)
|
||||
sys.exit(ret)
|
||||
yunohost.api(debug=opts.debug, host=opts.host, port=opts.port)
|
||||
|
|
|
@ -996,7 +996,6 @@ service:
|
|||
choices:
|
||||
- file
|
||||
- systemd
|
||||
default: file
|
||||
--test_status:
|
||||
help: Specify a custom bash command to check the status of the service. Note that it only makes sense to specify this if the corresponding systemd service does not return the proper information already.
|
||||
--test_conf:
|
||||
|
@ -1692,6 +1691,9 @@ diagnosis:
|
|||
--share:
|
||||
help: Share the logs using yunopaste
|
||||
action: store_true
|
||||
--human-readable:
|
||||
help: Show a human-readable output
|
||||
action: store_true
|
||||
|
||||
run:
|
||||
action_help: Run diagnosis
|
||||
|
@ -1706,6 +1708,9 @@ diagnosis:
|
|||
--except-if-never-ran-yet:
|
||||
help: Don't run anything if diagnosis never ran yet ... (this is meant to be used by the webadmin)
|
||||
action: store_true
|
||||
--email:
|
||||
help: Send an email to root with issues found (this is meant to be used by cron job)
|
||||
action: store_true
|
||||
|
||||
ignore:
|
||||
action_help: Configure some diagnosis results to be ignored and therefore not considered as actual issues
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
# Requires YunoHost version 3.3.1 or higher.
|
||||
ynh_wait_dpkg_free() {
|
||||
local try
|
||||
set +o xtrace # set +x
|
||||
# With seq 1 17, timeout will be almost 30 minutes
|
||||
for try in `seq 1 17`
|
||||
do
|
||||
|
@ -32,13 +33,16 @@ ynh_wait_dpkg_free() {
|
|||
then
|
||||
# If so, that a remaining of dpkg.
|
||||
ynh_print_err "E: dpkg was interrupted, you must manually run 'sudo dpkg --configure -a' to correct the problem."
|
||||
set -o xtrace # set -x
|
||||
return 1
|
||||
fi
|
||||
done 9<<< "$(ls -1 $dpkg_dir)"
|
||||
set -o xtrace # set -x
|
||||
return 0
|
||||
fi
|
||||
done
|
||||
echo "apt still used, but timeout reached !"
|
||||
set -o xtrace # set -x
|
||||
}
|
||||
|
||||
# Check either a package is installed or not
|
||||
|
@ -96,7 +100,7 @@ ynh_package_version() {
|
|||
# Requires YunoHost version 2.4.0.3 or higher.
|
||||
ynh_apt() {
|
||||
ynh_wait_dpkg_free
|
||||
LC_ALL=C DEBIAN_FRONTEND=noninteractive apt-get --assume-yes $@
|
||||
LC_ALL=C DEBIAN_FRONTEND=noninteractive apt-get --assume-yes --quiet -o=Acquire::Retries=3 -o=Dpkg::Use-Pty=0 $@
|
||||
}
|
||||
|
||||
# Update package index files
|
||||
|
@ -256,15 +260,17 @@ ynh_install_app_dependencies () {
|
|||
# And we have packages from sury installed (7.0.33-10+weirdshiftafter instead of 7.0.33-0 on debian)
|
||||
if dpkg --list | grep "php7.0" | grep --quiet --invert-match "7.0.33-0+deb9"
|
||||
then
|
||||
# And sury ain't already installed
|
||||
if ! grep --line-number --recursive --quiet "sury" /etc/apt/sources.list*
|
||||
# And sury ain't already in sources.lists
|
||||
if ! grep --recursive --quiet "^ *deb.*sury" /etc/apt/sources.list*
|
||||
then
|
||||
# Re-add sury
|
||||
ynh_install_extra_repo --repo="https://packages.sury.org/php/ $(ynh_get_debian_release) main" --key="https://packages.sury.org/php/apt.gpg" --name=extra_php_version
|
||||
ynh_install_extra_repo --repo="https://packages.sury.org/php/ $(ynh_get_debian_release) main" --key="https://packages.sury.org/php/apt.gpg" --name=extra_php_version --priority=600
|
||||
|
||||
# Pin this sury repository to prevent sury of doing shit
|
||||
ynh_pin_repo --package="*" --pin="origin \"packages.sury.org\"" --priority=200 --name=extra_php_version
|
||||
ynh_pin_repo --package="php${YNH_DEFAULT_PHP_VERSION}*" --pin="origin \"packages.sury.org\"" --priority=600 --name=extra_php_version --append
|
||||
for package_to_not_upgrade in "php" "php-fpm" "php-mysql" "php-xml" "php-zip" "php-mbstring" "php-ldap" "php-gd" "php-curl" "php-bz2" "php-json" "php-sqlite3" "php-intl" "openssl" "libssl1.1" "libssl-dev"
|
||||
do
|
||||
ynh_pin_repo --package="$package_to_not_upgrade" --pin="origin \"packages.sury.org\"" --priority="-1" --name=extra_php_version --append
|
||||
done
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
@ -329,8 +335,6 @@ ynh_remove_app_dependencies () {
|
|||
ynh_package_autopurge ${dep_app}-ynh-deps # Remove the fake package and its dependencies if they not still used.
|
||||
}
|
||||
|
||||
#=================================================
|
||||
|
||||
# Install packages from an extra repository properly.
|
||||
#
|
||||
# usage: ynh_install_extra_app_dependencies --repo="repo" --package="dep1 dep2" [--key=key_url] [--name=name]
|
||||
|
@ -463,8 +467,8 @@ ynh_remove_extra_repo () {
|
|||
|
||||
ynh_secure_remove "/etc/apt/sources.list.d/$name.list"
|
||||
ynh_secure_remove "/etc/apt/preferences.d/$name"
|
||||
ynh_secure_remove "/etc/apt/trusted.gpg.d/$name.gpg"
|
||||
ynh_secure_remove "/etc/apt/trusted.gpg.d/$name.asc"
|
||||
ynh_secure_remove "/etc/apt/trusted.gpg.d/$name.gpg" > /dev/null
|
||||
ynh_secure_remove "/etc/apt/trusted.gpg.d/$name.asc" > /dev/null
|
||||
|
||||
# Update the list of package to exclude the old repo
|
||||
ynh_package_update
|
||||
|
|
|
@ -134,7 +134,7 @@ EOF
|
|||
|
||||
ynh_systemd_action --service_name=fail2ban --action=reload --line_match="(Started|Reloaded) Fail2Ban Service" --log_path=systemd
|
||||
|
||||
local fail2ban_error="$(journalctl --unit=fail2ban | tail --lines=50 | grep "WARNING.*$app.*")"
|
||||
local fail2ban_error="$(journalctl --no-hostname --unit=fail2ban | tail --lines=50 | grep "WARNING.*$app.*")"
|
||||
if [[ -n "$fail2ban_error" ]]
|
||||
then
|
||||
ynh_print_err --message="Fail2ban failed to load the jail for $app"
|
||||
|
|
|
@ -80,10 +80,10 @@ ynh_handle_getopts_args () {
|
|||
arguments[arg]="${arguments[arg]//--${args_array[$option_flag]}-/--${args_array[$option_flag]}\\TOBEREMOVED\\-}"
|
||||
# And replace long option (value of the option_flag) by the short option, the option_flag itself
|
||||
# (e.g. for [u]=user, --user will be -u)
|
||||
# Replace long option with =
|
||||
arguments[arg]="${arguments[arg]//--${args_array[$option_flag]}/-${option_flag} }"
|
||||
# And long option without =
|
||||
arguments[arg]="${arguments[arg]//--${args_array[$option_flag]%=}/-${option_flag}}"
|
||||
# Replace long option with = (match the beginning of the argument)
|
||||
arguments[arg]="$(echo "${arguments[arg]}" | sed "s/^--${args_array[$option_flag]}/-${option_flag} /")"
|
||||
# And long option without = (match the whole line)
|
||||
arguments[arg]="$(echo "${arguments[arg]}" | sed "s/^--${args_array[$option_flag]%=}$/-${option_flag} /")"
|
||||
done
|
||||
done
|
||||
|
||||
|
|
|
@ -80,7 +80,7 @@ ynh_print_warn () {
|
|||
# Manage arguments with getopts
|
||||
ynh_handle_getopts_args "$@"
|
||||
|
||||
ynh_print_log "\e[93m\e[1m[WARN]\e[0m ${message}" >&2
|
||||
ynh_print_log "${message}" >&2
|
||||
}
|
||||
|
||||
# Print an error on stderr
|
||||
|
@ -97,7 +97,7 @@ ynh_print_err () {
|
|||
# Manage arguments with getopts
|
||||
ynh_handle_getopts_args "$@"
|
||||
|
||||
ynh_print_log "\e[91m\e[1m[ERR]\e[0m ${message}" >&2
|
||||
ynh_print_log "[Error] ${message}" >&2
|
||||
}
|
||||
|
||||
# Execute a command and print the result as an error
|
||||
|
@ -332,7 +332,7 @@ ynh_debug () {
|
|||
|
||||
if [ -n "$message" ]
|
||||
then
|
||||
ynh_print_log "\e[34m\e[1m[DEBUG]\e[0m ${message}" >&2
|
||||
ynh_print_log "[Debug] ${message}" >&2
|
||||
fi
|
||||
|
||||
if [ "$trace" == "1" ]
|
||||
|
|
|
@ -44,8 +44,13 @@ ynh_mysql_execute_as_root() {
|
|||
ynh_handle_getopts_args "$@"
|
||||
database="${database:-}"
|
||||
|
||||
if [ -n "$database" ]
|
||||
then
|
||||
database="--database=$database"
|
||||
fi
|
||||
|
||||
ynh_mysql_connect_as --user="root" --password="$(cat $MYSQL_ROOT_PWD_FILE)" \
|
||||
--database="$database" <<< "$sql"
|
||||
$database <<< "$sql"
|
||||
}
|
||||
|
||||
# Execute a command from a file as root user
|
||||
|
@ -65,8 +70,14 @@ ynh_mysql_execute_file_as_root() {
|
|||
ynh_handle_getopts_args "$@"
|
||||
database="${database:-}"
|
||||
|
||||
if [ -n "$database" ]
|
||||
then
|
||||
database="--database=$database"
|
||||
fi
|
||||
|
||||
|
||||
ynh_mysql_connect_as --user="root" --password="$(cat $MYSQL_ROOT_PWD_FILE)" \
|
||||
--database="$database" < "$file"
|
||||
$database < "$file"
|
||||
}
|
||||
|
||||
# Create a database and grant optionnaly privilegies to a user
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
#!/bin/bash
|
||||
|
||||
n_version=6.5.1
|
||||
n_install_dir="/opt/node_n"
|
||||
node_version_path="$n_install_dir/n/versions/node"
|
||||
# N_PREFIX is the directory of n, it needs to be loaded as a environment variable.
|
||||
|
@ -16,8 +17,8 @@ ynh_install_n () {
|
|||
ynh_print_info --message="Installation of N - Node.js version management"
|
||||
# Build an app.src for n
|
||||
mkdir --parents "../conf"
|
||||
echo "SOURCE_URL=https://github.com/tj/n/archive/v4.1.0.tar.gz
|
||||
SOURCE_SUM=3983fa3f00d4bf85ba8e21f1a590f6e28938093abe0bb950aeea52b1717471fc" > "../conf/n.src"
|
||||
echo "SOURCE_URL=https://github.com/tj/n/archive/v${n_version}.tar.gz
|
||||
SOURCE_SUM=5833f15893b9951a9ed59487e87b6c181d96b83a525846255872c4f92f0d25dd" > "../conf/n.src"
|
||||
# Download and extract n
|
||||
ynh_setup_source --dest_dir="$n_install_dir/git" --source_id=n
|
||||
# Install n
|
||||
|
@ -125,7 +126,10 @@ ynh_install_nodejs () {
|
|||
test -x /usr/bin/npm && mv /usr/bin/npm /usr/bin/npm_n
|
||||
|
||||
# If n is not previously setup, install it
|
||||
if ! test $(n --version > /dev/null 2>&1)
|
||||
if ! $n_install_dir/bin/n --version > /dev/null 2>&1
|
||||
then
|
||||
ynh_install_n
|
||||
elif dpkg --compare-versions "$($n_install_dir/bin/n --version)" lt $n_version
|
||||
then
|
||||
ynh_install_n
|
||||
fi
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
#!/bin/bash
|
||||
|
||||
readonly YNH_DEFAULT_PHP_VERSION=7.0
|
||||
readonly YNH_DEFAULT_PHP_VERSION=7.3
|
||||
# Declare the actual php version to use.
|
||||
# A packager willing to use another version of php can override the variable into its _common.sh.
|
||||
YNH_PHP_VERSION=${YNH_PHP_VERSION:-$YNH_DEFAULT_PHP_VERSION}
|
||||
|
@ -148,8 +148,14 @@ ynh_add_fpm_config () {
|
|||
|
||||
if [ $use_template -eq 1 ]
|
||||
then
|
||||
# Usage 1, use the template in ../conf/php-fpm.conf
|
||||
cp ../conf/php-fpm.conf "$finalphpconf"
|
||||
# Usage 1, use the template in conf/php-fpm.conf
|
||||
local phpfpm_path="../conf/php-fpm.conf"
|
||||
if [ ! -e "$phpfpm_path" ]; then
|
||||
phpfpm_path="../settings/conf/php-fpm.conf" # Into the restore script, the php-fpm template is not at the same place
|
||||
fi
|
||||
# Make sure now that the template indeed exists
|
||||
[ -e "$phpfpm_path" ] || ynh_die --message="Unable to find template to configure php-fpm."
|
||||
cp "$phpfpm_path" "$finalphpconf"
|
||||
ynh_replace_string --match_string="__NAMETOCHANGE__" --replace_string="$app" --target_file="$finalphpconf"
|
||||
ynh_replace_string --match_string="__FINALPATH__" --replace_string="$final_path" --target_file="$finalphpconf"
|
||||
ynh_replace_string --match_string="__USER__" --replace_string="$app" --target_file="$finalphpconf"
|
||||
|
@ -297,7 +303,10 @@ ynh_remove_fpm_config () {
|
|||
fi
|
||||
|
||||
ynh_secure_remove --file="$fpm_config_dir/pool.d/$app.conf"
|
||||
ynh_exec_warn_less ynh_secure_remove --file="$fpm_config_dir/conf.d/20-$app.ini"
|
||||
if [ -e $fpm_config_dir/conf.d/20-$app.ini ]
|
||||
then
|
||||
ynh_secure_remove --file="$fpm_config_dir/conf.d/20-$app.ini"
|
||||
fi
|
||||
|
||||
# If the php version used is not the default version for YunoHost
|
||||
if [ "$phpversion" != "$YNH_DEFAULT_PHP_VERSION" ]
|
||||
|
@ -345,7 +354,7 @@ ynh_install_php () {
|
|||
fi
|
||||
|
||||
# Add an extra repository for those packages
|
||||
ynh_install_extra_repo --repo="https://packages.sury.org/php/ $(ynh_get_debian_release) main" --key="https://packages.sury.org/php/apt.gpg" --priority=995 --name=extra_php_version
|
||||
ynh_install_extra_repo --repo="https://packages.sury.org/php/ $(ynh_get_debian_release) main" --key="https://packages.sury.org/php/apt.gpg" --priority=995 --name=extra_php_version --priority=600
|
||||
|
||||
# Install requested dependencies from this extra repository.
|
||||
# Install php-fpm first, otherwise php will install apache as a dependency.
|
||||
|
@ -356,8 +365,10 @@ ynh_install_php () {
|
|||
update-alternatives --set php /usr/bin/php$YNH_DEFAULT_PHP_VERSION
|
||||
|
||||
# Pin this extra repository after packages are installed to prevent sury of doing shit
|
||||
ynh_pin_repo --package="*" --pin="origin \"packages.sury.org\"" --priority=200 --name=extra_php_version
|
||||
ynh_pin_repo --package="php${YNH_DEFAULT_PHP_VERSION}*" --pin="origin \"packages.sury.org\"" --priority=600 --name=extra_php_version --append
|
||||
for package_to_not_upgrade in "php" "php-fpm" "php-mysql" "php-xml" "php-zip" "php-mbstring" "php-ldap" "php-gd" "php-curl" "php-bz2" "php-json" "php-sqlite3" "php-intl" "openssl" "libssl1.1" "libssl-dev"
|
||||
do
|
||||
ynh_pin_repo --package="$package_to_not_upgrade" --pin="origin \"packages.sury.org\"" --priority="-1" --name=extra_php_version --append
|
||||
done
|
||||
|
||||
# Advertise service in admin panel
|
||||
yunohost service add php${phpversion}-fpm --log "/var/log/php${phpversion}-fpm.log"
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
#!/bin/bash
|
||||
|
||||
PSQL_ROOT_PWD_FILE=/etc/yunohost/psql
|
||||
PSQL_VERSION=11
|
||||
|
||||
# Open a connection as a user
|
||||
#
|
||||
|
@ -45,8 +46,13 @@ ynh_psql_execute_as_root() {
|
|||
ynh_handle_getopts_args "$@"
|
||||
database="${database:-}"
|
||||
|
||||
if [ -n "$database" ]
|
||||
then
|
||||
database="--database=$database"
|
||||
fi
|
||||
|
||||
ynh_psql_connect_as --user="postgres" --password="$(cat $PSQL_ROOT_PWD_FILE)" \
|
||||
--database="$database" <<<"$sql"
|
||||
$database <<<"$sql"
|
||||
}
|
||||
|
||||
# Execute a command from a file as root user
|
||||
|
@ -66,8 +72,13 @@ ynh_psql_execute_file_as_root() {
|
|||
ynh_handle_getopts_args "$@"
|
||||
database="${database:-}"
|
||||
|
||||
if [ -n "$database" ]
|
||||
then
|
||||
database="--database=$database"
|
||||
fi
|
||||
|
||||
ynh_psql_connect_as --user="postgres" --password="$(cat $PSQL_ROOT_PWD_FILE)" \
|
||||
--database="$database" <"$file"
|
||||
$database <"$file"
|
||||
}
|
||||
|
||||
# Create a database and grant optionnaly privilegies to a user
|
||||
|
@ -212,7 +223,7 @@ ynh_psql_drop_user() {
|
|||
# usage: ynh_psql_setup_db --db_user=user --db_name=name [--db_pwd=pwd]
|
||||
# | arg: -u, --db_user= - Owner of the database
|
||||
# | arg: -n, --db_name= - Name of the database
|
||||
# | arg: -p, --db_pwd= - Password of the database. If not given, a password will be generated
|
||||
# | arg: -p, --db_pwd= - Password of the database. If not provided, a password will be generated
|
||||
#
|
||||
# After executing this helper, the password of the created database will be available in $db_pwd
|
||||
# It will also be stored as "psqlpwd" into the app settings.
|
||||
|
@ -228,12 +239,14 @@ ynh_psql_setup_db() {
|
|||
# Manage arguments with getopts
|
||||
ynh_handle_getopts_args "$@"
|
||||
|
||||
local new_db_pwd=$(ynh_string_random) # Generate a random password
|
||||
# If $db_pwd is not given, use new_db_pwd instead for db_pwd
|
||||
db_pwd="${db_pwd:-$new_db_pwd}"
|
||||
|
||||
if ! ynh_psql_user_exists --user=$db_user; then
|
||||
local new_db_pwd=$(ynh_string_random) # Generate a random password
|
||||
# If $db_pwd is not provided, use new_db_pwd instead for db_pwd
|
||||
db_pwd="${db_pwd:-$new_db_pwd}"
|
||||
|
||||
ynh_psql_create_user "$db_user" "$db_pwd"
|
||||
elif [ -z $db_pwd ]; then
|
||||
ynh_die --message="The user $db_user exists, please provide his password"
|
||||
fi
|
||||
|
||||
ynh_psql_create_db "$db_name" "$db_user" # Create the database
|
||||
|
@ -273,6 +286,7 @@ ynh_psql_remove_db() {
|
|||
}
|
||||
|
||||
# Create a master password and set up global settings
|
||||
# It also make sure that postgresql is installed and running
|
||||
# Please always call this script in install and restore scripts
|
||||
#
|
||||
# usage: ynh_psql_test_if_first_run
|
||||
|
@ -280,45 +294,38 @@ ynh_psql_remove_db() {
|
|||
# Requires YunoHost version 2.7.13 or higher.
|
||||
ynh_psql_test_if_first_run() {
|
||||
|
||||
if [ -f "$PSQL_ROOT_PWD_FILE" ]
|
||||
# Make sure postgresql is indeed installed
|
||||
dpkg --list | grep -q "ii postgresql-$PSQL_VERSION" || ynh_die "postgresql-$PSQL_VERSION is not installed !?"
|
||||
|
||||
# Check for some weird issue where postgresql could be installed but etc folder would not exist ...
|
||||
[ -e "/etc/postgresql/$PSQL_VERSION" ] || ynh_die "It looks like postgresql was not properly configured ? /etc/postgresql/$PSQL_VERSION is missing ... Could be due to a locale issue, c.f.https://serverfault.com/questions/426989/postgresql-etc-postgresql-doesnt-exist"
|
||||
|
||||
# Make sure postgresql is started and enabled
|
||||
# (N.B. : to check the active state, we check the cluster state because
|
||||
# postgresql could be flagged as active even though the cluster is in
|
||||
# failed state because of how the service is configured..)
|
||||
systemctl is-active postgresql@$PSQL_VERSION-main -q || ynh_systemd_action --service_name=postgresql --action=restart
|
||||
systemctl is-enabled postgresql -q || systemctl enable postgresql --quiet
|
||||
|
||||
# If this is the very first time, we define the root password
|
||||
# and configure a few things
|
||||
if [ ! -f "$PSQL_ROOT_PWD_FILE" ]
|
||||
then
|
||||
ynh_print_info --message="PostgreSQL is already installed, no need to create master password"
|
||||
return
|
||||
local pg_hba=/etc/postgresql/$PSQL_VERSION/main/pg_hba.conf
|
||||
|
||||
local psql_root_password="$(ynh_string_random)"
|
||||
echo "$psql_root_password" >$PSQL_ROOT_PWD_FILE
|
||||
sudo --login --user=postgres psql -c"ALTER user postgres WITH PASSWORD '$psql_root_password'" postgres
|
||||
|
||||
# force all user to connect to local databases using hashed passwords
|
||||
# https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html#EXAMPLE-PG-HBA.CONF
|
||||
# Note: we can't use peer since YunoHost create users with nologin
|
||||
# See: https://github.com/YunoHost/yunohost/blob/unstable/data/helpers.d/user
|
||||
ynh_replace_string --match_string="local\(\s*\)all\(\s*\)all\(\s*\)peer" --replace_string="local\1all\2all\3md5" --target_file="$pg_hba"
|
||||
|
||||
# Integrate postgresql service in yunohost
|
||||
yunohost service add postgresql --log "/var/log/postgresql/"
|
||||
|
||||
ynh_systemd_action --service_name=postgresql --action=reload
|
||||
fi
|
||||
|
||||
local psql_root_password="$(ynh_string_random)"
|
||||
echo "$psql_root_password" >$PSQL_ROOT_PWD_FILE
|
||||
|
||||
if [ -e /etc/postgresql/9.4/ ]
|
||||
then
|
||||
local pg_hba=/etc/postgresql/9.4/main/pg_hba.conf
|
||||
local logfile=/var/log/postgresql/postgresql-9.4-main.log
|
||||
elif [ -e /etc/postgresql/9.6/ ]
|
||||
then
|
||||
local pg_hba=/etc/postgresql/9.6/main/pg_hba.conf
|
||||
local logfile=/var/log/postgresql/postgresql-9.6-main.log
|
||||
else
|
||||
if dpkg --list | grep -q "ii postgresql-9."
|
||||
then
|
||||
ynh_die "It looks like postgresql was not properly configured ? /etc/postgresql/9.* is missing ... Could be due to a locale issue, c.f.https://serverfault.com/questions/426989/postgresql-etc-postgresql-doesnt-exist"
|
||||
else
|
||||
ynh_die "postgresql shoud be 9.4 or 9.6"
|
||||
fi
|
||||
fi
|
||||
|
||||
ynh_systemd_action --service_name=postgresql --action=start
|
||||
|
||||
sudo --login --user=postgres psql -c"ALTER user postgres WITH PASSWORD '$psql_root_password'" postgres
|
||||
|
||||
# force all user to connect to local databases using hashed passwords
|
||||
# https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html#EXAMPLE-PG-HBA.CONF
|
||||
# Note: we can't use peer since YunoHost create users with nologin
|
||||
# See: https://github.com/YunoHost/yunohost/blob/unstable/data/helpers.d/user
|
||||
ynh_replace_string --match_string="local\(\s*\)all\(\s*\)all\(\s*\)peer" --replace_string="local\1all\2all\3md5" --target_file="$pg_hba"
|
||||
|
||||
# Advertise service in admin panel
|
||||
yunohost service add postgresql --log "$logfile"
|
||||
|
||||
systemctl enable postgresql
|
||||
ynh_systemd_action --service_name=postgresql --action=reload
|
||||
}
|
||||
|
|
|
@ -58,7 +58,7 @@ ynh_add_systemd_config () {
|
|||
ynh_store_file_checksum --file="$finalsystemdconf"
|
||||
|
||||
chown root: "$finalsystemdconf"
|
||||
systemctl enable $service
|
||||
systemctl enable $service --quiet
|
||||
systemctl daemon-reload
|
||||
}
|
||||
|
||||
|
@ -81,7 +81,7 @@ ynh_remove_systemd_config () {
|
|||
if [ -e "$finalsystemdconf" ]
|
||||
then
|
||||
ynh_systemd_action --service_name=$service --action=stop
|
||||
systemctl disable $service
|
||||
systemctl disable $service --quiet
|
||||
ynh_secure_remove --file="$finalsystemdconf"
|
||||
systemctl daemon-reload
|
||||
fi
|
||||
|
@ -145,7 +145,7 @@ ynh_systemd_action() {
|
|||
if ! systemctl $action $service_name
|
||||
then
|
||||
# Show syslog for this service
|
||||
ynh_exec_err journalctl --no-pager --lines=$length --unit=$service_name
|
||||
ynh_exec_err journalctl --quiet --no-hostname --no-pager --lines=$length --unit=$service_name
|
||||
# If a log is specified for this service, show also the content of this log
|
||||
if [ -e "$log_path" ]
|
||||
then
|
||||
|
@ -183,7 +183,7 @@ ynh_systemd_action() {
|
|||
then
|
||||
ynh_print_warn --message="The service $service_name didn't fully executed the action ${action} before the timeout."
|
||||
ynh_print_warn --message="Please find here an extract of the end of the log of the service $service_name:"
|
||||
ynh_exec_warn journalctl --no-pager --lines=$length --unit=$service_name
|
||||
ynh_exec_warn journalctl --quiet --no-hostname --no-pager --lines=$length --unit=$service_name
|
||||
if [ -e "$log_path" ]
|
||||
then
|
||||
ynh_print_warn --message="\-\-\-"
|
||||
|
|
|
@ -144,8 +144,13 @@ ynh_setup_source () {
|
|||
then # Use the local source file if it is present
|
||||
cp $local_src $src_filename
|
||||
else # If not, download the source
|
||||
# NB. we have to declare the var as local first,
|
||||
# otherwise 'local foo=$(false) || echo 'pwet'" does'nt work
|
||||
# because local always return 0 ...
|
||||
local out
|
||||
# Timeout option is here to enforce the timeout on dns query and tcp connect (c.f. man wget)
|
||||
local out=`wget --timeout 900 --no-verbose --output-document=$src_filename $src_url 2>&1` || ynh_print_err --message="$out"
|
||||
out=$(wget --tries 3 --no-dns-cache --timeout 900 --no-verbose --output-document=$src_filename $src_url 2>&1) \
|
||||
|| ynh_die --message="$out"
|
||||
fi
|
||||
|
||||
# Check the control sum
|
||||
|
|
|
@ -10,7 +10,8 @@ source /usr/share/yunohost/helpers
|
|||
backup_dir="${1}/conf/ldap"
|
||||
|
||||
# Backup the configuration
|
||||
ynh_backup "/etc/ldap/slapd.conf" "${backup_dir}/slapd.conf"
|
||||
ynh_backup "/etc/ldap/ldap.conf" "${backup_dir}/ldap.conf"
|
||||
ynh_backup "/etc/ldap/slapd.ldif" "${backup_dir}/slapd.ldif"
|
||||
slapcat -b cn=config -l "${backup_dir}/cn=config.master.ldif"
|
||||
|
||||
# Backup the database
|
||||
|
|
|
@ -1,13 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Exit hook on subcommand error or unset variable
|
||||
set -eu
|
||||
|
||||
# Source YNH helpers
|
||||
source /usr/share/yunohost/helpers
|
||||
|
||||
# Backup destination
|
||||
backup_dir="${1}/conf/ynh/mysql"
|
||||
|
||||
# Save MySQL root password
|
||||
ynh_backup "/etc/yunohost/mysql" "${backup_dir}/root_pwd"
|
|
@ -55,14 +55,63 @@ do_pre_regen() {
|
|||
fi
|
||||
|
||||
# add cron job for diagnosis to be ran at 7h and 19h + a random delay between
|
||||
# 0 and 10min, meant to avoid every instances running their diagnosis at
|
||||
# 0 and 20min, meant to avoid every instances running their diagnosis at
|
||||
# exactly the same time, which may overload the diagnosis server.
|
||||
mkdir -p $pending_dir/etc/cron.d/
|
||||
cat > $pending_dir/etc/cron.d/yunohost-diagnosis << EOF
|
||||
SHELL=/bin/bash
|
||||
0 7,19 * * * root : YunoHost Diagnosis; sleep \$((RANDOM\\%600)); yunohost diagnosis run > /dev/null
|
||||
0 7,19 * * * root : YunoHost Automatic Diagnosis; sleep \$((RANDOM\\%1200)); yunohost diagnosis run --email > /dev/null 2>/dev/null || echo "Running the automatic diagnosis failed miserably"
|
||||
EOF
|
||||
|
||||
# legacy stuff to avoid yunohost reporting etckeeper as manually modified
|
||||
# (this make sure that the hash is null / file is flagged as to-delete)
|
||||
mkdir -p $pending_dir/etc/etckeeper
|
||||
touch $pending_dir/etc/etckeeper/etckeeper.conf
|
||||
|
||||
# Skip ntp if inside a container (inspired from the conf of systemd-timesyncd)
|
||||
mkdir -p ${pending_dir}/etc/systemd/system/ntp.service.d/
|
||||
echo "
|
||||
[Unit]
|
||||
ConditionCapability=CAP_SYS_TIME
|
||||
ConditionVirtualization=!container
|
||||
" > ${pending_dir}/etc/systemd/system/ntp.service.d/ynh-override.conf
|
||||
|
||||
# Make nftable conflict with yunohost-firewall
|
||||
mkdir -p ${pending_dir}/etc/systemd/system/nftables.service.d/
|
||||
cat > ${pending_dir}/etc/systemd/system/nftables.service.d/ynh-override.conf << EOF
|
||||
[Unit]
|
||||
# yunohost-firewall and nftables conflict with each other
|
||||
Conflicts=yunohost-firewall.service
|
||||
ConditionFileIsExecutable=!/etc/init.d/yunohost-firewall
|
||||
ConditionPathExists=!/etc/systemd/system/multi-user.target.wants/yunohost-firewall.service
|
||||
EOF
|
||||
}
|
||||
|
||||
do_post_regen() {
|
||||
regen_conf_files=$1
|
||||
|
||||
######################
|
||||
# Enfore permissions #
|
||||
######################
|
||||
|
||||
# Certs
|
||||
# We do this with find because there could be a lot of them...
|
||||
chown -R root:ssl-cert /etc/yunohost/certs
|
||||
chmod 750 /etc/yunohost/certs
|
||||
find /etc/yunohost/certs/ -type f -exec chmod 640 {} \;
|
||||
find /etc/yunohost/certs/ -type d -exec chmod 750 {} \;
|
||||
|
||||
# Misc configuration / state files
|
||||
chown root:root $(ls /etc/yunohost/{*.yml,*.yaml,*.json,mysql,psql} 2>/dev/null)
|
||||
chmod 600 $(ls /etc/yunohost/{*.yml,*.yaml,*.json,mysql,psql} 2>/dev/null)
|
||||
|
||||
# Apps folder, custom hooks folder
|
||||
[[ ! -e /etc/yunohost/hooks.d ]] || (chown root /etc/yunohost/hooks.d && chmod 700 /etc/yunohost/hooks.d)
|
||||
[[ ! -e /etc/yunohost/apps ]] || (chown root /etc/yunohost/apps && chmod 700 /etc/yunohost/apps)
|
||||
|
||||
# Propagates changes in systemd service config overrides
|
||||
[[ ! "$regen_conf_files" =~ "ntp.service.d/ynh-override.conf" ]] || { systemctl daemon-reload; systemctl restart ntp; }
|
||||
[[ ! "$regen_conf_files" =~ "nftables.service.d/ynh-override.conf" ]] || systemctl daemon-reload
|
||||
}
|
||||
|
||||
_update_services() {
|
||||
|
@ -74,7 +123,7 @@ with open('services.yml') as f:
|
|||
new_services = yaml.load(f)
|
||||
|
||||
with open('/etc/yunohost/services.yml') as f:
|
||||
services = yaml.load(f)
|
||||
services = yaml.load(f) or {}
|
||||
|
||||
updated = False
|
||||
|
||||
|
@ -132,6 +181,7 @@ case "$1" in
|
|||
do_pre_regen $4
|
||||
;;
|
||||
post)
|
||||
do_post_regen $4
|
||||
;;
|
||||
init)
|
||||
do_init_regen
|
||||
|
|
|
@ -69,12 +69,11 @@ do_init_regen() {
|
|||
-out "${ssl_dir}/certs/yunohost_crt.pem" \
|
||||
-batch >>$LOGFILE 2>&1
|
||||
|
||||
last_cert=$(ls $ssl_dir/newcerts/*.pem | sort -V | tail -n 1)
|
||||
chmod 640 "${ssl_dir}/certs/yunohost_key.pem"
|
||||
chmod 640 "$last_cert"
|
||||
chmod 640 "${ssl_dir}/certs/yunohost_crt.pem"
|
||||
|
||||
cp "${ssl_dir}/certs/yunohost_key.pem" "$ynh_key"
|
||||
cp "$last_cert" "$ynh_crt"
|
||||
cp "${ssl_dir}/certs/yunohost_crt.pem" "$ynh_crt"
|
||||
ln -sf "$ynh_crt" /etc/ssl/certs/yunohost_crt.pem
|
||||
ln -sf "$ynh_key" /etc/ssl/private/yunohost_key.pem
|
||||
fi
|
||||
|
|
|
@ -12,27 +12,52 @@ do_init_regen() {
|
|||
|
||||
do_pre_regen ""
|
||||
|
||||
# fix some permissions
|
||||
chown root:openldap /etc/ldap/slapd.conf
|
||||
systemctl daemon-reload
|
||||
|
||||
_regenerate_slapd_conf
|
||||
|
||||
# Enforce permissions
|
||||
chown root:openldap /etc/ldap/slapd.ldif
|
||||
chown -R openldap:openldap /etc/ldap/schema/
|
||||
usermod -aG ssl-cert openldap
|
||||
|
||||
# check the slapd config file at first
|
||||
slaptest -Q -u -f /etc/ldap/slapd.conf
|
||||
|
||||
# regenerate LDAP config directory from slapd.conf
|
||||
rm -Rf /etc/ldap/slapd.d
|
||||
mkdir /etc/ldap/slapd.d
|
||||
slaptest -f /etc/ldap/slapd.conf -F /etc/ldap/slapd.d/ 2>&1
|
||||
chown -R openldap:openldap /etc/ldap/slapd.d/
|
||||
|
||||
service slapd restart
|
||||
}
|
||||
|
||||
_regenerate_slapd_conf() {
|
||||
|
||||
# Validate the new slapd config
|
||||
# To do so, we have to use the .ldif to generate the config directory
|
||||
# so we use a temporary directory slapd_new.d
|
||||
rm -Rf /etc/ldap/slapd_new.d
|
||||
mkdir /etc/ldap/slapd_new.d
|
||||
slapadd -n0 -l /etc/ldap/slapd.ldif -F /etc/ldap/slapd_new.d/ 2>&1
|
||||
# Actual validation (-Q is for quiet, -u is for dry-run)
|
||||
slaptest -Q -u -F /etc/ldap/slapd_new.d
|
||||
|
||||
# "Commit" / apply the new config (meaning we delete the old one and replace
|
||||
# it with the new one)
|
||||
rm -Rf /etc/ldap/slapd.d
|
||||
mv /etc/ldap/slapd_new.d /etc/ldap/slapd.d
|
||||
|
||||
chown -R openldap:openldap /etc/ldap/slapd.d/
|
||||
}
|
||||
|
||||
do_pre_regen() {
|
||||
pending_dir=$1
|
||||
|
||||
cd /usr/share/yunohost/templates/slapd
|
||||
# remove temporary backup file
|
||||
rm -f "$tmp_backup_dir_file"
|
||||
|
||||
# Define if we need to migrate from hdb to mdb
|
||||
curr_backend=$(grep '^database' /etc/ldap/slapd.conf 2>/dev/null | awk '{print $2}')
|
||||
if [ -e /etc/ldap/slapd.conf ] && [ -n "$curr_backend" ] && \
|
||||
[ $curr_backend != 'mdb' ]; then
|
||||
backup_dir="/var/backups/dc=yunohost,dc=org-${curr_backend}-$(date +%s)"
|
||||
mkdir -p "$backup_dir"
|
||||
slapcat -b dc=yunohost,dc=org -l "${backup_dir}/dc=yunohost-dc=org.ldif"
|
||||
echo "$backup_dir" > "$tmp_backup_dir_file"
|
||||
fi
|
||||
|
||||
# create needed directories
|
||||
ldap_dir="${pending_dir}/etc/ldap"
|
||||
|
@ -40,28 +65,15 @@ do_pre_regen() {
|
|||
mkdir -p "$ldap_dir" "$schema_dir"
|
||||
|
||||
# remove legacy configuration file
|
||||
[ ! -f /etc/ldap/slapd-yuno.conf ] \
|
||||
|| touch "${pending_dir}/etc/ldap/slapd-yuno.conf"
|
||||
[ ! -f /etc/ldap/slapd-yuno.conf ] || touch "${ldap_dir}/slapd-yuno.conf"
|
||||
[ ! -f /etc/ldap/slapd.conf ] || touch "${ldap_dir}/slapd.conf"
|
||||
[ ! -f /etc/ldap/schema/yunohost.schema ] || touch "${schema_dir}/yunohost.schema"
|
||||
|
||||
# remove temporary backup file
|
||||
rm -f "$tmp_backup_dir_file"
|
||||
|
||||
# retrieve current and new backends
|
||||
curr_backend=$(grep '^database' /etc/ldap/slapd.conf 2>/dev/null | awk '{print $2}')
|
||||
new_backend=$(grep '^database' slapd.conf | awk '{print $2}')
|
||||
|
||||
# save current database before any conf changes
|
||||
if [[ -n "$curr_backend" && "$curr_backend" != "$new_backend" ]]; then
|
||||
backup_dir="/var/backups/dc=yunohost,dc=org-${curr_backend}-$(date +%s)"
|
||||
mkdir -p "$backup_dir"
|
||||
slapcat -b dc=yunohost,dc=org \
|
||||
-l "${backup_dir}/dc=yunohost-dc=org.ldif"
|
||||
echo "$backup_dir" > "$tmp_backup_dir_file"
|
||||
fi
|
||||
cd /usr/share/yunohost/templates/slapd
|
||||
|
||||
# copy configuration files
|
||||
cp -a ldap.conf slapd.conf "$ldap_dir"
|
||||
cp -a sudo.schema mailserver.schema yunohost.schema "$schema_dir"
|
||||
cp -a ldap.conf slapd.ldif "$ldap_dir"
|
||||
cp -a sudo.ldif mailserver.ldif permission.ldif "$schema_dir"
|
||||
|
||||
mkdir -p ${pending_dir}/etc/systemd/system/slapd.service.d/
|
||||
cp systemd-override.conf ${pending_dir}/etc/systemd/system/slapd.service.d/ynh-override.conf
|
||||
|
@ -72,19 +84,13 @@ do_pre_regen() {
|
|||
do_post_regen() {
|
||||
regen_conf_files=$1
|
||||
|
||||
# ensure that slapd.d exists
|
||||
mkdir -p /etc/ldap/slapd.d
|
||||
|
||||
# fix some permissions
|
||||
echo "Making sure we have the right permissions needed ..."
|
||||
echo "Enforce permissions on ldap/slapd directories and certs ..."
|
||||
# openldap user should be in the ssl-cert group to let it access the certificate for TLS
|
||||
usermod -aG ssl-cert openldap
|
||||
chown root:openldap /etc/ldap/slapd.conf
|
||||
chown root:openldap /etc/ldap/slapd.ldif
|
||||
chown -R openldap:openldap /etc/ldap/schema/
|
||||
chown -R openldap:openldap /etc/ldap/slapd.d/
|
||||
chown -R root:ssl-cert /etc/yunohost/certs/yunohost.org/
|
||||
chmod o-rwx /etc/yunohost/certs/yunohost.org/
|
||||
chmod -R g+rx /etc/yunohost/certs/yunohost.org/
|
||||
|
||||
# If we changed the systemd ynh-override conf
|
||||
if echo "$regen_conf_files" | sed 's/,/\n/g' | grep -q "^/etc/systemd/system/slapd.service.d/ynh-override.conf$"
|
||||
|
@ -95,29 +101,17 @@ do_post_regen() {
|
|||
|
||||
[ -z "$regen_conf_files" ] && exit 0
|
||||
|
||||
# check the slapd config file at first
|
||||
slaptest -Q -u -f /etc/ldap/slapd.conf
|
||||
# regenerate LDAP config directory from slapd.conf
|
||||
echo "Regenerate LDAP config directory from slapd.ldif"
|
||||
_regenerate_slapd_conf
|
||||
|
||||
# check if a backup should be restored
|
||||
# If there's a backup, re-import its data
|
||||
backup_dir=$(cat "$tmp_backup_dir_file" 2>/dev/null || true)
|
||||
if [[ -n "$backup_dir" && -f "${backup_dir}/dc=yunohost-dc=org.ldif" ]]; then
|
||||
# regenerate LDAP config directory and import database as root
|
||||
# since the admin user may be unavailable
|
||||
echo "Regenerate LDAP config directory and import the database using slapadd"
|
||||
sh -c "rm -Rf /etc/ldap/slapd.d;
|
||||
mkdir /etc/ldap/slapd.d;
|
||||
slaptest -f /etc/ldap/slapd.conf -F /etc/ldap/slapd.d;
|
||||
chown -R openldap:openldap /etc/ldap/slapd.d;
|
||||
slapadd -F /etc/ldap/slapd.d -b dc=yunohost,dc=org \
|
||||
-l '${backup_dir}/dc=yunohost-dc=org.ldif';
|
||||
chown -R openldap:openldap /var/lib/ldap" 2>&1
|
||||
else
|
||||
# regenerate LDAP config directory from slapd.conf
|
||||
echo "Regenerate LDAP config directory from slapd.conf"
|
||||
rm -Rf /etc/ldap/slapd.d
|
||||
mkdir /etc/ldap/slapd.d
|
||||
slaptest -f /etc/ldap/slapd.conf -F /etc/ldap/slapd.d/ 2>&1
|
||||
chown -R openldap:openldap /etc/ldap/slapd.d/
|
||||
echo "Import the database using slapadd"
|
||||
slapadd -F /etc/ldap/slapd.d -b dc=yunohost,dc=org -l "${backup_dir}/dc=yunohost-dc=org.ldif"
|
||||
chown -R openldap:openldap /var/lib/ldap 2>&1
|
||||
fi
|
||||
|
||||
echo "Running slapdindex"
|
||||
|
@ -157,6 +151,9 @@ case "$1" in
|
|||
init)
|
||||
do_init_regen
|
||||
;;
|
||||
apply_config)
|
||||
do_post_regen /etc/ldap/slapd.ldif
|
||||
;;
|
||||
*)
|
||||
echo "hook called with unknown argument \`$1'" >&2
|
||||
exit 1
|
||||
|
|
|
@ -57,6 +57,9 @@ do_post_regen() {
|
|||
done
|
||||
|
||||
# fix some permissions
|
||||
|
||||
# metronome should be in ssl-cert group to let it access SSL certificates
|
||||
usermod -aG ssl-cert metronome
|
||||
chown -R metronome: /var/lib/metronome/
|
||||
chown -R metronome: /etc/metronome/conf.d/
|
||||
|
||||
|
|
|
@ -26,6 +26,9 @@ do_init_regen() {
|
|||
ynh_render_template "security.conf.inc" "${nginx_conf_dir}/security.conf.inc"
|
||||
ynh_render_template "yunohost_admin.conf" "${nginx_conf_dir}/yunohost_admin.conf"
|
||||
|
||||
mkdir -p $nginx_conf_dir/default.d/
|
||||
cp "redirect_to_admin.conf" $nginx_conf_dir/default.d/
|
||||
|
||||
# Restart nginx if conf looks good, otherwise display error and exit unhappy
|
||||
nginx -t 2>/dev/null || { nginx -t; exit 1; }
|
||||
systemctl restart nginx || { journalctl --no-pager --lines=10 -u nginx >&2; exit 1; }
|
||||
|
@ -77,6 +80,8 @@ do_pre_regen() {
|
|||
done
|
||||
|
||||
ynh_render_template "yunohost_admin.conf" "${nginx_conf_dir}/yunohost_admin.conf"
|
||||
mkdir -p $nginx_conf_dir/default.d/
|
||||
cp "redirect_to_admin.conf" $nginx_conf_dir/default.d/
|
||||
|
||||
# remove old domain conf files
|
||||
conf_files=$(ls -1 /etc/nginx/conf.d \
|
||||
|
|
|
@ -42,6 +42,8 @@ do_post_regen() {
|
|||
chown _rspamd /etc/dkim/*.mail.key
|
||||
chmod 400 /etc/dkim/*.mail.key
|
||||
|
||||
[ ! -e /var/log/rspamd ] || chown _rspamd:_rspamd /var/log/rspamd
|
||||
|
||||
regen_conf_files=$1
|
||||
[ -z "$regen_conf_files" ] && exit 0
|
||||
|
||||
|
|
|
@ -15,6 +15,18 @@ do_pre_regen() {
|
|||
do_post_regen() {
|
||||
regen_conf_files=$1
|
||||
|
||||
# mysql is supposed to be an alias to mariadb... but in some weird cases it is not
|
||||
# c.f. https://forum.yunohost.org/t/mysql-ne-fonctionne-pas/11661
|
||||
# Playing with enable/disable allows recreating the proper symlinks.
|
||||
if [ ! -e /etc/systemd/system/mysql.service ]
|
||||
then
|
||||
systemctl stop mysql -q
|
||||
systemctl disable mysql -q
|
||||
systemctl disable mariadb -q
|
||||
systemctl enable mariadb -q
|
||||
systemctl is-active mariadb -q || systemctl start mariadb
|
||||
fi
|
||||
|
||||
if [ ! -f /etc/yunohost/mysql ]; then
|
||||
|
||||
# ensure that mysql is running
|
||||
|
|
|
@ -1,36 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
do_pre_regen() {
|
||||
pending_dir=$1
|
||||
|
||||
cd /usr/share/yunohost/templates/glances
|
||||
|
||||
install -D -m 644 glances.default "${pending_dir}/etc/default/glances"
|
||||
}
|
||||
|
||||
do_post_regen() {
|
||||
regen_conf_files=$1
|
||||
|
||||
[[ -z "$regen_conf_files" ]] \
|
||||
|| service glances restart
|
||||
}
|
||||
|
||||
FORCE=${2:-0}
|
||||
DRY_RUN=${3:-0}
|
||||
|
||||
case "$1" in
|
||||
pre)
|
||||
do_pre_regen $4
|
||||
;;
|
||||
post)
|
||||
do_post_regen $4
|
||||
;;
|
||||
*)
|
||||
echo "hook called with unknown argument \`$1'" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
exit 0
|
|
@ -64,8 +64,21 @@ do_post_regen() {
|
|||
systemctl restart resolvconf
|
||||
fi
|
||||
|
||||
[[ -z "$regen_conf_files" ]] \
|
||||
|| service dnsmasq restart
|
||||
# Some stupid things like rabbitmq-server used by onlyoffice won't work if
|
||||
# the *short* hostname doesn't exist in /etc/hosts -_-
|
||||
short_hostname=$(hostname -s)
|
||||
grep -q "127.0.0.1.*$short_hostname" /etc/hosts || echo -e "\n127.0.0.1\t$short_hostname" >>/etc/hosts
|
||||
|
||||
[[ -n "$regen_conf_files" ]] || return
|
||||
|
||||
# Remove / disable services likely to conflict with dnsmasq
|
||||
for SERVICE in systemd-resolved bind9
|
||||
do
|
||||
systemctl is-enabled $SERVICE &>/dev/null && systemctl disable $SERVICE 2>/dev/null
|
||||
systemctl is-active $SERVICE &>/dev/null && systemctl stop $SERVICE
|
||||
done
|
||||
|
||||
systemctl restart dnsmasq
|
||||
}
|
||||
|
||||
FORCE=${2:-0}
|
||||
|
|
|
@ -1,9 +1,11 @@
|
|||
#!/usr/bin/env python
|
||||
|
||||
import os
|
||||
import json
|
||||
import subprocess
|
||||
|
||||
from moulinette.utils.process import check_output
|
||||
from moulinette.utils.filesystem import read_file
|
||||
from moulinette.utils.filesystem import read_file, read_json, write_to_json
|
||||
from yunohost.diagnosis import Diagnoser
|
||||
from yunohost.utils.packages import ynh_packages_version
|
||||
|
||||
|
@ -32,7 +34,7 @@ class BaseSystemDiagnoser(Diagnoser):
|
|||
|
||||
# Also possibly the board name
|
||||
if os.path.exists("/proc/device-tree/model"):
|
||||
model = read_file('/proc/device-tree/model').strip()
|
||||
model = read_file('/proc/device-tree/model').strip().replace('\x00', '')
|
||||
hardware["data"]["model"] = model
|
||||
hardware["details"] = ["diagnosis_basesystem_hardware_board"]
|
||||
|
||||
|
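For context, the one-line change above strips the NUL terminator that device-tree model strings carry. A minimal standalone sketch of the same idea, not the diagnoser's code (it reads the file directly instead of going through moulinette's read_file):

def read_board_model(path="/proc/device-tree/model"):
    # Device-tree model strings are NUL-terminated; strip the embedded
    # '\x00' so the value displays cleanly in the diagnosis report.
    with open(path, "rb") as f:
        raw = f.read()
    return raw.decode("ascii", errors="replace").strip().replace("\x00", "")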
@ -74,5 +76,75 @@ class BaseSystemDiagnoser(Diagnoser):
|
|||
details=ynh_version_details)
|
||||
|
||||
|
||||
if self.is_vulnerable_to_meltdown():
|
||||
yield dict(meta={"test": "meltdown"},
|
||||
status="ERROR",
|
||||
summary="diagnosis_security_vulnerable_to_meltdown",
|
||||
details=["diagnosis_security_vulnerable_to_meltdown_details"]
|
||||
)
|
||||
|
||||
def is_vulnerable_to_meltdown(self):
|
||||
# meltdown CVE: https://security-tracker.debian.org/tracker/CVE-2017-5754
|
||||
|
||||
# We use a cache file to avoid re-running the script so many times,
|
||||
# which can be expensive (up to around 5 seconds on ARM)
|
||||
# and make the admin appear to be slow (c.f. the calls to diagnosis
|
||||
# from the webadmin)
|
||||
#
|
||||
# The cache is in /tmp and shall disappear upon reboot
|
||||
# *or* we compare it to dpkg.log modification time
|
||||
# such that it's re-run if there were package upgrades
|
||||
# (e.g. from yunohost)
|
||||
cache_file = "/tmp/yunohost-meltdown-diagnosis"
|
||||
dpkg_log = "/var/log/dpkg.log"
|
||||
if os.path.exists(cache_file):
|
||||
if not os.path.exists(dpkg_log) or os.path.getmtime(cache_file) > os.path.getmtime(dpkg_log):
|
||||
self.logger_debug("Using cached results for meltdown checker, from %s" % cache_file)
|
||||
return read_json(cache_file)[0]["VULNERABLE"]
|
||||
|
||||
# script taken from https://github.com/speed47/spectre-meltdown-checker
|
||||
# script commit id is stored directly in the script
|
||||
SCRIPT_PATH = "/usr/lib/moulinette/yunohost/vendor/spectre-meltdown-checker/spectre-meltdown-checker.sh"
|
||||
|
||||
# '--variant 3' corresponds to Meltdown
|
||||
# example output from the script:
|
||||
# [{"NAME":"MELTDOWN","CVE":"CVE-2017-5754","VULNERABLE":false,"INFOS":"PTI mitigates the vulnerability"}]
|
||||
try:
|
||||
self.logger_debug("Running meltdown vulnerability checker")
|
||||
call = subprocess.Popen("bash %s --batch json --variant 3" %
|
||||
SCRIPT_PATH, shell=True,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE)
|
||||
|
||||
# TODO / FIXME : here we are ignoring error messages ...
|
||||
# in particular on RPi2 and other hardware, the script complains about
|
||||
# "missing some kernel info (see -v), accuracy might be reduced"
|
||||
# Dunno what to do about that but we probably don't want to harass
|
||||
# users with this warning ...
|
||||
output, err = call.communicate()
|
||||
assert call.returncode in (0, 2, 3), "Return code: %s" % call.returncode
|
||||
|
||||
# If there are multiple lines, sounds like there was some messages
|
||||
# in stdout that are not json >.> ... Try to get the actual json
|
||||
# stuff which should be the last line
|
||||
output = output.strip()
|
||||
if "\n" in output:
|
||||
self.logger_debug("Original meltdown checker output : %s" % output)
|
||||
output = output.split("\n")[-1]
|
||||
|
||||
CVEs = json.loads(output)
|
||||
assert len(CVEs) == 1
|
||||
assert CVEs[0]["NAME"] == "MELTDOWN"
|
||||
except Exception as e:
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
self.logger_warning("Something wrong happened when trying to diagnose Meltdown vunerability, exception: %s" % e)
|
||||
raise Exception("Command output for failed meltdown check: '%s'" % output)
|
||||
|
||||
self.logger_debug("Writing results from meltdown checker to cache file, %s" % cache_file)
|
||||
write_to_json(cache_file, CVEs)
|
||||
return CVEs[0]["VULNERABLE"]
|
||||
|
||||
|
||||
def main(args, env, loggers):
|
||||
return BaseSystemDiagnoser(args, env, loggers).diagnose()
|
||||
|
|
|
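To make the caching logic added above easier to follow in isolation, here is a minimal sketch of the same idea. It is not the diagnoser's actual code: run_checker is a stand-in for the expensive call to spectre-meltdown-checker, and the two paths simply mirror the ones used above.

import json
import os

CACHE_FILE = "/tmp/yunohost-meltdown-diagnosis"
DPKG_LOG = "/var/log/dpkg.log"


def cache_is_fresh():
    # Trust the cache only if no package was installed or upgraded since
    # it was written (otherwise dpkg.log is newer than the cache file).
    if not os.path.exists(CACHE_FILE):
        return False
    if not os.path.exists(DPKG_LOG):
        return True
    return os.path.getmtime(CACHE_FILE) > os.path.getmtime(DPKG_LOG)


def is_vulnerable_to_meltdown(run_checker):
    if cache_is_fresh():
        with open(CACHE_FILE) as f:
            return json.load(f)[0]["VULNERABLE"]

    # run_checker() must return the checker's JSON output, e.g.
    # [{"NAME": "MELTDOWN", "CVE": "CVE-2017-5754", "VULNERABLE": false, ...}]
    # Keep only the last line, in case warnings were printed before the JSON.
    output = run_checker().strip().split("\n")[-1]
    cves = json.loads(output)
    assert len(cves) == 1 and cves[0]["NAME"] == "MELTDOWN"

    with open(CACHE_FILE, "w") as f:
        json.dump(cves, f)
    return cves[0]["VULNERABLE"]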
@ -108,7 +108,7 @@ class IPDiagnoser(Diagnoser):
|
|||
return False
|
||||
|
||||
# If we are indeed connected in ipv4 or ipv6, we should find a default route
|
||||
routes = check_output("ip -%s route" % protocol).split("\n")
|
||||
routes = check_output("ip -%s route show table all" % protocol).split("\n")
|
||||
|
||||
def is_default_route(r):
|
||||
# Typically the default route starts with "default"
|
||||
|
|
|
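The command change above (from "ip route" to "ip route show table all") widens the search for a default route. The surrounding heuristic can be sketched on its own, assuming the helper is handed the raw command output; this is an illustration, not the diagnoser's code:

def has_default_route(route_output):
    # A default route line typically starts with "default"; querying
    # "table all" (as above) also catches routes kept in non-main tables.
    def is_default_route(line):
        return line.strip().startswith("default")

    return any(is_default_route(line) for line in route_output.split("\n"))


# Example with captured output (made up for illustration):
sample = "default via 192.0.2.1 dev eth0\n192.0.2.0/24 dev eth0 scope link"
print(has_default_route(sample))  # True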
@ -12,9 +12,7 @@ from yunohost.utils.network import dig
|
|||
from yunohost.diagnosis import Diagnoser
|
||||
from yunohost.domain import domain_list, _build_dns_conf, _get_maindomain
|
||||
|
||||
# We put here domains we know has dyndns provider, but that are not yet
|
||||
# registered in the public suffix list
|
||||
PENDING_SUFFIX_LIST = ['ynh.fr', 'netlib.re']
|
||||
YNH_DYNDNS_DOMAINS = ['nohost.me', 'noho.st', 'ynh.fr']
|
||||
|
||||
|
||||
class DNSRecordsDiagnoser(Diagnoser):
|
||||
|
@ -38,7 +36,7 @@ class DNSRecordsDiagnoser(Diagnoser):
|
|||
psl = PublicSuffixList()
|
||||
domains_from_registrar = [psl.get_public_suffix(domain) for domain in all_domains]
|
||||
domains_from_registrar = [domain for domain in domains_from_registrar if "." in domain]
|
||||
domains_from_registrar = set(domains_from_registrar) - set(PENDING_SUFFIX_LIST)
|
||||
domains_from_registrar = set(domains_from_registrar) - set(YNH_DYNDNS_DOMAINS + ["netlib.re"])
|
||||
for report in self.check_expiration_date(domains_from_registrar):
|
||||
yield report
|
||||
|
||||
|
@ -100,7 +98,13 @@ class DNSRecordsDiagnoser(Diagnoser):
|
|||
summary=summary)
|
||||
|
||||
if discrepancies:
|
||||
output["details"] = ["diagnosis_dns_point_to_doc"] + discrepancies
|
||||
# For ynh-managed domains (nohost.me etc...), tell people to try to "yunohost dyndns update --force"
|
||||
if any(domain.endswith(ynh_dyndns_domain) for ynh_dyndns_domain in YNH_DYNDNS_DOMAINS):
|
||||
output["details"] = ["diagnosis_dns_try_dyndns_update_force"]
|
||||
# Otherwise point to the documentation
|
||||
else:
|
||||
output["details"] = ["diagnosis_dns_point_to_doc"]
|
||||
output["details"] += discrepancies
|
||||
|
||||
yield output
|
||||
|
||||
|
|
|
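The registrar-domain filtering above is essentially a set computation. A minimal sketch of the idea, deliberately independent of the publicsuffix package: the get_public_suffix argument stands in for PublicSuffixList().get_public_suffix, and the example domains are made up.

YNH_DYNDNS_DOMAINS = ["nohost.me", "noho.st", "ynh.fr"]


def registrar_domains(all_domains, get_public_suffix):
    # Reduce every domain to its registrable part,
    # e.g. "blog.example.com" -> "example.com".
    domains = {get_public_suffix(d) for d in all_domains}
    # Drop anything that did not reduce to a real registrable domain.
    domains = {d for d in domains if "." in d}
    # DynDNS domains (and netlib.re) are not bought at a registrar, so an
    # expiration-date check makes no sense for them.
    return domains - set(YNH_DYNDNS_DOMAINS + ["netlib.re"])


# Hypothetical example with a naive two-label suffix function:
print(registrar_domains(
    ["blog.example.com", "sub.example.nohost.me"],
    lambda d: ".".join(d.split(".")[-2:]),
))  # {'example.com'}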
@ -87,7 +87,7 @@ class PortsDiagnoser(Diagnoser):
|
|||
# If any AAAA record is set, IPv6 is important...
|
||||
def ipv6_is_important():
|
||||
dnsrecords = Diagnoser.get_cached_report("dnsrecords") or {}
|
||||
return any(record["data"]["AAAA:@"] in ["OK", "WRONG"] for record in dnsrecords.get("items", []))
|
||||
return any(record["data"].get("AAAA:@") in ["OK", "WRONG"] for record in dnsrecords.get("items", []))
|
||||
|
||||
if failed == 4 or ipv6_is_important():
|
||||
yield dict(meta={"port": port},
|
||||
|
|
|
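The small fix above swaps direct indexing for .get() so that cached items without an "AAAA:@" entry no longer raise a KeyError. A standalone sketch with a made-up cached report:

def ipv6_is_important(dnsrecords):
    # .get() tolerates items that simply have no "AAAA:@" entry
    # (direct indexing would raise a KeyError on those).
    return any(
        item["data"].get("AAAA:@") in ["OK", "WRONG"]
        for item in dnsrecords.get("items", [])
    )


# Hypothetical cached report: the first item carries no AAAA record at all.
report = {"items": [{"data": {"A:@": "OK"}}, {"data": {"AAAA:@": "WRONG"}}]}
print(ipv6_is_important(report))  # True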
@ -126,7 +126,7 @@ class MailDiagnoser(Diagnoser):
|
|||
query += '.ip6.arpa'
|
||||
|
||||
# Do the DNS Query
|
||||
status, value = dig(query, 'PTR')
|
||||
status, value = dig(query, 'PTR', resolvers="force_external")
|
||||
if status == "nok":
|
||||
yield dict(meta={"test": "mail_fcrdns", "ipversion": ipversion},
|
||||
data={"ip": ip, "ehlo_domain": self.ehlo_domain},
|
||||
|
|
|
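The PTR query passed to dig() above is just the reverse-DNS name of the server's IP. On Python 3 the standard library can build that name, which is handy for checking what the diagnoser will ask the forced external resolvers for; a sketch only, not YunoHost code:

import ipaddress


def reverse_query_name(ip):
    # "203.0.113.5" -> "5.113.0.203.in-addr.arpa"
    # IPv6 addresses yield the nibble-reversed name under ip6.arpa.
    return ipaddress.ip_address(ip).reverse_pointer


print(reverse_query_name("203.0.113.5"))
print(reverse_query_name("2001:db8::1"))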
@ -21,7 +21,7 @@ class ServicesDiagnoser(Diagnoser):
|
|||
data={"status": result["status"], "configuration": result["configuration"]})
|
||||
|
||||
if result["status"] != "running":
|
||||
item["status"] = "ERROR"
|
||||
item["status"] = "ERROR" if result["status"] != "unknown" else "WARNING"
|
||||
item["summary"] = "diagnosis_services_bad_status"
|
||||
item["details"] = ["diagnosis_services_bad_status_tip"]
|
||||
|
||||
|
|
|
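The change above keeps a non-running service as an ERROR except when its status is "unknown", which is now only a WARNING. Expressed as a tiny helper (a sketch of the mapping, not the diagnoser's structure):

def status_level(status):
    # Running is fine; an undeterminable status is only worth a warning,
    # anything else (failed, exited, ...) is a real error.
    if status == "running":
        return "SUCCESS"
    return "WARNING" if status == "unknown" else "ERROR"


print(status_level("running"))   # SUCCESS
print(status_level("unknown"))   # WARNING
print(status_level("failed"))    # ERROR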
@ -45,14 +45,15 @@ class SystemResourcesDiagnoser(Diagnoser):
|
|||
item = dict(meta={"test": "swap"},
|
||||
data={"total": human_size(swap.total), "recommended": "512 MiB"})
|
||||
if swap.total <= 1 * MB:
|
||||
item["status"] = "ERROR"
|
||||
item["status"] = "INFO"
|
||||
item["summary"] = "diagnosis_swap_none"
|
||||
elif swap.total < 500 * MB:
|
||||
item["status"] = "WARNING"
|
||||
elif swap.total < 450 * MB:
|
||||
item["status"] = "INFO"
|
||||
item["summary"] = "diagnosis_swap_notsomuch"
|
||||
else:
|
||||
item["status"] = "SUCCESS"
|
||||
item["summary"] = "diagnosis_swap_ok"
|
||||
item["details"] = ["diagnosis_swap_tip"]
|
||||
yield item
|
||||
|
||||
# FIXME : add a check that swappiness is low if swap is on an sdcard...
|
||||
|
|
|
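The reworked thresholds above boil down to: essentially no swap, or less than 450 MiB of it, is merely informational, and anything above that is fine. A small pure function capturing that, assuming MB means 1024*1024 bytes (an assumption here; the diagnoser defines its own constant):

MB = 1024 * 1024


def swap_status(total_bytes):
    # <= 1 MB: effectively no swap; < 450 MB: not much; otherwise fine.
    if total_bytes <= 1 * MB:
        return "INFO", "diagnosis_swap_none"
    elif total_bytes < 450 * MB:
        return "INFO", "diagnosis_swap_notsomuch"
    return "SUCCESS", "diagnosis_swap_ok"


print(swap_status(0))            # ('INFO', 'diagnosis_swap_none')
print(swap_status(256 * MB))     # ('INFO', 'diagnosis_swap_notsomuch')
print(swap_status(2048 * MB))    # ('SUCCESS', 'diagnosis_swap_ok')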
@ -1,98 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
|
||||
import os
|
||||
import json
|
||||
import subprocess
|
||||
|
||||
from yunohost.diagnosis import Diagnoser
|
||||
from moulinette.utils.filesystem import read_json, write_to_json
|
||||
|
||||
|
||||
class SecurityDiagnoser(Diagnoser):
|
||||
|
||||
id_ = os.path.splitext(os.path.basename(__file__))[0].split("-")[1]
|
||||
cache_duration = 3600
|
||||
dependencies = []
|
||||
|
||||
def run(self):
|
||||
|
||||
"CVE-2017-5754"
|
||||
|
||||
if self.is_vulnerable_to_meltdown():
|
||||
yield dict(meta={"test": "meltdown"},
|
||||
status="ERROR",
|
||||
summary="diagnosis_security_vulnerable_to_meltdown",
|
||||
details=["diagnosis_security_vulnerable_to_meltdown_details"]
|
||||
)
|
||||
else:
|
||||
yield dict(meta={},
|
||||
status="SUCCESS",
|
||||
summary="diagnosis_security_all_good"
|
||||
)
|
||||
|
||||
|
||||
def is_vulnerable_to_meltdown(self):
|
||||
# meltdown CVE: https://security-tracker.debian.org/tracker/CVE-2017-5754
|
||||
|
||||
# We use a cache file to avoid re-running the script so many times,
|
||||
# which can be expensive (up to around 5 seconds on ARM)
|
||||
# and make the admin appear to be slow (c.f. the calls to diagnosis
|
||||
# from the webadmin)
|
||||
#
|
||||
# The cache is in /tmp and shall disappear upon reboot
|
||||
# *or* we compare it to dpkg.log modification time
|
||||
# such that it's re-ran if there was package upgrades
|
||||
# (e.g. from yunohost)
|
||||
cache_file = "/tmp/yunohost-meltdown-diagnosis"
|
||||
dpkg_log = "/var/log/dpkg.log"
|
||||
if os.path.exists(cache_file):
|
||||
if not os.path.exists(dpkg_log) or os.path.getmtime(cache_file) > os.path.getmtime(dpkg_log):
|
||||
self.logger_debug("Using cached results for meltdown checker, from %s" % cache_file)
|
||||
return read_json(cache_file)[0]["VULNERABLE"]
|
||||
|
||||
# script taken from https://github.com/speed47/spectre-meltdown-checker
|
||||
# script commit id is store directly in the script
|
||||
SCRIPT_PATH = "/usr/lib/moulinette/yunohost/vendor/spectre-meltdown-checker/spectre-meltdown-checker.sh"
|
||||
|
||||
# '--variant 3' corresponds to Meltdown
|
||||
# example output from the script:
|
||||
# [{"NAME":"MELTDOWN","CVE":"CVE-2017-5754","VULNERABLE":false,"INFOS":"PTI mitigates the vulnerability"}]
|
||||
try:
|
||||
self.logger_debug("Running meltdown vulnerability checker")
|
||||
call = subprocess.Popen("bash %s --batch json --variant 3" %
|
||||
SCRIPT_PATH, shell=True,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE)
|
||||
|
||||
# TODO / FIXME : here we are ignoring error messages ...
|
||||
# in particular on RPi2 and other hardware, the script complains about
|
||||
# "missing some kernel info (see -v), accuracy might be reduced"
|
||||
# Dunno what to do about that but we probably don't want to harass
|
||||
# users with this warning ...
|
||||
output, err = call.communicate()
|
||||
assert call.returncode in (0, 2, 3), "Return code: %s" % call.returncode
|
||||
|
||||
# If there are multiple lines, sounds like there was some messages
|
||||
# in stdout that are not json >.> ... Try to get the actual json
|
||||
# stuff which should be the last line
|
||||
output = output.strip()
|
||||
if "\n" in output:
|
||||
self.logger_debug("Original meltdown checker output : %s" % output)
|
||||
output = output.split("\n")[-1]
|
||||
|
||||
CVEs = json.loads(output)
|
||||
assert len(CVEs) == 1
|
||||
assert CVEs[0]["NAME"] == "MELTDOWN"
|
||||
except Exception as e:
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
self.logger_warning("Something wrong happened when trying to diagnose Meltdown vunerability, exception: %s" % e)
|
||||
raise Exception("Command output for failed meltdown check: '%s'" % output)
|
||||
|
||||
self.logger_debug("Writing results from meltdown checker to cache file, %s" % cache_file)
|
||||
write_to_json(cache_file, CVEs)
|
||||
return CVEs[0]["VULNERABLE"]
|
||||
|
||||
|
||||
def main(args, env, loggers):
|
||||
return SecurityDiagnoser(args, env, loggers).diagnose()
|
|
@ -39,6 +39,9 @@ else
|
|||
# Restore the configuration
|
||||
mv /etc/ldap/slapd.d "$TMPDIR"
|
||||
mkdir -p /etc/ldap/slapd.d
|
||||
cp -a "${backup_dir}/ldap.conf" /etc/ldap/ldap.conf
|
||||
cp -a "${backup_dir}/slapd.ldif" /etc/ldap/slapd.ldif
|
||||
# Legacy thing, but we need it to force the regen-conf in case it exists
|
||||
cp -a "${backup_dir}/slapd.conf" /etc/ldap/slapd.conf
|
||||
slapadd -F /etc/ldap/slapd.d -b cn=config \
|
||||
-l "${backup_dir}/cn=config.master.ldif" \
|
||||
|
|
|
@ -1,42 +1,5 @@
|
|||
backup_dir="$1/conf/ynh/mysql"
|
||||
MYSQL_PKG="$(dpkg --list | sed -ne 's/^ii \(mariadb-server-[[:digit:].]\+\) .*$/\1/p')"
|
||||
# We don't backup/restore mysql password anymore
|
||||
# c.f. https://github.com/YunoHost/yunohost/pull/912
|
||||
|
||||
. /usr/share/yunohost/helpers
|
||||
|
||||
# ensure that mysql is running
|
||||
service mysql status >/dev/null 2>&1 \
|
||||
|| service mysql start
|
||||
|
||||
# retrieve current and new password
|
||||
[ -f /etc/yunohost/mysql ] \
|
||||
&& curr_pwd=$(cat /etc/yunohost/mysql)
|
||||
new_pwd=$(cat "${backup_dir}/root_pwd" || cat "${backup_dir}/mysql")
|
||||
[ -z "$curr_pwd" ] && curr_pwd="yunohost"
|
||||
[ -z "$new_pwd" ] && {
|
||||
new_pwd=$(ynh_string_random 10)
|
||||
}
|
||||
|
||||
# attempt to change it
|
||||
mysqladmin -s -u root -p"$curr_pwd" password "$new_pwd" || {
|
||||
|
||||
echo "It seems that you have already configured MySQL." \
|
||||
"YunoHost needs to have a root access to MySQL to runs its" \
|
||||
"applications, and is going to reset the MySQL root password." \
|
||||
"You can find this new password in /etc/yunohost/mysql." >&2
|
||||
|
||||
# set new password with debconf
|
||||
debconf-set-selections << EOF
|
||||
$MYSQL_PKG mysql-server/root_password password $new_pwd
|
||||
$MYSQL_PKG mysql-server/root_password_again password $new_pwd
|
||||
EOF
|
||||
|
||||
# reconfigure Debian package
|
||||
dpkg-reconfigure -freadline -u "$MYSQL_PKG" 2>&1
|
||||
}
|
||||
|
||||
# store new root password
|
||||
echo "$new_pwd" | tee /etc/yunohost/mysql
|
||||
chmod 400 /etc/yunohost/mysql
|
||||
|
||||
# reload the grant tables
|
||||
mysqladmin -s -u root -p"$new_pwd" reload
|
||||
# This is a dummy empty file as a workaround for
|
||||
# https://github.com/YunoHost/issues/issues/1553 until it is fixed
|
||||
|
|
8
data/other/ffdhe2048.pem
Normal file
|
@ -0,0 +1,8 @@
|
|||
-----BEGIN DH PARAMETERS-----
|
||||
MIIBCAKCAQEA//////////+t+FRYortKmq/cViAnPTzx2LnFg84tNpWp4TZBFGQz
|
||||
+8yTnc4kmz75fS/jY2MMddj2gbICrsRhetPfHtXV/WVhJDP1H18GbtCFY2VVPe0a
|
||||
87VXE15/V8k1mE8McODmi3fipona8+/och3xWKE2rec1MKzKT0g6eXq8CrGCsyT7
|
||||
YdEIqUuyyOP7uWrat2DX9GgdT0Kj3jlN9K5W7edjcrsZCwenyO4KbXCeAvzhzffi
|
||||
7MA0BM0oNC9hkXL+nOmFg/+OTxIy7vKBg8P+OxtMb61zO7X8vC7CIAXFjvGDfRaD
|
||||
ssbzSibBsu/6iGtCOGEoXJf//////////wIBAg==
|
||||
-----END DH PARAMETERS-----
|
|
@ -14,18 +14,16 @@ mail_plugins = $mail_plugins quota
|
|||
|
||||
###############################################################################
|
||||
|
||||
# generated 2020-04-03, Mozilla Guideline v5.4, Dovecot 2.2.27, OpenSSL 1.1.0l, intermediate configuration
|
||||
# https://ssl-config.mozilla.org/#server=dovecot&version=2.2.27&config=intermediate&openssl=1.1.0l&guideline=5.4
|
||||
# generated 2020-05-02, Mozilla Guideline v5.4, Dovecot 2.3.4.1, OpenSSL 1.1.1d, intermediate configuration
|
||||
# https://ssl-config.mozilla.org/#server=dovecot&version=2.3.4.1&config=intermediate&openssl=1.1.1d&guideline=5.4
|
||||
|
||||
ssl = required
|
||||
|
||||
ssl_cert = </etc/yunohost/certs/{{ main_domain }}/crt.pem
|
||||
ssl_key = </etc/yunohost/certs/{{ main_domain }}/key.pem
|
||||
|
||||
ssl_dh_parameters_length = 2048
|
||||
|
||||
# intermediate configuration
|
||||
ssl_protocols = TLSv1.2
|
||||
ssl_min_protocol = TLSv1.2
|
||||
ssl_cipher_list = ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384
|
||||
ssl_prefer_server_ciphers = no
|
||||
|
||||
|
|
|
@ -47,6 +47,10 @@ Component "muc.{{ domain }}" "muc"
|
|||
|
||||
muc_event_rate = 0.5
|
||||
muc_burst_factor = 10
|
||||
room_default_config = {
|
||||
logging = true,
|
||||
persistent = true
|
||||
};
|
||||
|
||||
---Set up a PubSub server
|
||||
Component "pubsub.{{ domain }}" "pubsub"
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
lua_shared_dict cache 10m;
|
||||
init_by_lua_file /usr/share/ssowat/init.lua;
|
||||
server_names_hash_bucket_size 64;
|
||||
server_names_hash_bucket_size 128;
|
||||
|
|
|
@ -6,6 +6,9 @@ location /yunohost/admin/ {
|
|||
default_type text/html;
|
||||
index index.html;
|
||||
|
||||
more_set_headers "Content-Security-Policy: upgrade-insecure-requests; default-src 'self'; connect-src 'self' https://raw.githubusercontent.com https://paste.yunohost.org wss://$host; style-src 'self' 'unsafe-inline'; script-src 'self' 'unsafe-eval'; object-src 'none';";
|
||||
more_set_headers "Content-Security-Policy-Report-Only:";
|
||||
|
||||
# Short cache on handlebars templates
|
||||
location ~* \.(?:ms)$ {
|
||||
expires 5m;
|
||||
|
|
3
data/templates/nginx/redirect_to_admin.conf
Normal file
|
@ -0,0 +1,3 @@
|
|||
location / {
|
||||
return 302 https://$http_host/yunohost/admin;
|
||||
}
|
|
@ -2,27 +2,31 @@ ssl_session_timeout 1d;
|
|||
ssl_session_cache shared:SSL:50m; # about 200000 sessions
|
||||
ssl_session_tickets off;
|
||||
|
||||
# nginx 1.10 in stretch doesn't support TLS1.3 and Mozilla doesn't have any
|
||||
# "modern" config recommendation with it.
|
||||
# So until buster the modern conf is same as intermediate
|
||||
{% if compatibility == "modern" %} {% else %} {% endif %}
|
||||
|
||||
{% if compatibility == "modern" %}
|
||||
# generated 2020-08-14, Mozilla Guideline v5.6, nginx 1.14.2, OpenSSL 1.1.1d, modern configuration
|
||||
# https://ssl-config.mozilla.org/#server=nginx&version=1.14.2&config=modern&openssl=1.1.1d&guideline=5.6
|
||||
ssl_protocols TLSv1.3;
|
||||
ssl_prefer_server_ciphers off;
|
||||
{% else %}
|
||||
# Ciphers with intermediate compatibility
|
||||
# generated 2020-04-03, Mozilla Guideline v5.4, nginx 1.10.3, OpenSSL 1.1.0l, intermediate configuration
|
||||
# https://ssl-config.mozilla.org/#server=nginx&version=1.10.3&config=intermediate&openssl=1.1.0l&guideline=5.4
|
||||
ssl_protocols TLSv1.2;
|
||||
# generated 2020-08-14, Mozilla Guideline v5.6, nginx 1.14.2, OpenSSL 1.1.1d, intermediate configuration
|
||||
# https://ssl-config.mozilla.org/#server=nginx&version=1.14.2&config=intermediate&openssl=1.1.1d&guideline=5.6
|
||||
ssl_protocols TLSv1.2 TLSv1.3;
|
||||
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
|
||||
ssl_prefer_server_ciphers off;
|
||||
|
||||
# Uncomment the following directive after DH generation
|
||||
# > openssl dhparam -out /etc/ssl/private/dh2048.pem -outform PEM -2 2048
|
||||
#ssl_dhparam /etc/ssl/private/dh2048.pem;
|
||||
# Pre-defined FFDHE group (RFC 7919)
|
||||
# From https://ssl-config.mozilla.org/ffdhe2048.txt
|
||||
# https://security.stackexchange.com/a/149818
|
||||
ssl_dhparam /usr/share/yunohost/other/ffdhe2048.pem;
|
||||
{% endif %}
|
||||
|
||||
|
||||
# Follows the Web Security Directives from the Mozilla Dev Lab and the Mozilla Observatory + Partners
|
||||
# https://wiki.mozilla.org/Security/Guidelines/Web_Security
|
||||
# https://observatory.mozilla.org/
|
||||
more_set_headers "Content-Security-Policy : upgrade-insecure-requests";
|
||||
more_set_headers "Content-Security-Policy-Report-Only : default-src https: data: 'unsafe-inline' 'unsafe-eval'";
|
||||
more_set_headers "Content-Security-Policy-Report-Only : default-src https: data: 'unsafe-inline' 'unsafe-eval' ";
|
||||
more_set_headers "X-Content-Type-Options : nosniff";
|
||||
more_set_headers "X-XSS-Protection : 1; mode=block";
|
||||
more_set_headers "X-Download-Options : noopen";
|
||||
|
|
|
@ -2,13 +2,7 @@ server {
|
|||
listen 80 default_server;
|
||||
listen [::]:80 default_server;
|
||||
|
||||
location / {
|
||||
return 302 https://$http_host/yunohost/admin;
|
||||
}
|
||||
|
||||
location /yunohost/admin {
|
||||
return 301 https://$http_host$request_uri;
|
||||
}
|
||||
include /etc/nginx/conf.d/default.d/*.conf;
|
||||
}
|
||||
|
||||
server {
|
||||
|
@ -22,23 +16,13 @@ server {
|
|||
|
||||
more_set_headers "Strict-Transport-Security : max-age=63072000; includeSubDomains; preload";
|
||||
more_set_headers "Referrer-Policy : 'same-origin'";
|
||||
more_set_headers "Content-Security-Policy : upgrade-insecure-requests; object-src 'none'; script-src https: 'unsafe-eval'";
|
||||
|
||||
location / {
|
||||
return 302 https://$http_host/yunohost/admin;
|
||||
}
|
||||
|
||||
location /yunohost {
|
||||
# Block crawlers bot
|
||||
if ($http_user_agent ~ (crawl|Googlebot|Slurp|spider|bingbot|tracker|click|parser|spider|facebookexternalhit) ) {
|
||||
return 403;
|
||||
}
|
||||
# X-Robots-Tag to precise the rules applied.
|
||||
add_header X-Robots-Tag "nofollow, noindex, noarchive, nosnippet";
|
||||
# Redirect most of 404 to maindomain.tld/yunohost/sso
|
||||
access_by_lua_file /usr/share/ssowat/access.lua;
|
||||
}
|
||||
|
||||
include /etc/nginx/conf.d/yunohost_admin.conf.inc;
|
||||
include /etc/nginx/conf.d/yunohost_api.conf.inc;
|
||||
include /etc/nginx/conf.d/default.d/*.conf;
|
||||
}
|
||||
|
|
|
@ -15,6 +15,18 @@ base dc=yunohost,dc=org
|
|||
# The LDAP protocol version to use.
|
||||
#ldap_version 3
|
||||
|
||||
# The DN to bind with for normal lookups.
|
||||
#binddn cn=annonymous,dc=example,dc=net
|
||||
#bindpw secret
|
||||
|
||||
# The DN used for password modifications by root.
|
||||
#rootpwmoddn cn=admin,dc=example,dc=com
|
||||
|
||||
# SSL options
|
||||
#ssl off
|
||||
#tls_reqcert never
|
||||
tls_cacertfile /etc/ssl/certs/ca-certificates.crt
|
||||
|
||||
# The search scope.
|
||||
#scope sub
|
||||
|
||||
|
|
|
@ -1,12 +1,8 @@
|
|||
# /etc/nsswitch.conf
|
||||
#
|
||||
# Example configuration of GNU Name Service Switch functionality.
|
||||
# If you have the `glibc-doc-reference' and `info' packages installed, try:
|
||||
# `info libc "Name Service Switch"' for information about this file.
|
||||
|
||||
passwd: compat ldap
|
||||
group: compat ldap
|
||||
shadow: compat ldap
|
||||
passwd: files systemd ldap
|
||||
group: files systemd ldap
|
||||
shadow: files ldap
|
||||
gshadow: files
|
||||
|
||||
hosts: files myhostname mdns4_minimal [NOTFOUND=return] dns
|
||||
|
|
|
@ -170,7 +170,7 @@ smtpd_milters = inet:localhost:11332
|
|||
milter_default_action = accept
|
||||
|
||||
# Avoid sending too many emails simultaneously
|
||||
smtp_destination_concurrency_limit = 1
|
||||
smtp_destination_concurrency_limit = 2
|
||||
default_destination_rate_delay = 5s
|
||||
|
||||
# Avoid email address scanning
|
||||
|
|
|
@ -2,58 +2,62 @@
|
|||
## Version 0.1
|
||||
## Adrien Beudin
|
||||
|
||||
dn: cn=mailserver,cn=schema,cn=config
|
||||
objectClass: olcSchemaConfig
|
||||
cn: mailserver
|
||||
#
|
||||
# Attributes
|
||||
attributetype ( 1.3.6.1.4.1.40328.1.20.2.1
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.40328.1.20.2.1
|
||||
NAME 'maildrop'
|
||||
DESC 'Mail addresses where mails are forwarded -- ie forwards'
|
||||
EQUALITY caseIgnoreMatch
|
||||
SUBSTR caseIgnoreSubstringsMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{512})
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.40328.1.20.2.2
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.40328.1.20.2.2
|
||||
NAME 'mailalias'
|
||||
DESC 'Mail addresses accepted by this account -- ie aliases'
|
||||
EQUALITY caseIgnoreMatch
|
||||
SUBSTR caseIgnoreSubstringsMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{512})
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.40328.1.20.2.3
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.40328.1.20.2.3
|
||||
NAME 'mailenable'
|
||||
DESC 'Mail Account validity'
|
||||
EQUALITY caseIgnoreMatch
|
||||
SUBSTR caseIgnoreSubstringsMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{8})
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.40328.1.20.2.4
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.40328.1.20.2.4
|
||||
NAME 'mailbox'
|
||||
DESC 'Mailbox path where mails are delivered'
|
||||
EQUALITY caseIgnoreMatch
|
||||
SUBSTR caseIgnoreSubstringsMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{512})
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.40328.1.20.2.5
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.40328.1.20.2.5
|
||||
NAME 'virtualdomain'
|
||||
DESC 'A mail domain name'
|
||||
EQUALITY caseIgnoreMatch
|
||||
SUBSTR caseIgnoreSubstringsMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{512})
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.40328.1.20.2.6
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.40328.1.20.2.6
|
||||
NAME 'virtualdomaindescription'
|
||||
DESC 'Virtual domain description'
|
||||
EQUALITY caseIgnoreMatch
|
||||
SUBSTR caseIgnoreSubstringsMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{512})
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.40328.1.20.2.7
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.40328.1.20.2.7
|
||||
NAME 'mailuserquota'
|
||||
DESC 'Mailbox quota for a user'
|
||||
EQUALITY caseIgnoreMatch
|
||||
SUBSTR caseIgnoreSubstringsMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{16} SINGLE-VALUE )
|
||||
|
||||
#
|
||||
# Mail Account Objectclass
|
||||
objectclass ( 1.3.6.1.4.1.40328.1.1.2.1
|
||||
olcObjectClasses: ( 1.3.6.1.4.1.40328.1.1.2.1
|
||||
NAME 'mailAccount'
|
||||
DESC 'Mail Account'
|
||||
SUP top
|
||||
|
@ -65,9 +69,9 @@ objectclass ( 1.3.6.1.4.1.40328.1.1.2.1
|
|||
mailalias $ maildrop $ mailenable $ mailbox $ mailuserquota
|
||||
)
|
||||
)
|
||||
|
||||
#
|
||||
# Mail Domain Objectclass
|
||||
objectclass ( 1.3.6.1.4.1.40328.1.1.2.2
|
||||
olcObjectClasses: ( 1.3.6.1.4.1.40328.1.1.2.2
|
||||
NAME 'mailDomain'
|
||||
DESC 'Domain mail entry'
|
||||
SUP top
|
||||
|
@ -79,9 +83,9 @@ objectclass ( 1.3.6.1.4.1.40328.1.1.2.2
|
|||
virtualdomaindescription $ mailuserquota
|
||||
)
|
||||
)
|
||||
|
||||
#
|
||||
# Mail Group Objectclass
|
||||
objectclass ( 1.3.6.1.4.1.40328.1.1.2.3
|
||||
olcObjectClasses: ( 1.3.6.1.4.1.40328.1.1.2.3
|
||||
NAME 'mailGroup' SUP top AUXILIARY
|
||||
DESC 'Mail Group'
|
||||
MUST ( mail )
|
|
@ -1,33 +1,35 @@
|
|||
#dn: cn=yunohost,cn=schema,cn=config
|
||||
#objectClass: olcSchemaConfig
|
||||
#cn: yunohost
|
||||
# Yunohost schema for group and permission support
|
||||
|
||||
dn: cn=yunohost,cn=schema,cn=config
|
||||
objectClass: olcSchemaConfig
|
||||
cn: yunohost
|
||||
# ATTRIBUTES
|
||||
# For Permission
|
||||
attributetype ( 1.3.6.1.4.1.17953.9.1.1 NAME 'permission'
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.17953.9.1.1 NAME 'permission'
|
||||
DESC 'Yunohost permission on user and group side'
|
||||
SUP distinguishedName )
|
||||
attributetype ( 1.3.6.1.4.1.17953.9.1.2 NAME 'groupPermission'
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.17953.9.1.2 NAME 'groupPermission'
|
||||
DESC 'Yunohost permission for a group on permission side'
|
||||
SUP distinguishedName )
|
||||
attributetype ( 1.3.6.1.4.1.17953.9.1.3 NAME 'inheritPermission'
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.17953.9.1.3 NAME 'inheritPermission'
|
||||
DESC 'Yunohost permission for user on permission side'
|
||||
SUP distinguishedName )
|
||||
attributetype ( 1.3.6.1.4.1.17953.9.1.4 NAME 'URL'
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.17953.9.1.4 NAME 'URL'
|
||||
DESC 'Yunohost application URL'
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} )
|
||||
# OBJECTCLASS
|
||||
# For Applications
|
||||
objectclass ( 1.3.6.1.4.1.17953.9.2.1 NAME 'groupOfNamesYnh'
|
||||
olcObjectClasses: ( 1.3.6.1.4.1.17953.9.2.1 NAME 'groupOfNamesYnh'
|
||||
DESC 'Yunohost user group'
|
||||
SUP top AUXILIARY
|
||||
MAY ( member $ businessCategory $ seeAlso $ owner $ ou $ o $ permission ) )
|
||||
objectclass ( 1.3.6.1.4.1.17953.9.2.2 NAME 'permissionYnh'
|
||||
olcObjectClasses: ( 1.3.6.1.4.1.17953.9.2.2 NAME 'permissionYnh'
|
||||
DESC 'a Yunohost application'
|
||||
SUP top AUXILIARY
|
||||
MUST cn
|
||||
MAY ( groupPermission $ inheritPermission $ URL ) )
|
||||
# For User
|
||||
objectclass ( 1.3.6.1.4.1.17953.9.2.3 NAME 'userPermissionYnh'
|
||||
olcObjectClasses: ( 1.3.6.1.4.1.17953.9.2.3 NAME 'userPermissionYnh'
|
||||
DESC 'a Yunohost application'
|
||||
SUP top AUXILIARY
|
||||
MAY ( permission ) )
|
|
@ -1,154 +0,0 @@
|
|||
# This is the main slapd configuration file. See slapd.conf(5) for more
|
||||
# info on the configuration options.
|
||||
|
||||
#######################################################################
|
||||
# Global Directives:
|
||||
|
||||
# Features to permit
|
||||
#allow bind_v2
|
||||
|
||||
# Schema and objectClass definitions
|
||||
include /etc/ldap/schema/core.schema
|
||||
include /etc/ldap/schema/cosine.schema
|
||||
include /etc/ldap/schema/nis.schema
|
||||
include /etc/ldap/schema/inetorgperson.schema
|
||||
include /etc/ldap/schema/mailserver.schema
|
||||
include /etc/ldap/schema/sudo.schema
|
||||
include /etc/ldap/schema/yunohost.schema
|
||||
|
||||
# Where the pid file is put. The init.d script
|
||||
# will not stop the server if you change this.
|
||||
pidfile /var/run/slapd/slapd.pid
|
||||
|
||||
# List of arguments that were passed to the server
|
||||
argsfile /var/run/slapd/slapd.args
|
||||
|
||||
# Read slapd.conf(5) for possible values
|
||||
loglevel none
|
||||
|
||||
# Hashes to be used in generation of user passwords
|
||||
password-hash {SSHA}
|
||||
|
||||
# Where the dynamically loaded modules are stored
|
||||
modulepath /usr/lib/ldap
|
||||
moduleload back_mdb
|
||||
moduleload memberof
|
||||
|
||||
# The maximum number of entries that is returned for a search operation
|
||||
sizelimit 500
|
||||
|
||||
# The tool-threads parameter sets the actual amount of cpu's that is used
|
||||
# for indexing.
|
||||
tool-threads 1
|
||||
|
||||
# TLS Support
|
||||
TLSCertificateFile /etc/yunohost/certs/yunohost.org/crt.pem
|
||||
TLSCertificateKeyFile /etc/yunohost/certs/yunohost.org/key.pem
|
||||
|
||||
#######################################################################
|
||||
# Specific Backend Directives for mdb:
|
||||
# Backend specific directives apply to this backend until another
|
||||
# 'backend' directive occurs
|
||||
backend mdb
|
||||
|
||||
#######################################################################
|
||||
# Specific Directives for database #1, of type mdb:
|
||||
# Database specific directives apply to this databasse until another
|
||||
# 'database' directive occurs
|
||||
database mdb
|
||||
|
||||
# The base of your directory in database #1
|
||||
suffix "dc=yunohost,dc=org"
|
||||
|
||||
# rootdn directive for specifying a superuser on the database. This is needed
|
||||
# for syncrepl.
|
||||
# rootdn "cn=admin,dc=yunohost,dc=org"
|
||||
|
||||
# Where the database file are physically stored for database #1
|
||||
directory "/var/lib/ldap"
|
||||
|
||||
# Indexing options for database #1
|
||||
index objectClass eq
|
||||
index uid,sudoUser eq,sub
|
||||
index entryCSN,entryUUID eq
|
||||
index cn,mail eq
|
||||
index gidNumber,uidNumber eq
|
||||
index member,memberUid,uniqueMember eq
|
||||
index virtualdomain eq
|
||||
index permission eq
|
||||
|
||||
# Save the time that the entry gets modified, for database #1
|
||||
lastmod on
|
||||
|
||||
# Checkpoint the BerkeleyDB database periodically in case of system
|
||||
# failure and to speed slapd shutdown.
|
||||
checkpoint 512 30
|
||||
|
||||
# The userPassword by default can be changed
|
||||
# by the entry owning it if they are authenticated.
|
||||
# Others should not be able to see it, except the
|
||||
# admin entry below
|
||||
# These access lines apply to database #1 only
|
||||
access to attrs=userPassword,shadowLastChange
|
||||
by dn="cn=admin,dc=yunohost,dc=org" write
|
||||
by dn.exact="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" write
|
||||
by anonymous auth
|
||||
by self write
|
||||
by * none
|
||||
|
||||
# Personnal information can be changed by the entry
|
||||
# owning it if they are authenticated.
|
||||
# Others should be able to see it.
|
||||
access to attrs=cn,gecos,givenName,mail,maildrop,displayName,sn
|
||||
by dn="cn=admin,dc=yunohost,dc=org" write
|
||||
by dn.exact="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" write
|
||||
by self write
|
||||
by * read
|
||||
|
||||
# Ensure read access to the base for things like
|
||||
# supportedSASLMechanisms. Without this you may
|
||||
# have problems with SASL not knowing what
|
||||
# mechanisms are available and the like.
|
||||
# Note that this is covered by the 'access to *'
|
||||
# ACL below too but if you change that as people
|
||||
# are wont to do you'll still need this if you
|
||||
# want SASL (and possible other things) to work
|
||||
# happily.
|
||||
access to dn.base="" by * read
|
||||
|
||||
# The admin dn has full write access, everyone else
|
||||
# can read everything.
|
||||
access to *
|
||||
by dn="cn=admin,dc=yunohost,dc=org" write
|
||||
by dn.exact="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" write
|
||||
by group/groupOfNames/Member="cn=admin,ou=groups,dc=yunohost,dc=org" write
|
||||
by * read
|
||||
|
||||
# Configure Memberof Overlay (used for Yunohost permission)
|
||||
|
||||
# Link user <-> group
|
||||
#dn: olcOverlay={0}memberof,olcDatabase={1}mdb,cn=config
|
||||
overlay memberof
|
||||
memberof-group-oc groupOfNamesYnh
|
||||
memberof-member-ad member
|
||||
memberof-memberof-ad memberOf
|
||||
memberof-dangling error
|
||||
memberof-refint TRUE
|
||||
|
||||
# Link permission <-> groupes
|
||||
#dn: olcOverlay={1}memberof,olcDatabase={1}mdb,cn=config
|
||||
overlay memberof
|
||||
memberof-group-oc permissionYnh
|
||||
memberof-member-ad groupPermission
|
||||
memberof-memberof-ad permission
|
||||
memberof-dangling error
|
||||
memberof-refint TRUE
|
||||
|
||||
# Link permission <-> user
|
||||
#dn: olcOverlay={2}memberof,olcDatabase={1}mdb,cn=config
|
||||
overlay memberof
|
||||
memberof-group-oc permissionYnh
|
||||
memberof-member-ad inheritPermission
|
||||
memberof-memberof-ad permission
|
||||
memberof-dangling error
|
||||
memberof-refint TRUE
|
235
data/templates/slapd/slapd.ldif
Normal file
|
@ -0,0 +1,235 @@
|
|||
# OpenLDAP server configuration for Yunohost
|
||||
# ------------------------------------------
|
||||
#
|
||||
# Because of YunoHost's regen-conf mechanism, it is NOT POSSIBLE to
|
||||
# edit the config database using an LDAP request.
|
||||
#
|
||||
# If you wish to edit the config database, you should edit THIS file
|
||||
# and update the config database based on this file.
|
||||
#
|
||||
# Config database customization:
|
||||
# 1. Edit this file as you want.
|
||||
# 2. Apply your modifications. For this just run this following command in a shell:
|
||||
# $ /usr/share/yunohost/hooks/conf_regen/06-slapd apply_config
|
||||
#
|
||||
# Note that if you customize this file, YunoHost's regen-conf will NOT
|
||||
# overwrite this file. But that also means that you should be careful about
|
||||
# upgrades, because they may ship important/necessary changes to this
|
||||
# configuration that you will have to propagate yourself.
|
||||
|
||||
#
|
||||
# Main configuration
|
||||
#
|
||||
dn: cn=config
|
||||
objectClass: olcGlobal
|
||||
cn: config
|
||||
olcConfigFile: /etc/ldap/slapd.conf
|
||||
olcConfigDir: /etc/ldap/slapd.d/
|
||||
# List of arguments that were passed to the server
|
||||
olcArgsFile: /var/run/slapd/slapd.args
|
||||
#
|
||||
olcAttributeOptions: lang-
|
||||
olcAuthzPolicy: none
|
||||
olcConcurrency: 0
|
||||
olcConnMaxPending: 100
|
||||
olcConnMaxPendingAuth: 1000
|
||||
olcIdleTimeout: 0
|
||||
olcIndexSubstrIfMaxLen: 4
|
||||
olcIndexSubstrIfMinLen: 2
|
||||
olcIndexSubstrAnyLen: 4
|
||||
olcIndexSubstrAnyStep: 2
|
||||
olcIndexIntLen: 4
|
||||
olcListenerThreads: 1
|
||||
olcLocalSSF: 71
|
||||
# Read slapd.conf(5) for possible values
|
||||
olcLogLevel: None
|
||||
# Where the pid file is put. The init.d script
|
||||
# will not stop the server if you change this.
|
||||
olcPidFile: /var/run/slapd/slapd.pid
|
||||
olcReverseLookup: FALSE
|
||||
olcThreads: 16
|
||||
# TLS Support
|
||||
olcTLSCertificateFile: /etc/yunohost/certs/yunohost.org/crt.pem
|
||||
olcTLSCertificateKeyFile: /etc/yunohost/certs/yunohost.org/key.pem
|
||||
olcTLSVerifyClient: never
|
||||
olcTLSProtocolMin: 0.0
|
||||
# The tool-threads parameter sets the actual amount of cpu's that is used
|
||||
# for indexing.
|
||||
olcToolThreads: 1
|
||||
structuralObjectClass: olcGlobal
|
||||
|
||||
#
|
||||
# Schema and objectClass definitions
|
||||
#
|
||||
dn: cn=schema,cn=config
|
||||
objectClass: olcSchemaConfig
|
||||
cn: schema
|
||||
|
||||
include: file:///etc/ldap/schema/core.ldif
|
||||
include: file:///etc/ldap/schema/cosine.ldif
|
||||
include: file:///etc/ldap/schema/nis.ldif
|
||||
include: file:///etc/ldap/schema/inetorgperson.ldif
|
||||
include: file:///etc/ldap/schema/mailserver.ldif
|
||||
include: file:///etc/ldap/schema/sudo.ldif
|
||||
include: file:///etc/ldap/schema/permission.ldif
|
||||
|
||||
#
|
||||
# Module management
|
||||
#
|
||||
dn: cn=module{0},cn=config
|
||||
objectClass: olcModuleList
|
||||
cn: module{0}
|
||||
# Where the dynamically loaded modules are stored
|
||||
olcModulePath: /usr/lib/ldap
|
||||
olcModuleLoad: {0}back_mdb
|
||||
olcModuleLoad: {1}memberof
|
||||
structuralObjectClass: olcModuleList
|
||||
|
||||
#
|
||||
# Frontend database
|
||||
#
|
||||
dn: olcDatabase={-1}frontend,cn=config
|
||||
objectClass: olcDatabaseConfig
|
||||
objectClass: olcFrontendConfig
|
||||
olcDatabase: {-1}frontend
|
||||
olcAddContentAcl: FALSE
|
||||
olcLastMod: TRUE
|
||||
olcSchemaDN: cn=Subschema
|
||||
# Hashes to be used in generation of user passwords
|
||||
olcPasswordHash: {SSHA}
|
||||
structuralObjectClass: olcDatabaseConfig
|
||||
|
||||
#
|
||||
# Config database Configuration (#0)
|
||||
#
|
||||
dn: olcDatabase={0}config,cn=config
|
||||
objectClass: olcDatabaseConfig
|
||||
olcDatabase: {0}config
|
||||
# Give access to root user.
|
||||
# This gives the admin the possibility to customize the LDAP configuration
|
||||
olcAccess: {0}to * by * none
|
||||
olcAddContentAcl: TRUE
|
||||
olcLastMod: TRUE
|
||||
olcRootDN: cn=config
|
||||
structuralObjectClass: olcDatabaseConfig
|
||||
|
||||
#
|
||||
# Main database Configuration (#1)
|
||||
#
|
||||
dn: olcDatabase={1}mdb,cn=config
|
||||
objectClass: olcDatabaseConfig
|
||||
objectClass: olcMdbConfig
|
||||
olcDatabase: {1}mdb
|
||||
# The base of your directory in database #1
|
||||
olcSuffix: dc=yunohost,dc=org
|
||||
#
|
||||
# The userPassword by default can be changed
|
||||
# by the entry owning it if they are authenticated.
|
||||
# Others should not be able to see it, except the
|
||||
# admin entry below
|
||||
# These access lines apply to database #1 only
|
||||
olcAccess: {0}to attrs=userPassword,shadowLastChange
|
||||
by dn.base="cn=admin,dc=yunohost,dc=org" write
|
||||
by dn.base="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" write
|
||||
by anonymous auth
|
||||
by self write
|
||||
by * none
|
||||
#
|
||||
# Personal information can be changed by the entry
|
||||
# owning it if they are authenticated.
|
||||
# Others should be able to see it.
|
||||
olcAccess: {1}to attrs=cn,gecos,givenName,mail,maildrop,displayName,sn
|
||||
by dn.base="cn=admin,dc=yunohost,dc=org" write
|
||||
by dn.base="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" write
|
||||
by self write
|
||||
by * read
|
||||
#
|
||||
# Ensure read access to the base for things like
|
||||
# supportedSASLMechanisms. Without this you may
|
||||
# have problems with SASL not knowing what
|
||||
# mechanisms are available and the like.
|
||||
# Note that this is covered by the 'access to *'
|
||||
# ACL below too but if you change that as people
|
||||
# are wont to do you'll still need this if you
|
||||
# want SASL (and possible other things) to work
|
||||
# happily.
|
||||
olcAccess: {2}to dn.base=""
|
||||
by * read
|
||||
#
|
||||
# The admin dn has full write access, everyone else
|
||||
# can read everything.
|
||||
olcAccess: {3}to *
|
||||
by dn.base="cn=admin,dc=yunohost,dc=org" write
|
||||
by dn.base="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" write
|
||||
by group/groupOfNames/member.exact="cn=admin,ou=groups,dc=yunohost,dc=org" write
|
||||
by * read
|
||||
#
|
||||
olcAddContentAcl: FALSE
|
||||
# Save the time that the entry gets modified, for database #1
|
||||
olcLastMod: TRUE
|
||||
# Where the database file are physically stored for database #1
|
||||
olcDbDirectory: /var/lib/ldap
|
||||
# Checkpoint the BerkeleyDB database periodically in case of system
|
||||
# failure and to speed slapd shutdown.
|
||||
olcDbCheckpoint: 512 30
|
||||
olcDbNoSync: FALSE
|
||||
# Indexing options for database #1
|
||||
olcDbIndex: objectClass eq
|
||||
olcDbIndex: entryUUID eq
|
||||
olcDbIndex: entryCSN eq
|
||||
olcDbIndex: cn eq
|
||||
olcDbIndex: uid eq,sub
|
||||
olcDbIndex: uidNumber eq
|
||||
olcDbIndex: gidNumber eq
|
||||
olcDbIndex: sudoUser eq,sub
|
||||
olcDbIndex: member eq
|
||||
olcDbIndex: mail eq
|
||||
olcDbIndex: memberUid eq
|
||||
olcDbIndex: uniqueMember eq
|
||||
olcDbIndex: virtualdomain eq
|
||||
olcDbIndex: permission eq
|
||||
olcDbMaxSize: 10485760
|
||||
structuralObjectClass: olcMdbConfig
|
||||
|
||||
#
|
||||
# Configure Memberof Overlay (used for Yunohost permission)
|
||||
#
|
||||
|
||||
# Link user <-> group
|
||||
dn: olcOverlay={0}memberof,olcDatabase={1}mdb,cn=config
|
||||
objectClass: olcOverlayConfig
|
||||
objectClass: olcMemberOf
|
||||
olcOverlay: {0}memberof
|
||||
olcMemberOfDangling: error
|
||||
olcMemberOfDanglingError: constraintViolation
|
||||
olcMemberOfRefInt: TRUE
|
||||
olcMemberOfGroupOC: groupOfNamesYnh
|
||||
olcMemberOfMemberAD: member
|
||||
olcMemberOfMemberOfAD: memberOf
|
||||
structuralObjectClass: olcMemberOf
|
||||
|
||||
# Link permission <-> groupes
|
||||
dn: olcOverlay={1}memberof,olcDatabase={1}mdb,cn=config
|
||||
objectClass: olcOverlayConfig
|
||||
objectClass: olcMemberOf
|
||||
olcOverlay: {1}memberof
|
||||
olcMemberOfDangling: error
|
||||
olcMemberOfDanglingError: constraintViolation
|
||||
olcMemberOfRefInt: TRUE
|
||||
olcMemberOfGroupOC: permissionYnh
|
||||
olcMemberOfMemberAD: groupPermission
|
||||
olcMemberOfMemberOfAD: permission
|
||||
structuralObjectClass: olcMemberOf
|
||||
|
||||
# Link permission <-> user
|
||||
dn: olcOverlay={2}memberof,olcDatabase={1}mdb,cn=config
|
||||
objectClass: olcOverlayConfig
|
||||
objectClass: olcMemberOf
|
||||
olcOverlay: {2}memberof
|
||||
olcMemberOfDangling: error
|
||||
olcMemberOfDanglingError: constraintViolation
|
||||
olcMemberOfRefInt: TRUE
|
||||
olcMemberOfGroupOC: permissionYnh
|
||||
olcMemberOfMemberAD: inheritPermission
|
||||
olcMemberOfMemberOfAD: permission
|
||||
structuralObjectClass: olcMemberOf
|
|
@ -1,76 +1,78 @@
|
|||
#
|
||||
# OpenLDAP schema file for Sudo
|
||||
# Save as /etc/openldap/schema/sudo.schema
|
||||
# Save as /etc/openldap/schema/sudo.ldif
|
||||
#
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.15953.9.1.1
|
||||
dn: cn=sudo,cn=schema,cn=config
|
||||
objectClass: olcSchemaConfig
|
||||
cn: sudo
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.15953.9.1.1
|
||||
NAME 'sudoUser'
|
||||
DESC 'User(s) who may run sudo'
|
||||
EQUALITY caseExactIA5Match
|
||||
SUBSTR caseExactIA5SubstringsMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 )
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.15953.9.1.2
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.15953.9.1.2
|
||||
NAME 'sudoHost'
|
||||
DESC 'Host(s) who may run sudo'
|
||||
EQUALITY caseExactIA5Match
|
||||
SUBSTR caseExactIA5SubstringsMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 )
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.15953.9.1.3
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.15953.9.1.3
|
||||
NAME 'sudoCommand'
|
||||
DESC 'Command(s) to be executed by sudo'
|
||||
EQUALITY caseExactIA5Match
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 )
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.15953.9.1.4
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.15953.9.1.4
|
||||
NAME 'sudoRunAs'
|
||||
DESC 'User(s) impersonated by sudo (deprecated)'
|
||||
EQUALITY caseExactIA5Match
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 )
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.15953.9.1.5
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.15953.9.1.5
|
||||
NAME 'sudoOption'
|
||||
DESC 'Options(s) followed by sudo'
|
||||
EQUALITY caseExactIA5Match
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 )
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.15953.9.1.6
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.15953.9.1.6
|
||||
NAME 'sudoRunAsUser'
|
||||
DESC 'User(s) impersonated by sudo'
|
||||
EQUALITY caseExactIA5Match
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 )
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.15953.9.1.7
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.15953.9.1.7
|
||||
NAME 'sudoRunAsGroup'
|
||||
DESC 'Group(s) impersonated by sudo'
|
||||
EQUALITY caseExactIA5Match
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 )
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.15953.9.1.8
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.15953.9.1.8
|
||||
NAME 'sudoNotBefore'
|
||||
DESC 'Start of time interval for which the entry is valid'
|
||||
EQUALITY generalizedTimeMatch
|
||||
ORDERING generalizedTimeOrderingMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 )
|
||||
|
||||
attributetype ( 1.3.6.1.4.1.15953.9.1.9
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.15953.9.1.9
|
||||
NAME 'sudoNotAfter'
|
||||
DESC 'End of time interval for which the entry is valid'
|
||||
EQUALITY generalizedTimeMatch
|
||||
ORDERING generalizedTimeOrderingMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 )
|
||||
|
||||
attributeTypes ( 1.3.6.1.4.1.15953.9.1.10
|
||||
#
|
||||
olcAttributeTypes: ( 1.3.6.1.4.1.15953.9.1.10
|
||||
NAME 'sudoOrder'
|
||||
DESC 'an integer to order the sudoRole entries'
|
||||
EQUALITY integerMatch
|
||||
ORDERING integerOrderingMatch
|
||||
SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 )
|
||||
|
||||
objectclass ( 1.3.6.1.4.1.15953.9.2.1 NAME 'sudoRole' SUP top STRUCTURAL
|
||||
#
|
||||
olcObjectClasses: ( 1.3.6.1.4.1.15953.9.2.1 NAME 'sudoRole' SUP top STRUCTURAL
|
||||
DESC 'Sudoer Entries'
|
||||
MUST ( cn )
|
||||
MAY ( sudoUser $ sudoHost $ sudoCommand $ sudoRunAs $ sudoRunAsUser $ sudoRunAsGroup $ sudoOption $ sudoOrder $ sudoNotBefore $ sudoNotAfter $
|
||||
description )
|
||||
MAY ( sudoUser $ sudoHost $ sudoCommand $ sudoRunAs $ sudoRunAsUser $ sudoRunAsGroup $ sudoOption $ sudoOrder $ sudoNotBefore $ sudoNotAfter $ description )
|
||||
)
|
|
@ -27,9 +27,6 @@ HostKey {{ key }}{% endfor %}
|
|||
MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,umac-128@openssh.com
|
||||
{% endif %}
|
||||
|
||||
# Use kernel sandbox mechanisms where possible in unprivileged processes
|
||||
UsePrivilegeSeparation sandbox
|
||||
|
||||
# LogLevel VERBOSE logs user's key fingerprint on login.
|
||||
# Needed to have a clear audit track of which key was using to log in.
|
||||
SyslogFacility AUTH
|
||||
|
|
|
@ -1,5 +0,0 @@
|
|||
# https://wiki.debian.org/UnattendedUpgrades#automatic_call_via_.2Fetc.2Fapt.2Fapt.conf.d.2F02periodic
|
||||
APT::Periodic::Enable "1";
|
||||
APT::Periodic::Update-Package-Lists "1";
|
||||
APT::Periodic::Unattended-Upgrade "1";
|
||||
APT::Periodic::Verbose "1";
|
|
@ -1,36 +0,0 @@
// Automatically upgrade packages from these (origin, archive) pairs
Unattended-Upgrade::Allowed-Origins {
    "${distro_id} stable";
    "${distro_id} testing";
    "Depot-Debian testing";
    "${distro_id} ${distro_codename}-security";
    "${distro_id} ${distro_codename}-updates";
    // "${distro_id} ${distro_codename}-proposed-updates";
};

// List of packages to not update
Unattended-Upgrade::Package-Blacklist {
    // "vim";
    // "libc6";
    // "libc6-dev";
    // "libc6-i686";
};

// Send email to this address for problems or packages upgrades
// If empty or unset then no email is sent, make sure that you
// have a working mail setup on your system. The package 'mailx'
// must be installed or anything that provides /usr/bin/mail.
//Unattended-Upgrade::Mail "root@localhost";

// Do automatic removal of new unused dependencies after the upgrade
// (equivalent to apt-get autoremove)
Unattended-Upgrade::Remove-Unused-Dependencies "true";

// Automatically reboot *WITHOUT CONFIRMATION* if a
// the file /var/run/reboot-required is found after the upgrade
Unattended-Upgrade::Automatic-Reboot "false";


// Use apt bandwidth limit feature, this example limits the download
// speed to 70kb/sec
//Acquire::http::Dl-Limit "70";
@ -20,10 +20,9 @@ nginx:
  test_conf: nginx -t
  needs_exposed_ports: [80, 443]
  category: web
nslcd: {}
php7.0-fpm:
  log: /var/log/php7.0-fpm.log
  test_conf: php-fpm7.0 --test
php7.3-fpm:
  log: /var/log/php7.3-fpm.log
  test_conf: php-fpm7.3 --test
  category: web
postfix:
  log: [/var/log/mail.log,/var/log/mail.err]
@ -64,3 +63,5 @@ postgrey: null
spamassassin: null
rmilter: null
php5-fpm: null
php7.0-fpm: null
nslcd: null
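(Note: each key in this registry is a service name, mapped either to a small set of options, such as the log file(s) to display, a test_conf command, needs_exposed_ports and a category, or to null, which appears to tell YunoHost to stop tracking that service. The entry below is a purely hypothetical sketch with an invented service name and paths, just to show the shape of an entry.)

# Hypothetical entry for illustration only, not part of the real services.yml
my-app-worker:
  log: /var/log/my-app/worker.log
  test_conf: my-app --check-config
  needs_exposed_ports: [8080]
  category: web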
259
debian/changelog
vendored

@ -1,3 +1,260 @@
yunohost (4.0.4) stable; urgency=low

- Debugging and robustness improvements for postgresql 9.6 -> 11 and xtables->nftables migrations (accc2da4, 59bd7d66, 4cb6f7fd, 4b14402c)

-- Alexandre Aubin <alex.aubin@mailoo.org> Wed, 12 Aug 2020 18:14:00 +0200

yunohost (4.0.3) stable; urgency=low

- Bump version number for stable release

-- Alexandre Aubin <alex.aubin@mailoo.org> Wed, 29 Jul 2020 17:00:00 +0200

yunohost (4.0.2~beta) testing; urgency=low

- [mod] Rebase on stretch-unstable to include recent changes
- [fix] Create admin's home during postinstall (#1021)

Thanks to all contributors <3 ! (Kay0u)

-- Alexandre Aubin <alex.aubin@mailoo.org> Fri, 19 Jun 2020 15:16:26 +0200

yunohost (4.0.1~alpha) testing; urgency=low

- [fix] It just make no sense to backup/restore the mysql password... (#911)
- [fix] Fix getopts and helpers (#885, #886)
- [fix] Explicitly create home using mkhomedir_helper instead of obscure pam rule that doesn't work anymore (b67ff314)
- [fix] Ldap interface seems to expect lists everywhere now? (fb8c2b7b)
- [deb] Clean control file, remove some legacy Conflicts and Replaces (ca0d4933)
- [deb] Add conflicts with versions from backports for critical dependencies (#967)
- [cleanup] Stale / legacy code (217aaa36, d77da6a0, af047468, 82d468a3)
- [conf] Automatically disable/stop systemd-resolved that conflicts with dnsmasq on fresh setups ... (e7214b37)
- [conf] Remove deprecated option in sshd conf, c.f. https://patchwork.openembedded.org/patch/139981/ (2723d245)
- [conf] Small tweak in dovecot conf (deprecated settings) (dc0481e2)
- [conf] Update nslcd and nsswitch stuff using new Buster's default configs + get rid of nslcd service, only keep the regen-conf part (6ef3520f)
- [php] Migrate from php7.0 to php7.3 (3374e653, 9be10506, dd9564d3, 9679c291, 212a15e4, 25fcaa19, c4ad66f5)
- [psql] Migrate from psql 11 to 9.6 (e88aed72, 4920d4f9, c70b0ae4)
- [firewall] Migrate from xtable to nftable (05fb58f2, 2c4a8b73, 625d5372)
- [slapd] Rework slapd regenconf to use new backend (#984)

Thanks to all contributors <3 ! (Étienne M., Josué, Kay0u)

-- Alexandre Aubin <alex.aubin@mailoo.org> Fri, 05 Jun 2020 03:10:09 +0200

yunohost (3.8.5.5) stable; urgency=low

- [enh] Allow to extend the nginx default_server configuration (f1bfc521)
- [mod] Move redirect to /yunohost/admin to a separate nginx conf file to allow customizing it more easily (ac9182d6)
- [enh] Make sure to validate/upgrade that we don't have any active weak certificate used by nginx at the beginning of the buster migration, otherwise nginx will later miserably fail to start (d4358897)
- [fix] get_files_diff crashing if {orig,new}_file is None (7bfe564a)
- [enh] Remove some useless message about file that "wasn't deleted because it doesn't exist." (#1024)
- [mod] Remove useless robot protection code (#1026)
- [fix] Let's not redefine the value for the 'service' var ... (1a2f26dc)
- [fix] More general stretch->buster patching for sources.list (#1028)
- [mod] Tweak custom disclaimer about the migration still being a bit touchy in preparation for stable release (852dea07)
- [mod] Typo/wording in en.json (#1030)
- [i18n] Translations updated for Catalan, French, Italian, Occitan

Thanks to all contributors <3 ! (É. Gaspar, Kay0u, L. Noferini, ppr, Quentí, xaloc33)

-- Alexandre Aubin <alex.aubin@mailoo.org> Mon, 27 Jul 2020 19:03:33 +0200

yunohost (3.8.5.4) testing; urgency=low

- [fix] Fix unscd version parsing *again*
- [fix] Enforce permissions on rspamd log directory
- [enh] Ignore stupid warnings about sudo-ldap that is already provided

-- Alexandre Aubin <alex.aubin@mailoo.org> Sun, 21 Jun 2020 23:37:09 +0200

yunohost (3.8.5.3) testing; urgency=low

- [fix] Fix the fix about unscd downgrade :/

-- Alexandre Aubin <alex.aubin@mailoo.org> Fri, 19 Jun 2020 18:50:58 +0200

yunohost (3.8.5.2) testing; urgency=low

- [fix] Small issue with unscd upgrade/downgrade ... new version ain't always 0.53.1, so find it using dirty scrapping

-- Alexandre Aubin <alex.aubin@mailoo.org> Thu, 18 Jun 2020 16:19:35 +0200

yunohost (3.8.5.1) testing; urgency=low

- [fix] Update Stretch->Buster migration disclaimer to make it clear that this is alpha-stage

-- Alexandre Aubin <alex.aubin@mailoo.org> Sat, 06 Jun 2020 03:30:00 +0200

yunohost (3.8.5) testing; urgency=low

- [enh] Add migration procedure for Stretch->Buster (a2b83c0f, a26411db, 9f1211e9, e544bf3e, a0511cca)
- [fix] Disable/skip ntp when inside a container (9d0c0924)

-- Alexandre Aubin <alex.aubin@mailoo.org> Sat, 06 Jun 2020 02:11:51 +0200
yunohost (3.8.4.9) stable; urgency=low

- [fix] Force lowercase on domain names (804f4b3e)
- [fix] Add dirmngr to Depends:, needed for apt-key / gpg (cd115ed8)
- [fix] Improve debugging when diagnosis ain't happy when renewing certs (0f0194be)
- [enh] Add yunohost version to logs metadata (d615546b)
- [enh] Alway filter irrelevant log lines when sharing it (38704cba, 51d53be5)
- [fix] Regen-conf outputing many 'forget-about-it' because of files flagged as to be removed (f4525488)
- [fix] postfix per-domain destination concurrency (#988)
- [fix] Call regenconf for ssh before the general regenconf during the postinstall to avoid an irrelevant warning (7805837b)
- [i18n] Translations updated for Catalan, French, German

Thanks to all contributors <3 ! (taziden, ljf, ppr, xaloc33, Yasss Gurl)

-- Alexandre Aubin <alex.aubin@mailoo.org> Thu, 18 Jun 2020 15:13:01 +0200

yunohost (3.8.4.8) stable; urgency=low

- [fix] Don't add unprotected_urls if it's already in skipped_urls (#1005)
- [enh] Add pre-defined DHE group and set up Nginx to use it (#1007)
- [fix] Make sure to propagate change in slapd systemd conf during initial install (2d42480f)
- [fix] More accurate grep to avoid mistakenly grepping commented lines... (2408a620)
- [enh] Update n to 6.5.1 (#1012)
- [fix] Set sury default pinning to 600 (653c5fde)
- [enh] Clean stale file/hashes in regen-conf (#1009)
- [fix] Weirdness in regen-conf mechanism for SSH conf (#1014)

Thanks to all contributors <3 ! (É. Gaspar, Josué, SohKa)

-- Alexandre Aubin <alex.aubin@mailoo.org> Sat, 06 Jun 2020 01:59:08 +0200

yunohost (3.8.4.7) stable; urgency=low

- [fix] Remove some remains of glances (17eec25e)
- [fix] Force external resolution for reverse DNS dig (852cd14c)
- [fix] Make sure mysql is an alias to mariadb (e24191ce, ca89607d)
- [fix] Path for ynh_add_fpm_config template in restore (#1001)
- [fix] Add -o Acquire::Retries=3 to fix some stupid network issues happening sometimes with apt (03432349)
- [fix] ynh_setup_source: Retry wget on non-critical failures to try to avoid tmp dns issues (3d66eaec)
- [fix] ynh_setup_source: Calling ynh_print_err in case of error didn't work, and we probably want a ynh_die here (55036fad)
- [i18n] Translations updated for Catalan, French, Italian, Occitan

Thanks to all contributors <3 ! (JimboJoe, Leandro N., ppr, Quentí, xaloc33, yalh76)

-- Alexandre Aubin <alex.aubin@mailoo.org> Thu, 04 Jun 2020 02:28:33 +0200

yunohost (3.8.4.6) stable; urgency=low

- [fix] Bump server_names_hash_bucket_size to 128 to avoid nginx exploding for stupid reasons (b3db4d92)
- [fix] More sensible strategy for sury pinning (#1006)
- [fix] Stop trying to fetch log categories that are not implemented yet T.T (77bd9ae3)
- [enh] Add logging and persistent as default config for new muc room (#1008)
- [tests] Moar tests for app args parsing (#1004)

Thanks to all contributors <3 ! (Gabriel, Kay0u, Bram)

-- Alexandre Aubin <alex.aubin@mailoo.org> Thu, 28 May 2020 00:22:10 +0200

yunohost (3.8.4.5) stable; urgency=low

- [enh] Tell systemctl to stfu about creating symlinks when enabling/disabling services (6637c8a8)
- [enh] Add maindomain in diagnosis email subject (e30e25fa)
- [fix] Webpath should also be normalized for args_list, so that we can get rid of the 'malformed path' check of the CI... (58ce6e5e)
- [fix] Increase time window for auto diagnosis cron to avoid remote diagnosis server overload (dc221495)
- [fix] encoding bullshit (4c600125, 64596bc1)
- [fix] Typo in diagnosis message + fix FR translation report format of bad DNS conf (#1002, b8f8ea14)
- [fix] Flag old etckeeper.conf as 'should not exist' in regenconf (5a3b382f)
- [enh] Detect dyndns-domains managed by yunohost and advice to use yunohost dyndns update --force (8b169f13)
- [enh] Complain if apps savagely edit system configurations during install and upgrade (a23f02db)
- [i18n] Translations updated for Arabic, Catalan, French, German, Italian
- [tests] CI V2 : Rework CI workflow (#991)

Thanks to all contributors <3 ! (ButterflyOfFire, Kay0u, L. Noferini, rynas, V. Rubiolo, xaloc33, Yasss Gurl)

-- Alexandre Aubin <alex.aubin@mailoo.org> Tue, 26 May 2020 03:20:39 +0200
yunohost (3.8.4.4) stable; urgency=low

- [fix] Crash when the services file is empty (85f1802)
- [fix] IPv6 detection when using wg-quick (#997)
- [fix] Use a .get() to avoid crash if key doesn't exist (1f1b2338)
- [enh] Don't display the hostname when calling journalctl, this takes horizontal space for nothing (2bcfb5a1)
- [fix] Add --quiet, otherwise getopts is confused by "-- Logs" at the beginning (bdbf1822)
- [mod] We don't need those color codes... and warnings are already warnings... (2a631fa2)
- [fix] psql_setup_db: Do not create a new password if the user already exists (#998)
- [enh] Add an exception if packaging format is not recognized (f0cc6798)

Thanks to all contributors <3 ! (Aleks, Julien Rabier, Kayou)

-- Kay0u <pierre@kayou.io> Fri, 22 May 2020 19:26:05 +0000

yunohost (3.8.4.3) stable; urgency=low

- [fix] Workaround for the sury pinning issues when installing dependencies
- [i18n] Translations updated for Catalan, French, Occitan

Thanks to all contributors <3 ! (Aleks, clecle226, Kay0u, ppr, Quenti)

-- Kay0u <pierre@kayou.io> Wed, 20 May 2020 18:41:49 +0000

yunohost (3.8.4.2) testing; urgency=low

- [enh] During failed upgrades: Only mention packages that couldn't be upgraded (26fcfed7)
- [enh] Also run dpkg --audit to check if dpkg is in a broken state (09d8500f, 97199d19)
- [enh] Improve logs readability (c6f18496, 9cbd368d, 5850bf61, 413778d2, 5c8c07b8, f73c34bf, 94ea8265)
- [enh] Crash early about apps already installed when attempting to restore (f9e4c96c)
- [fix] Add the damn short hostname to /etc/hosts automagically (c.f. rabbitmq-server) (e67dc791)
- [fix] Don't miserably crash if doveadm fails to run (c9b22138)
- [fix] Diagnosis: Try to not have weird warnings if no diagnosis ran yet... (65c87d55)
- [fix] Diagnosis: Change logic of --email to avoid sending empty mail if some issues are found but ignored (4cd4938e)
- [enh] Diagnosis/services: Report the service status as warning/unknown if service type is oneshot and status exited (dd09758f, 1cd7ffea)
- [fix] Rework ynh_psql_test_if_first_run (#993)
- [tests] Tests for args parsing (#989, 108a3ca4)

Thanks to all contributors <3 ! (Bram, Kayou)

-- Alexandre Aubin <alex.aubin@mailoo.org> Tue, 19 May 2020 20:08:47 +0200

yunohost (3.8.4.1) testing; urgency=low

- [mod] Tweak diagnosis threshold for swap warning (429df8c4)
- [fix] Make sure we have a list for log_list + make sure item is in list before using .remove()... (afbeb145, 43facfd5)
- [fix] Sometimes tree-model has a weird \x00 which breaks yunopaste (c346f5f1)

-- Alexandre Aubin <alex.aubin@mailoo.org> Mon, 11 May 2020 00:50:34 +0200

yunohost (3.8.4) testing; urgency=low

- [fix] Restoration of custom hooks / missing restore hooks (#927)
- [enh] Real CSP headers for the webadmin (#961)
- [enh] Simplify / optimize reading version of yunohost packages... (#968)
- [fix] handle new auto restart of ldap in moulinette (#975)
- [enh] service.py cleanup + add tests for services (#979, #986)
- [fix] Enforce permissions for stuff in /etc/yunohost/ (#963)
- [mod] Remove security diagnosis category for now, Move meltdown check to base system (a799740a)
- [mod] Change warning/errors about swap as info instead ... add a tip about the fact that having swap on SD or SSD is dangerous (23147161)
- [enh] Improve auto diagnosis cron UX, add a --human-readable option to diagnosis_show() (aecbb14a)
- [enh] Rely on new diagnosis for letsencrypt elligibility (#985)
- [i18n] Translations updated for Catalan, Esperanto, French, Spanish

Thanks to all contributors <3 ! (amirale qt, autra, Bram, clecle226, I. Hernández, Kay0u, xaloc33)

-- Alexandre Aubin <alex.aubin@mailoo.org> Sat, 09 May 2020 21:20:00 +0200

yunohost (3.8.3) testing; urgency=low

- [fix] Remove dot in reverse DNS check
- [fix] Upgrade of multi-instance apps was broken (#976)
- [fix] Check was broken if an apps with no domain setting was installed (#978)
- [enh] Add a timeout to wget (#972)
- [fix] ynh_get_ram: Enforce choosing --free or --total (#972)
- [fix] Simplify / improve robustness of backup list
- [enh] Make nodejs helpers easier to use (#939)
- [fix] Misc tweak for disk usage diagnosis, some values were inconsistent / bad UX / ...
- [enh] Assert slapd is running to avoid miserably crashing with weird ldap errors
- [enh] Try to show smarter / more useful logs by filtering irrelevant lines like set +x etc
- Technical tweaks for metronome 3.14.0 support
- Misc improvements for tests and linters

Thanks to all contributors <3 ! (Bram, Kay0u, Maniack C., ljf, Maranda)

-- Alexandre Aubin <alex.aubin@mailoo.org> Thu, 07 Apr 2020 04:00:00 +0000

yunohost (3.8.2.2) testing; urgency=low

Aleks broke everything /again/ *.*

@ -1626,7 +1883,7 @@ yunohost (2.5.2) testing; urgency=low

Other fixes and improvements:
* [enh] remove timeout from cli interface
* [fix] [#662](https://dev.yunohost.org/issues/662): missing 'python-openssl' dependency for Let's Encrypt integration.
* [fix] #662: missing 'python-openssl' dependency for Let's Encrypt integration.
* [fix] --no-remove-on-failure for app install should behave as a flag.
* [fix] don't remove trailing char if it's not a slash
6
debian/conf/pam/mkhomedir
vendored

@ -1,6 +0,0 @@
Name: Create home directory during login
Default: yes
Priority: 900
Session-Type: Additional
Session:
    required pam_mkhomedir.so umask=0022 skel=/etc/skel
37
debian/control
vendored

@ -11,12 +11,11 @@ Package: yunohost
Essential: yes
Architecture: all
Depends: ${python:Depends}, ${misc:Depends}
 , moulinette (>= 3.7), ssowat (>= 3.7)
 , moulinette (>= 4.0.0~alpha), ssowat (>= 4.0.0~alpha)
 , python-psutil, python-requests, python-dnspython, python-openssl
 , python-apt, python-miniupnpc, python-dbus, python-jinja2
 , python-toml
 , apt, apt-transport-https
 , nginx, nginx-extras (>=1.6.2)
 , python-miniupnpc, python-dbus, python-jinja2
 , python-toml, python-packaging, python-publicsuffix
 , apt, apt-transport-https, dirmngr
 , php-fpm, php-ldap, php-intl
 , mariadb-server, php-mysql | php-mysqlnd
 , openssh-server, iptables, fail2ban, dnsutils, bind9utils
@ -25,31 +24,29 @@ Depends: ${python:Depends}, ${misc:Depends}
 , dnsmasq, avahi-daemon, libnss-mdns, resolvconf, libnss-myhostname
 , postfix, postfix-ldap, postfix-policyd-spf-perl, postfix-pcre
 , dovecot-core, dovecot-ldap, dovecot-lmtpd, dovecot-managesieved, dovecot-antispam
 , rspamd (>= 1.6.0), opendkim-tools, postsrsd, procmail, mailutils
 , rspamd, opendkim-tools, postsrsd, procmail, mailutils
 , redis-server
 , metronome
 , git, curl, wget, cron, unzip, jq
 , metronome (>=3.14.0)
 , git, curl, wget, cron, unzip, jq, bc
 , lsb-release, haveged, fake-hwclock, equivs, lsof, whois, python-publicsuffix
Recommends: yunohost-admin
 , ntp, inetutils-ping | iputils-ping
 , bash-completion, rsyslog
 , php-gd, php-curl, php-gettext, php-mcrypt
 , php-gd, php-curl, php-gettext
 , python-pip
 , unattended-upgrades
 , libdbd-ldap-perl, libnet-dns-perl
Suggests: htop, vim, rsync, acpi-support-base, udisks2
Conflicts: iptables-persistent
 , moulinette-yunohost, yunohost-config
 , yunohost-config-others, yunohost-config-postfix
 , yunohost-config-dovecot, yunohost-config-slapd
 , yunohost-config-nginx, yunohost-config-amavis
 , yunohost-config-mysql, yunohost-predepends
 , apache2, bind9
Replaces: moulinette-yunohost, yunohost-config
 , yunohost-config-others, yunohost-config-postfix
 , yunohost-config-dovecot, yunohost-config-slapd
 , yunohost-config-nginx, yunohost-config-amavis
 , yunohost-config-mysql, yunohost-predepends
 , apache2
 , bind9
 , nginx-extras (>= 1.16)
 , openssl (>= 1.1.1g)
 , slapd (>= 2.4.49)
 , dovecot-core (>= 1:2.3.7)
 , redis-server (>= 5:5.0.7)
 , fail2ban (>= 0.11)
 , iptables (>= 1.8.3)
Description: manageable and configured self-hosting server
 YunoHost aims to make self-hosting accessible to everyone. It configures
 an email, Web and IM server alongside a LDAP base. It also provides
2
debian/install
vendored

@ -8,11 +8,11 @@ data/other/yunoprompt.service /etc/systemd/system/
data/other/password/* /usr/share/yunohost/other/password/
data/other/dpkg-origins/yunohost /etc/dpkg/origins
data/other/dnsbl_list.yml /usr/share/yunohost/other/
data/other/ffdhe2048.pem /usr/share/yunohost/other/
data/other/* /usr/share/yunohost/yunohost-config/moulinette/
data/templates/* /usr/share/yunohost/templates/
data/helpers /usr/share/yunohost/
data/helpers.d/* /usr/share/yunohost/helpers.d/
debian/conf/pam/* /usr/share/pam-configs/
lib/metronome/modules/* /usr/lib/metronome/modules/
locales/* /usr/lib/moulinette/yunohost/locales/
src/yunohost /usr/lib/moulinette
5
debian/postinst
vendored

@ -29,11 +29,6 @@ do_configure() {

    # Yunoprompt
    systemctl enable yunoprompt.service

    # remove old PAM config and update it
    [[ ! -f /usr/share/pam-configs/my_mkhomedir ]] \
        || rm /usr/share/pam-configs/my_mkhomedir
    pam-auth-update --package
}

# summary of how this script can be called:
@ -2,6 +2,8 @@

<h1>App helpers</h1>

<p>Doc auto-generated by <a href="https://github.com/YunoHost/yunohost/blob/stretch-unstable/doc/generate_helper_doc.py">this script</a> on {{data.date}} (Yunohost version {{data.version}})</p>

{% for category, helpers in data.helpers %}

<h3 style="text-transform: uppercase; font-weight: bold">{{ category }}</h3>

@ -70,7 +72,7 @@
</p>
{% endif %}
<p>
<a href="https://github.com/YunoHost/yunohost/blob/stretch-unstable/data/helpers.d/{{ category }}#L{{ h.line + 1 }}">Dude, show me the code !</a>
<a href="https://github.com/YunoHost/yunohost/blob/stretch-stable/data/helpers.d/{{ category }}#L{{ h.line + 1 }}">Dude, show me the code !</a>
</p>

</div>

@ -81,9 +83,6 @@
{% endfor %}
{% endfor %}

<p>Generated by <a href="https://github.com/YunoHost/yunohost/blob/stretch-unstable/doc/generate_helper_doc.py">this script</a> on {{data.date}} (Yunohost version {{data.version}})</p>


<style>
/*=================================================
 Helper card
@ -228,7 +228,7 @@ function driver:stores(username, type, pattern)
	return nil, "not implemented";
end

function driver:store_exists(username, datastore, type)
function driver:store_exists(username, type)
	return nil, "not implemented";
end

@ -236,7 +236,7 @@ function driver:purge(username)
	return nil, "not implemented";
end

function driver:users()
function driver:nodes(type)
	return nil, "not implemented";
end
@ -164,7 +164,7 @@
|
|||
"diagnosis_basesystem_kernel": "هذا الخادم يُشغّل نواة لينكس {kernel_version}",
|
||||
"diagnosis_basesystem_ynh_single_version": "{package} الإصدار: {version} ({repo})",
|
||||
"diagnosis_basesystem_ynh_main_version": "هذا الخادم يُشغّل YunoHost {main_version} ({repo})",
|
||||
"diagnosis_everything_ok": "كل شيء على ما يرام في {category}!",
|
||||
"diagnosis_everything_ok": "كل شيء يبدو على ما يرام في {category}!",
|
||||
"diagnosis_ip_connected_ipv4": "الخادم مُتّصل بالإنترنت عبر IPv4!",
|
||||
"diagnosis_ip_connected_ipv6": "الخادم مُتّصل بالإنترنت عبر IPv6!",
|
||||
"diagnosis_ip_not_connected_at_all": "يبدو أنّ الخادم غير مُتّصل بتاتا بالإنترنت!؟",
|
||||
|
@ -172,5 +172,16 @@
|
|||
"apps_already_up_to_date": "كافة التطبيقات مُحدّثة",
|
||||
"app_remove_after_failed_install": "جارٍ حذف التطبيق بعدما فشل تنصيبها…",
|
||||
"apps_catalog_updating": "جارٍ تحديث فهرس التطبيقات…",
|
||||
"apps_catalog_update_success": "تم تحديث فهرس التطبيقات!"
|
||||
"apps_catalog_update_success": "تم تحديث فهرس التطبيقات!",
|
||||
"diagnosis_domain_expiration_error": "ستنتهي مدة صلاحية بعض النطاقات في القريب العاجل!",
|
||||
"diagnosis_domain_expiration_warning": "ستنتهي مدة صلاحية بعض النطاقات قريبًا!",
|
||||
"diagnosis_ports_could_not_diagnose_details": "خطأ: {error}",
|
||||
"diagnosis_description_regenconf": "إعدادات النظام",
|
||||
"diagnosis_description_mail": "البريد الإلكتروني",
|
||||
"diagnosis_description_web": "الويب",
|
||||
"diagnosis_description_systemresources": "موارد النظام",
|
||||
"diagnosis_description_services": "حالة الخدمات",
|
||||
"diagnosis_description_dnsrecords": "تسجيلات خدمة DNS",
|
||||
"diagnosis_description_ip": "الإتصال بالإنترنت",
|
||||
"diagnosis_description_basesystem": "النظام الأساسي"
|
||||
}
|
||||
|
|
115
locales/ca.json
@ -100,7 +100,7 @@
|
|||
"backup_unable_to_organize_files": "No s'ha pogut utilitzar el mètode ràpid per organitzar els fitxers dins de l'arxiu",
|
||||
"backup_with_no_backup_script_for_app": "L'aplicació «{app:s}» no té un script de còpia de seguretat. Serà ignorat.",
|
||||
"backup_with_no_restore_script_for_app": "L'aplicació «{app:s}» no té un script de restauració, no podreu restaurar automàticament la còpia de seguretat d'aquesta aplicació.",
|
||||
"certmanager_acme_not_configured_for_domain": "El certificat pel domini «{domain:s}» sembla que no està instal·lat correctament. Si us plau executeu primer «cert-install» per aquest domini.",
|
||||
"certmanager_acme_not_configured_for_domain": "No s'ha pogut executar el ACME challenge pel domini {domain} en aquests moments ja que a la seva configuració de nginx li manca el codi corresponent… Assegureu-vos que la configuració nginx està actualitzada utilitzant «yunohost tools regen-conf nginx --dry-run --with-diff».",
|
||||
"certmanager_attempt_to_renew_nonLE_cert": "El certificat pel domini «{domain:s}» no ha estat emès per Let's Encrypt. No es pot renovar automàticament!",
|
||||
"certmanager_attempt_to_renew_valid_cert": "El certificat pel domini «{domain:s}» està a punt de caducar! (Utilitzeu --force si sabeu el que esteu fent)",
|
||||
"certmanager_attempt_to_replace_valid_cert": "Esteu intentant sobreescriure un certificat correcte i vàlid pel domini {domain:s}! (Utilitzeu --force per ometre)",
|
||||
|
@ -113,8 +113,8 @@
|
|||
"certmanager_conflicting_nginx_file": "No s'ha pogut preparar el domini per al desafiament ACME: l'arxiu de configuració NGINX {filepath:s} entra en conflicte i s'ha d'eliminar primer",
|
||||
"certmanager_couldnt_fetch_intermediate_cert": "S'ha exhaurit el temps d'esperar al intentar recollir el certificat intermedi des de Let's Encrypt. La instal·lació/renovació del certificat s'ha cancel·lat - torneu a intentar-ho més tard.",
|
||||
"certmanager_domain_cert_not_selfsigned": "El certificat pel domini {domain:s} no és auto-signat Esteu segur de voler canviar-lo? (Utilitzeu «--force» per fer-ho)",
|
||||
"certmanager_domain_dns_ip_differs_from_public_ip": "El registre DNS \"A\" pel domini «{domain:s}» és diferent a l'adreça IP d'aquest servidor. Si heu modificat recentment el registre A, si us plau espereu a que es propagui (hi ha eines per verificar la propagació disponibles a internet). (Si sabeu el que esteu fent, podeu utilitzar «--no-checks» per desactivar aquestes comprovacions.)",
|
||||
"certmanager_domain_http_not_working": "Sembla que el domini {domain:s} no és accessible via HTTP. Verifiqueu que les configuracions DNS i NGINX siguin correctes",
|
||||
"certmanager_domain_dns_ip_differs_from_public_ip": "Els registres DNS pel domini «{domain:s}» són diferents a l'adreça IP d'aquest servidor. Mireu la categoria «registres DNS» (bàsic) al diagnòstic per a més informació. Si heu modificat recentment el registre A, si us plau espereu a que es propagui (hi ha eines per verificar la propagació disponibles a internet). (Si sabeu el que esteu fent, podeu utilitzar «--no-checks» per desactivar aquestes comprovacions.)",
|
||||
"certmanager_domain_http_not_working": "El domini {domain:s} sembla que no és accessible via HTTP. Verifiqueu la categoria «Web» en el diagnòstic per a més informació. (Si sabeu el que esteu fent, utilitzeu «--no-checks» per deshabilitar les comprovacions.)",
|
||||
"certmanager_domain_unknown": "Domini desconegut «{domain:s}»",
|
||||
"certmanager_error_no_A_record": "No s'ha trobat cap registre DNS «A» per «{domain:s}». Heu de fer que el vostre nom de domini apunti cap a la vostra màquina per tal de poder instal·lar un certificat Let's Encrypt. (Si sabeu el que esteu fent, podeu utilitzar «--no-checks» per desactivar aquestes comprovacions.)",
|
||||
"certmanager_hit_rate_limit": "S'han emès massa certificats recentment per aquest mateix conjunt de dominis {domain:s}. Si us plau torneu-ho a intentar més tard. Consulteu https://letsencrypt.org/docs/rate-limits/ per obtenir més detalls",
|
||||
|
@ -127,7 +127,7 @@
|
|||
"confirm_app_install_thirdparty": "PERILL! Aquesta aplicació no es part del catàleg d'aplicacions de YunoHost. La instal·lació d'aplicacions de terceres parts pot comprometre la integritat i seguretat del seu sistema. No hauríeu d'instal·lar-ne a no ser que sapigueu el que feu. No obtindreu CAP AJUDA si l'aplicació no funciona o trenca el sistema… Si accepteu el risc, escriviu «{answers:s}»",
|
||||
"custom_app_url_required": "Heu de especificar una URL per actualitzar la vostra aplicació personalitzada {app:s}",
|
||||
"admin_password_too_long": "Trieu una contrasenya de menys de 127 caràcters",
|
||||
"dpkg_is_broken": "No es pot fer això en aquest instant perquè dpkg/APT (els gestors de paquets del sistema) sembla estar mal configurat… Podeu intentar solucionar-ho connectant-vos per SSH i executant «sudo dpkg --configure -a».",
|
||||
"dpkg_is_broken": "No es pot fer això en aquest instant perquè dpkg/APT (els gestors de paquets del sistema) sembla estar mal configurat… Podeu intentar solucionar-ho connectant-vos per SSH i executant «sudo apt install --fix-broken» i/o «sudo dpkg --configure -a».",
|
||||
"domain_cannot_remove_main": "No es pot eliminar «{domain:s}» ja que és el domini principal, primer s'ha d'establir un nou domini principal utilitzant «yunohost domain main-domain -n <un-altre-domini>»; aquí hi ha una llista dels possibles dominis: {other_domains:s}",
|
||||
"domain_cert_gen_failed": "No s'ha pogut generar el certificat",
|
||||
"domain_created": "S'ha creat el domini",
|
||||
|
@ -140,7 +140,7 @@
|
|||
"domain_dyndns_already_subscribed": "Ja us heu subscrit a un domini DynDNS",
|
||||
"domain_dyndns_root_unknown": "Domini DynDNS principal desconegut",
|
||||
"domain_hostname_failed": "No s'ha pogut establir un nou nom d'amfitrió. Això podria causar problemes més tard (podria no passar res).",
|
||||
"domain_uninstall_app_first": "Hi ha una o més aplicacions instal·lades en aquest domini. Desinstal·leu les abans d'eliminar el domini",
|
||||
"domain_uninstall_app_first": "Aquestes aplicacions encara estan instal·lades en el vostre domini: {apps}. Desinstal·leu les abans d'eliminar el domini",
|
||||
"domain_unknown": "Domini desconegut",
|
||||
"domains_available": "Dominis disponibles:",
|
||||
"done": "Fet",
|
||||
|
@ -390,7 +390,7 @@
|
|||
"ssowat_conf_updated": "S'ha actualitzat la configuració SSOwat",
|
||||
"system_upgraded": "S'ha actualitzat el sistema",
|
||||
"system_username_exists": "El nom d'usuari ja existeix en la llista d'usuaris de sistema",
|
||||
"this_action_broke_dpkg": "Aquesta acció a trencat dpkg/APT (els gestors de paquets del sistema)… Podeu intentar resoldre el problema connectant-vos amb SSH i executant «sudo dpkg --configure -a».",
|
||||
"this_action_broke_dpkg": "Aquesta acció a trencat dpkg/APT (els gestors de paquets del sistema)… Podeu intentar resoldre el problema connectant-vos amb SSH i executant «sudo apt install --fix-broken» i/o «sudo dpkg --configure -a».",
|
||||
"tools_upgrade_at_least_one": "Especifiqueu «--apps», o «--system»",
|
||||
"tools_upgrade_cant_both": "No es poden actualitzar tant el sistema com les aplicacions al mateix temps",
|
||||
"tools_upgrade_cant_hold_critical_packages": "No es poden mantenir els paquets crítics…",
|
||||
|
@ -504,7 +504,7 @@
|
|||
"diagnosis_basesystem_ynh_main_version": "El servidor funciona amb YunoHost {main_version} ({repo})",
|
||||
"diagnosis_ram_low": "El sistema només té {available} ({available_percent}%) de memòria RAM disponibles d'un total de {total}. Aneu amb compte.",
|
||||
"diagnosis_swap_none": "El sistema no té swap. Hauríeu de considerar afegir un mínim de {recommended} de swap per evitar situacions en les que el sistema es queda sense memòria.",
|
||||
"diagnosis_regenconf_manually_modified": "El fitxer de configuració {file} ha estat modificat manualment.",
|
||||
"diagnosis_regenconf_manually_modified": "El fitxer de configuració <code>{file}</code> sembla haver estat modificat manualment.",
|
||||
"diagnosis_security_vulnerable_to_meltdown_details": "Per arreglar-ho, hauríeu d'actualitzar i reiniciar el sistema per tal de carregar el nou nucli de linux (o contactar amb el proveïdor del servidor si no funciona). Vegeu https://meltdownattack.com/ per a més informació.",
|
||||
"diagnosis_http_could_not_diagnose": "No s'ha pogut diagnosticar si el domini és accessible des de l'exterior.",
|
||||
"diagnosis_http_could_not_diagnose_details": "Error: {error}",
|
||||
|
@ -531,23 +531,23 @@
|
|||
"diagnosis_ip_not_connected_at_all": "Sembla que el servidor no està connectat a internet!?",
|
||||
"diagnosis_ip_dnsresolution_working": "La resolució de nom de domini està funcionant!",
|
||||
"diagnosis_ip_broken_dnsresolution": "La resolució de nom de domini falla per algun motiu… Està el tallafocs bloquejant les peticions DNS?",
|
||||
"diagnosis_ip_broken_resolvconf": "La resolució de nom de domini sembla caiguda en el servidor, podria estar relacionat amb el fet que /etc/resolv.conf no apunta cap a 127.0.0.1.",
|
||||
"diagnosis_ip_weird_resolvconf": "La resolució DNS sembla estar funcionant, però aneu amb compte ja que esteu utilitzant un versió personalitzada de /etc/resolv.conf.",
|
||||
"diagnosis_ip_weird_resolvconf_details": "En canvi, aquest fitxer hauria de ser un enllaç simbòlic cap a /etc/resolvconf/run/resolv.conf i que aquest apunti cap a 127.0.0.1 (dnsmasq). La configuració del «resolver» real s'hauria de fer a /etc/resolv.dnsmaq.conf.",
|
||||
"diagnosis_dns_good_conf": "Bona configuració DNS pel domini {domain} (categoria {category})",
|
||||
"diagnosis_dns_bad_conf": "Configuració DNS incorrecta o inexistent pel domini {domain} (categoria {category})",
|
||||
"diagnosis_dns_missing_record": "Segons la configuració DNS recomanada, hauríeu d'afegir un registre DNS\ntipus: {type}\nnom: {name}\nvalor: {value}.",
|
||||
"diagnosis_dns_discrepancy": "El registre DNS de tipus {type} i nom {name} no concorda amb la configuració recomanada.\nValor actual: {current}\nValor esperat: {value}",
|
||||
"diagnosis_ip_broken_resolvconf": "La resolució de nom de domini sembla caiguda en el servidor, podria estar relacionat amb el fet que <code>/etc/resolv.conf</code> no apunta cap a <code>127.0.0.1</code>.",
|
||||
"diagnosis_ip_weird_resolvconf": "La resolució DNS sembla estar funcionant, però sembla que esteu utilitzant un versió personalitzada de <code>/etc/resolv.conf</code>.",
|
||||
"diagnosis_ip_weird_resolvconf_details": "El fitxer <code>etc/resolv.conf</code> hauria de ser un enllaç simbòlic cap a <code>/etc/resolvconf/run/resolv.conf</code> i que aquest apunti cap a <code>127.0.0.1</code> (dnsmasq). La configuració del «resolver» real s'hauria de fer a <code>/etc/resolv.dnsmaq.conf</code>.",
|
||||
"diagnosis_dns_good_conf": "Els registres DNS han estat correctament configurats pel domini {domain} (categoria {category})",
|
||||
"diagnosis_dns_bad_conf": "Alguns registres DNS són incorrectes o no existeixen pel domini {domain} (categoria {category})",
|
||||
"diagnosis_dns_missing_record": "Segons la configuració DNS recomanada, hauríeu d'afegir un registre DNS amb la següent informació.<br>Tipus: <code>{type}</code><br>Nom: <code>{name}</code><br>Valor: <code>{value}</code>",
|
||||
"diagnosis_dns_discrepancy": "La configuració DNS següent sembla que no segueix la configuració recomanada: <br>Tipus: <code>{type}</code><br>Nom: <code>{name}</code><br>Valor actual: <code>{current}</code><br>Valor esperat: <code>{value}</code>",
|
||||
"diagnosis_services_bad_status": "El servei {service} està {status} :(",
|
||||
"diagnosis_diskusage_verylow": "El lloc d'emmagatzematge {mountpoint} (en l'aparell {device}) només té disponibles {free} ({free_percent}%). Hauríeu de considerar alliberar una mica d'espai.",
|
||||
"diagnosis_diskusage_low": "El lloc d'emmagatzematge {mountpoint} (en l'aparell {device}) només té disponibles {free} ({free_percent}%). Aneu amb compte.",
|
||||
"diagnosis_diskusage_ok": "El lloc d'emmagatzematge {mountpoint} (en l'aparell {device}) encara té {free} ({free_percent}%) lliures!",
|
||||
"diagnosis_diskusage_verylow": "El lloc d'emmagatzematge <code>{mountpoint}</code> (en l'aparell <code>{device}</code>) només té disponibles {free} ({free_percent}%). Hauríeu de considerar alliberar una mica d'espai!",
|
||||
"diagnosis_diskusage_low": "El lloc d'emmagatzematge <code>{mountpoint}</code> (en l'aparell <code>{device}</code>) només té disponibles {free} ({free_percent}%). Aneu amb compte.",
|
||||
"diagnosis_diskusage_ok": "El lloc d'emmagatzematge <code>{mountpoint}</code> (en l'aparell <code>{device}</code>) encara té {free} ({free_percent}%) lliures!",
|
||||
"diagnosis_ram_verylow": "El sistema només té {available} ({available_percent}%) de memòria RAM disponibles! (d'un total de {total})",
|
||||
"diagnosis_ram_ok": "El sistema encara té {available} ({available_percent}%) de memòria RAM disponibles d'un total de {total}.",
|
||||
"diagnosis_swap_notsomuch": "El sistema només té {total} de swap. Hauríeu de considerar tenir un mínim de {recommended} per evitar situacions en les que el sistema es queda sense memòria.",
|
||||
"diagnosis_swap_ok": "El sistema té {total} de swap!",
|
||||
"diagnosis_regenconf_allgood": "Tots els fitxers de configuració estan en acord amb la configuració recomanada!",
|
||||
"diagnosis_regenconf_manually_modified_details": "No hauria de ser cap problema sempre i quan sapigueu el que esteu fent ;) !",
|
||||
"diagnosis_regenconf_manually_modified_details": "No hauria de ser cap problema sempre i quan sapigueu el que esteu fent! YunoHost deixarà d'actualitzar aquest fitxer de manera automàtica… Però tingueu en compte que les actualitzacions de YunoHost podrien tenir canvis recomanats importants. Si voleu podeu mirar les diferències amb <cmd>yunohost tools regen-conf {category} --dry-run --with-diff</cmd> i forçar el restabliment de la configuració recomanada amb <cmd>yunohost tools regen-conf {category} --force</cmd>",
|
||||
"diagnosis_regenconf_manually_modified_debian": "El fitxer de configuració {file} ha estat modificat manualment respecte al fitxer per defecte de Debian.",
|
||||
"diagnosis_regenconf_manually_modified_debian_details": "No hauria de ser cap problema, però ho haureu de vigilar...",
|
||||
"diagnosis_security_all_good": "No s'ha trobat cap vulnerabilitat de seguretat crítica.",
|
||||
|
@ -577,11 +577,11 @@
|
|||
"diagnosis_description_mail": "Correu electrònic",
|
||||
"migration_description_0013_futureproof_apps_catalog_system": "Migrar al nou sistema de catàleg d'aplicacions resistent al pas del temps",
|
||||
"app_upgrade_script_failed": "Hi ha hagut un error en el script d'actualització de l'aplicació",
|
||||
"diagnosis_services_bad_status_tip": "Podeu intentar reiniciar el servei, i si no funciona, podeu mirar els registres del servei utilitzant «yunohost service log {service}» o a través de «Serveis» a la secció de la pàgina web d'administració.",
|
||||
"diagnosis_ports_forwarding_tip": "Per arreglar aquest problema, segurament s'ha de configurar el reenviament de ports en el router tal i com s'explica a https://yunohost.org/isp_box_config",
|
||||
"diagnosis_http_bad_status_code": "El sistema de diagnòstic no ha pogut connectar amb el servidor. Podria ser que una altra màquina hagi contestat en lloc del servidor. S'hauria de comprovar que el reenviament del port 80 sigui correcte, que la configuració NGINX està actualitzada i que el reverse-proxy no està interferint.",
|
||||
"diagnosis_services_bad_status_tip": "Podeu intentar <a href='#/services/{service}'>reiniciar el servei</a>, i si no funciona, podeu mirar <a href='#/services/{service}'>els registres a la pàgina web d'administració</a> (des de la línia de comandes, ho podeu fer utilitzant <cmd>yunohost service restart {service}</cmd> i <cmd>yunohost service log {service}</cmd>).",
|
||||
"diagnosis_ports_forwarding_tip": "Per arreglar aquest problema, segurament s'ha de configurar el reenviament de ports en el router tal i com s'explica a <a href='https://yunohost.org/isp_box_config'>https://yunohost.org/isp_box_config</a>",
|
||||
"diagnosis_http_bad_status_code": "Sembla que una altra màquina (potser el router) a respost en lloc del vostre servidor.<br>1. La causa més probable per a aquest problema és que el port 80 (i 443) <a href='https://yunohost.org/isp_box_config'>no reenvien correctament cap al vostre servidor</a>.<br>2. En configuracions més complexes: assegureu-vos que no hi ha cap tallafoc o reverse-proxy interferint.",
|
||||
"diagnosis_no_cache": "Encara no hi ha memòria cau pel diagnòstic de la categoria «{category}»",
|
||||
"diagnosis_http_timeout": "S'ha exhaurit el temps d'esperar intentant connectar amb el servidor des de l'exterior. Sembla que no s'hi pot accedir. S'hauria de comprovar que el reenviament del port 80 és correcte, que NGINX funciona, i que el tallafocs no està interferint.",
|
||||
"diagnosis_http_timeout": "S'ha exhaurit el temps d'esperar intentant connectar amb el servidor des de l'exterior.<br>1. La causa més probable per a aquest problema és que el port 80 (i 443) <a href='https://yunohost.org/isp_box_config'>no reenvien correctament cap al vostre servidor</a>.<br>2. També us hauríeu d'assegurar que el servei nginx estigui funcionant<br>3. En configuracions més complexes: assegureu-vos que no hi ha cap tallafoc o reverse-proxy interferint.",
|
||||
"diagnosis_http_connection_error": "Error de connexió: no s'ha pogut connectar amb el domini demanat, segurament és inaccessible.",
|
||||
"yunohost_postinstall_end_tip": "S'ha completat la post-instal·lació. Per acabar la configuració, considereu:\n - afegir un primer usuari a través de la secció «Usuaris» a la pàgina web d'administració (o emprant «yunohost user create <username>» a la línia d'ordres);\n - diagnosticar possibles problemes a través de la secció «Diagnòstics» a la pàgina web d'administració (o emprant «yunohost diagnosis run» a la línia d'ordres);\n - llegir les seccions «Finalizing your setup» i «Getting to know Yunohost» a la documentació per administradors: https://yunohost.org/admindoc.",
|
||||
"migration_description_0014_remove_app_status_json": "Eliminar els fitxers d'aplicació status.json heretats",
|
||||
|
@ -598,5 +598,74 @@
|
|||
"diagnosis_basesystem_hardware": "L'arquitectura del maquinari del servidor és {virt} {arch}",
|
||||
"group_already_exist_on_system_but_removing_it": "El grup {group} ja existeix en els grups del sistema, però YunoHost l'eliminarà…",
|
||||
"certmanager_warning_subdomain_dns_record": "El subdomini «{subdomain:s}» no resol a la mateixa adreça IP que «{domain:s}». Algunes funcions no estaran disponibles fins que no s'hagi arreglat i s'hagi regenerat el certificat.",
|
||||
"domain_cannot_add_xmpp_upload": "No podeu afegir dominis començant per «xmpp-upload.». Aquest tipus de nom està reservat per a la funció de pujada de XMPP integrada a YunoHost."
|
||||
"domain_cannot_add_xmpp_upload": "No podeu afegir dominis començant per «xmpp-upload.». Aquest tipus de nom està reservat per a la funció de pujada de XMPP integrada a YunoHost.",
|
||||
"diagnosis_display_tip": "Per veure els problemes que s'han trobat, podeu anar a la secció de Diagnòstic a la pàgina web d'administració, o utilitzar « yunohost diagnostic show --issues » a la línia de comandes.",
|
||||
"diagnosis_mail_outgoing_port_25_blocked_relay_vpn": "Alguns proveïdors no permeten desbloquejar el port de sortida 25 perquè no els hi importa la Neutralitat de la Xarxa.<br> - Alguns d'ells ofereixen l'alternativa d'<a href='https://yunohost.org/#/smtp_relay'>utilitzar un relay de servidor de correu electrònic</a> tot i que implica que el relay serà capaç d'espiar el tràfic de correus electrònics.<br>- Una alternativa respectuosa amb la privacitat és utilitzar una VPN *amb una IP pública dedicada* per sortejar aquest tipus de limitació. Vegeu <a href='https://yunohost.org/#/vpn_advantage'>https://yunohost.org/#/vpn_advantage</a><br>- També podeu considerar canviar-vos a <a href='https://yunohost.org/#/isp'>un proveïdor més respectuós de la neutralitat de la xarxa</a>",
|
||||
"diagnosis_ip_global": "IP global: <code>{global}</code>",
|
||||
"diagnosis_ip_local": "IP local: <code>{local}</code>",
|
||||
"diagnosis_dns_point_to_doc": "Consulteu la documentació a <a href='https://yunohost.org/dns_config'>https://yunohost.org/dns_config</a> si necessiteu ajuda per configurar els registres DNS.",
|
||||
"diagnosis_mail_outgoing_port_25_ok": "El servidor de correu electrònic SMTP pot enviar correus electrònics (el port de sortida 25 no està bloquejat).",
|
||||
"diagnosis_mail_outgoing_port_25_blocked_details": "Primer heu d'intentar desbloquejar el port 25 en la interfície del vostre router o en la interfície del vostre allotjador. (Alguns proveïdors d'allotjament demanen enviar un tiquet de suport en aquests casos).",
|
||||
"diagnosis_mail_ehlo_ok": "El servidor de correu electrònic SMTP no és accessible des de l'exterior i per tant no pot rebre correus electrònics!",
|
||||
"diagnosis_mail_ehlo_unreachable": "El servidor de correu electrònic SMTP no és accessible des de l'exterior amb IPv{ipversion}. No podrà rebre correus electrònics.",
|
||||
"diagnosis_mail_ehlo_bad_answer": "Un servei no SMTP a respost en el port 25 amb IPv{ipversion}",
|
||||
"diagnosis_mail_ehlo_bad_answer_details": "Podria ser que sigui per culpa d'una altra màquina responent en lloc del servidor.",
|
||||
"diagnosis_mail_ehlo_wrong": "Un servidor de correu electrònic SMTP diferent respon amb IPv{ipversion}. És probable que el vostre servidor no pugui rebre correus electrònics.",
|
||||
"diagnosis_mail_ehlo_could_not_diagnose": "No s'ha pogut diagnosticar si el servidor de correu electrònic postfix és accessible des de l'exterior amb IPv{ipversion}.",
|
||||
"diagnosis_mail_ehlo_could_not_diagnose_details": "Error: {error}",
|
||||
"diagnosis_mail_fcrdns_ok": "S'ha configurat correctament el servidor DNS invers!",
|
||||
"diagnosis_mail_blacklist_ok": "Sembla que les IPs i el dominis d'aquest servidor no són en una llista negra",
|
||||
"diagnosis_mail_blacklist_listed_by": "La vostra IP o domini <code>{item}</code> està en una llista negra a {blacklist_name}",
|
||||
"diagnosis_mail_blacklist_reason": "El motiu de ser a la llista negra és: {reason}",
|
||||
"diagnosis_mail_fcrdns_different_from_ehlo_domain": "El DNS invers no està correctament configurat amb IPv{ipversion}. Alguns correus electrònics poden no arribar al destinatari o ser marcats com correu brossa.",
|
||||
"diagnosis_mail_fcrdns_different_from_ehlo_domain_details": "DNS invers actual: <code>{rdns_domain}</code><br>Valor esperat: <code>{ehlo_domain}</code>",
|
||||
"diagnosis_mail_queue_ok": "{nb_pending} correus electrònics pendents en les cues de correu electrònic",
|
||||
"diagnosis_mail_queue_unavailable": "No s'ha pogut consultar el nombre de correus electrònics pendents en la cua",
|
||||
"diagnosis_mail_queue_unavailable_details": "Error: {error}",
|
||||
"diagnosis_mail_queue_too_big": "Hi ha massa correus electrònics pendents en la cua ({nb_pending} correus electrònics)",
|
||||
"diagnosis_http_hairpinning_issue": "Sembla que la vostra xarxa no té el hairpinning activat.",
|
||||
"diagnosis_http_nginx_conf_not_up_to_date": "La configuració NGINX d'aquest domini sembla que ha estat modificada manualment, i no deixa que YunoHost diagnostiqui si és accessible amb HTTP.",
|
||||
"diagnosis_http_nginx_conf_not_up_to_date_details": "Per arreglar el problema, mireu les diferències amb la línia d'ordres utilitzant <cmd>yunohost tools regen-conf nginx --dry-run --with-diff</cmd> i si els canvis us semblen bé els podeu fer efectius utilitzant <cmd>yunohost tools regen-conf nginx --force</cmd>.",
|
||||
"global_settings_setting_smtp_allow_ipv6": "Permet l'ús de IPv6 per rebre i enviar correus electrònics",
|
||||
"diagnosis_mail_ehlo_unreachable_details": "No s'ha pogut establir una connexió amb el vostre servidor en el port 25 amb IPv{ipversion}. Sembla que el servidor no és accessible.<br>1. La causa més comú per aquest problema és que el port 25 <a href='https://yunohost.org/isp_box_config'>no està correctament redireccionat cap al vostre servidor</a>.<br>2. També us hauríeu d'assegurar que el servei postfix estigui funcionant.<br>3. En configuracions més complexes: assegureu-vos que que no hi hagi cap tallafoc ni reverse-proxy interferint.",
|
||||
"diagnosis_mail_ehlo_wrong_details": "El EHLO rebut pel servidor de diagnòstic remot amb IPv{ipversion} és diferent al domini del vostre servidor.<br>EHLO rebut: <code>{wrong_ehlo}</code><br>Esperat: <code>{right_ehlo}</code><br>La causa més habitual d'aquest problema és que el port 25 <a href='https://yunohost.org/isp_box_config'>no està correctament reenviat cap al vostre servidor</a>. També podeu comprovar que no hi hagi un tallafocs o un reverse-proxy interferint.",
|
||||
"diagnosis_mail_fcrdns_dns_missing": "No hi ha cap DNS invers definit per IPv{ipversion}. Alguns correus electrònics poden no entregar-se o poden ser marcats com a correu brossa.",
|
||||
"diagnosis_mail_blacklist_website": "Després d'haver identificat perquè estàveu llistats i haver arreglat el problema, no dubteu en demanar que la vostra IP o domini sigui eliminat de {blacklist_website}",
|
||||
"diagnosis_ports_partially_unreachable": "El port {port} no és accessible des de l'exterior amb IPv{failed}.",
|
||||
"diagnosis_http_partially_unreachable": "El domini {domain} sembla que no és accessible utilitzant HTTP des de l'exterior de la xarxa local amb IPv{failed}, tot i que funciona amb IPv{passed}.",
|
||||
"diagnosis_mail_fcrdns_nok_details": "Hauríeu d'intentar configurar primer el DNS invers amb <code>{ehlo_domain}</code> en la interfície del router o en la interfície del vostre allotjador. (Alguns allotjadors requereixen que obris un informe de suport per això).",
|
||||
"diagnosis_mail_fcrdns_nok_alternatives_4": "Alguns proveïdors no permeten configurar el DNS invers (o aquesta funció pot no funcionar…). Si teniu problemes a causa d'això, considereu les solucions següents:<br> - Alguns proveïdors d'accés a internet (ISP) donen l'alternativa de <a href='https://yunohost.org/#/smtp_relay'> utilitzar un relay de servidor de correu electrònic</a> tot i que implica que el relay podrà espiar el trànsit de correus electrònics.<br>- Una alternativa respectuosa amb la privacitat és utilitzar una VPN *amb una IP pública dedicada* per sobrepassar aquest tipus de limitacions. Mireu <a href='https://yunohost.org/#/vpn_advantage'>https://yunohost.org/#/vpn_advantage</a><br>- O es pot <a href='https://yunohost.org/#/isp'>canviar a un proveïdor diferent</a>",
|
||||
"diagnosis_mail_fcrdns_nok_alternatives_6": "Alguns proveïdors no permeten configurar el vostre DNS invers (o la funció no els hi funciona…). Si el vostre DNS invers està correctament configurat per IPv4, podeu intentar deshabilitar l'ús de IPv6 per a enviar correus electrònics utilitzant <cmd>yunohost settings set smtp.allow_ipv6 -v off</cmd>. Nota: aquesta última solució implica que no podreu enviar o rebre correus electrònics cap a els pocs servidors que hi ha que només tenen IPv-6.",
|
||||
"diagnosis_http_hairpinning_issue_details": "Això és probablement a causa del router del vostre proveïdor d'accés a internet. El que fa, que gent de fora de la xarxa local pugui accedir al servidor sense problemes, però no la gent de dins la xarxa local (com vostè probablement) quan s'utilitza el nom de domini o la IP global. Podreu segurament millorar la situació fent una ullada a <a href='https://yunohost.org/dns_local_network'>https://yunohost.org/dns_local_network</a>",
|
||||
"backup_archive_cant_retrieve_info_json": "No s'ha pogut carregar la informació de l'arxiu «{archive}»… No s'ha pogut obtenir el fitxer info.json (o no és un fitxer json vàlid).",
|
||||
"backup_archive_corrupted": "Sembla que l'arxiu de la còpia de seguretat «{archive}» està corromput : {error}",
|
||||
"certmanager_domain_not_diagnosed_yet": "Encara no hi ha cap resultat de diagnòstic per al domini %s. Torneu a executar el diagnòstic per a les categories «Registres DNS» i «Web» en la secció de diagnòstic per comprovar que el domini està preparat per a Let's Encrypt. (O si sabeu el que esteu fent, utilitzant «--no-checks» per deshabilitar les comprovacions.)",
|
||||
"diagnosis_ip_no_ipv6_tip": "Utilitzar una IPv6 no és obligatori per a que funcioni el servidor, però és millor per la salut d'Internet en conjunt. La IPv6 hauria d'estar configurada automàticament pel sistema o pel proveïdor si està disponible. Si no és el cas, pot ser necessari configurar alguns paràmetres més de forma manual tal i com s'explica en la documentació disponible aquí: <a href='https://yunohost.org/#/ipv6'>https://yunohost.org/#/ipv6</a>. Si no podeu habilitar IPv6 o us sembla massa tècnic, podeu ignorar aquest avís sense problemes.",
|
||||
"diagnosis_domain_expiration_not_found": "No s'ha pogut comprovar la data d'expiració d'alguns dominis",
|
||||
"diagnosis_domain_not_found_details": "El domini {domain} no existeix en la base de dades WHOIS o ha expirat!",
|
||||
"diagnosis_domain_expiration_not_found_details": "La informació WHOIS pel domini {domain} sembla que no conté informació sobre la data d'expiració?",
|
||||
"diagnosis_domain_expiration_success": "Els vostres dominis estan registrats i no expiraran properament.",
|
||||
"diagnosis_domain_expiration_warning": "Alguns dominis expiraran properament!",
|
||||
"diagnosis_domain_expiration_error": "Alguns dominis expiraran EN BREUS!",
|
||||
"diagnosis_domain_expires_in": "{domain} expirarà en {days} dies.",
|
||||
"diagnosis_swap_tip": "Vigileu i tingueu en compte que els servidor està allotjant memòria d'intercanvi en una targeta SD o en l'emmagatzematge SSD, això pot reduir dràsticament l'esperança de vida del dispositiu.",
|
||||
"restore_already_installed_apps": "No s'han pogut restaurar les següents aplicacions perquè ja estan instal·lades: {apps}",
|
||||
"app_packaging_format_not_supported": "No es pot instal·lar aquesta aplicació ja que el format del paquet no és compatible amb la versió de YunoHost del sistema. Hauríeu de considerar actualitzar el sistema.",
|
||||
"diagnosis_dns_try_dyndns_update_force": "La configuració DNS d'aquest domini hauria de ser gestionada automàticament per YunoHost. Si aquest no és el cas, podeu intentar forçar-ne l'actualització utilitzant <cmd>yunohost dyndns update --force</cmd>.",
|
||||
"migration_0015_cleaning_up": "Netejant la memòria cau i els paquets que ja no són necessaris…",
|
||||
"migration_0015_specific_upgrade": "Començant l'actualització dels paquets del sistema que s'han d'actualitzar de forma independent…",
|
||||
"migration_0015_modified_files": "Tingueu en compte que s'han trobat els següents fitxers que es van modificar manualment i podria ser que es sobreescriguin durant l'actualització: {manually_modified_files}",
|
||||
"migration_0015_problematic_apps_warning": "Tingueu en compte que s'han trobat les següents aplicacions que podrien ser problemàtiques. Sembla que aquestes aplicacions no s'han instal·lat des del catàleg d'aplicacions de YunoHost, o no estan marcades com «funcionant». En conseqüència, no es pot garantir que segueixin funcionant després de l'actualització: {problematic_apps}",
|
||||
"migration_0015_general_warning": "Tingueu en compte que aquesta migració és una operació delicada. L'equip de YunoHost ha fet tots els possibles per revisar i testejar, però tot i això podria ser que la migració trenqui alguna part del sistema o algunes aplicacions.\n\nPer tant, està recomana:\n - Fer una còpia de seguretat de totes les dades o aplicacions crítiques. Més informació a https://yunohost.org/backup;\n - Ser pacient un cop comenci la migració: en funció de la connexió Internet i del maquinari, podria estar unes hores per actualitzar-ho tot.",
|
||||
"migration_0015_system_not_fully_up_to_date": "El sistema no està completament al dia. Heu de fer una actualització normal abans de fer la migració a Buster.",
|
||||
"migration_0015_not_enough_free_space": "Hi ha poc espai lliure a /var/! HI hauria d'haver un mínim de 1GB lliure per poder fer aquesta migració.",
|
||||
"migration_0015_not_stretch": "La distribució actual de Debian no és Stretch!",
|
||||
"migration_0015_yunohost_upgrade": "Començant l'actualització del nucli de YunoHost…",
|
||||
"migration_0015_still_on_stretch_after_main_upgrade": "Alguna cosa ha anat malament durant la actualització principal, sembla que el sistema encara està en Debian Stretch",
|
||||
"migration_0015_main_upgrade": "Començant l'actualització principal…",
|
||||
"migration_0015_patching_sources_list": "Apedaçament de source.lists…",
|
||||
"migration_0015_start": "Començant la migració a Buster",
|
||||
"migration_description_0015_migrate_to_buster": "Actualitza els sistema a Debian Buster i YunoHost 4.x",
|
||||
"regenconf_need_to_explicitly_specify_ssh": "La configuració ssh ha estat modificada manualment, però heu d'especificar explícitament la categoria «ssh» amb --force per fer realment els canvis.",
|
||||
"migration_0015_weak_certs": "S'han trobat els següents certificats que encara utilitzen algoritmes de signatura febles i s'han d'actualitzar per a ser compatibles amb la propera versió de nginx: {certs}"
|
||||
}
|
||||
|
|
|
@ -167,23 +167,23 @@
|
|||
"certmanager_attempt_to_replace_valid_cert": "Du versuchst gerade eine richtiges und gültiges Zertifikat der Domain {domain:s} zu überschreiben! (Benutze --force , um diese Nachricht zu umgehen)",
|
||||
"certmanager_domain_unknown": "Unbekannte Domain '{domain:s}'",
|
||||
"certmanager_domain_cert_not_selfsigned": "Das Zertifikat der Domain {domain:s} ist kein selbstsigniertes Zertifikat. Bist du dir sicher, dass du es ersetzen willst? (Benutze dafür '--force')",
|
||||
"certmanager_certificate_fetching_or_enabling_failed": "Es scheint so als wäre die Aktivierung des Zertifikats für die Domain {domain:s} fehlgeschlagen...",
|
||||
"certmanager_certificate_fetching_or_enabling_failed": "Die Aktivierung des neuen Zertifikats für die Domain {domain:s} ist fehlgeschlagen…",
|
||||
"certmanager_attempt_to_renew_nonLE_cert": "Das Zertifikat der Domain '{domain:s}' wurde nicht von Let's Encrypt ausgestellt. Es kann nicht automatisch erneuert werden!",
|
||||
"certmanager_attempt_to_renew_valid_cert": "Das Zertifikat der Domain {domain:s} läuft nicht in Kürze ab! (Benutze --force um diese Nachricht zu umgehen)",
|
||||
"certmanager_domain_http_not_working": "Es scheint so, dass die Domain {domain:s} nicht über HTTP erreicht werden kann. Bitte überprüfe, ob deine DNS und nginx Konfiguration in Ordnung ist",
|
||||
"certmanager_error_no_A_record": "Kein DNS 'A' Eintrag für die Domain {domain:s} gefunden. Dein Domainname muss auf diese Maschine weitergeleitet werden, um ein Let's Encrypt Zertifikat installieren zu können! (Wenn du weißt was du tust, kannst du --no-checks benutzen, um diese Überprüfung zu überspringen. )",
|
||||
"certmanager_domain_dns_ip_differs_from_public_ip": "Der DNS 'A' Eintrag der Domain {domain:s} unterscheidet sich von dieser Server-IP. Wenn du gerade deinen A Eintrag verändert hast, warte bitte etwas, damit die Änderungen wirksam werden (du kannst die DNS Propagation mittels Website überprüfen) (Wenn du weißt was du tust, kannst du --no-checks benutzen, um diese Überprüfung zu überspringen. )",
|
||||
"certmanager_cannot_read_cert": "Es ist ein Fehler aufgetreten, als es versucht wurde das aktuelle Zertifikat für die Domain {domain:s} zu öffnen (Datei: {file:s}), Grund: {reason:s}",
|
||||
"certmanager_cert_install_success_selfsigned": "Ein selbstsigniertes Zertifikat für die Domain {domain:s} wurde erfolgreich installiert!",
|
||||
"certmanager_cert_install_success": "Für die Domain {domain:s} wurde erfolgreich ein Let's Encrypt installiert!",
|
||||
"certmanager_cert_renew_success": "Das Let's Encrypt Zertifikat für die Domain {domain:s} wurde erfolgreich erneuert!",
|
||||
"certmanager_cert_install_success_selfsigned": "Ein selbstsigniertes Zertifikat für die Domain {domain:s} wurde erfolgreich installiert",
|
||||
"certmanager_cert_install_success": "Für die Domain {domain:s} wurde erfolgreich ein Let's Encrypt Zertifikat installiert.",
|
||||
"certmanager_cert_renew_success": "Das Let's Encrypt Zertifikat für die Domain {domain:s} wurde erfolgreich erneuert.",
|
||||
"certmanager_hit_rate_limit": "Es wurden innerhalb kurzer Zeit schon zu viele Zertifikate für die exakt gleiche Domain {domain:s} ausgestellt. Bitte versuche es später nochmal. Besuche https://letsencrypt.org/docs/rate-limits/ für mehr Informationen",
|
||||
"certmanager_cert_signing_failed": "Das neue Zertifikat konnte nicht signiert werden",
|
||||
"certmanager_no_cert_file": "Die Zertifikatsdatei für die Domain {domain:s} (Datei: {file:s}) konnte nicht gelesen werden",
|
||||
"certmanager_conflicting_nginx_file": "Die Domain konnte nicht für die ACME challenge vorbereitet werden: Die nginx Konfigurationsdatei {filepath:s} verursacht Probleme und sollte vorher entfernt werden",
|
||||
"domain_cannot_remove_main": "Die primäre Domain konnten nicht entfernt werden. Lege zuerst einen neue primäre Domain fest",
|
||||
"certmanager_self_ca_conf_file_not_found": "Die Konfigurationsdatei der Zertifizierungsstelle für selbstsignierte Zertifikate wurde nicht gefunden (Datei {file:s})",
|
||||
"certmanager_acme_not_configured_for_domain": "Das Zertifikat für die Domain '{domain:s}' scheint nicht richtig installiert zu sein. Bitte führe den Befehl cert-install für diese Domain nochmals aus.",
|
||||
"certmanager_acme_not_configured_for_domain": "Die ACME Challenge kann im Moment nicht für {domain} ausgeführt werden, weil in ihrer nginx conf das entsprechende Code-Snippet fehlt... Bitte stellen Sie sicher, dass Ihre nginx-Konfiguration mit 'yunohost tools regen-conf nginx --dry-run --with-diff' auf dem neuesten Stand ist.",
|
||||
"certmanager_unable_to_parse_self_CA_name": "Der Name der Zertifizierungsstelle für selbstsignierte Zertifikate konnte nicht analysiert werden (Datei: {file:s})",
|
||||
"certmanager_http_check_timeout": "Eine Zeitüberschreitung ist aufgetreten, als der Server versuchte sich selbst über HTTP mit der öffentlichen IP (Domain '{domain:s}' mit der IP '{ip:s}') zu erreichen. Möglicherweise ist dafür hairpinning oder eine falsch konfigurierte Firewall/Router deines Servers dafür verantwortlich.",
|
||||
"certmanager_couldnt_fetch_intermediate_cert": "Eine Zeitüberschreitung ist aufgetreten als der Server versuchte die Teilzertifikate von Let's Encrypt zusammenzusetzen. Die Installation/Erneuerung des Zertifikats wurde abgebrochen — bitte versuche es später erneut.",
|
||||
|
@ -338,5 +338,8 @@
|
|||
"diagnosis_found_warnings": "Habe {warnings} Ding(e) gefunden, die verbessert werden könnten für {category}.",
|
||||
"diagnosis_ip_dnsresolution_working": "Domänen-Namens-Auflösung funktioniert!",
|
||||
"diagnosis_ip_weird_resolvconf": "DNS Auflösung scheint zu funktionieren, aber sei vorsichtig wenn du eine eigene <code>/etc/resolv.conf</code> verwendest.",
|
||||
"diagnosis_display_tip": "Um die gefundenen Probleme zu sehen, kannst Du zum Diagnose-Bereich des webadmin gehen, oder 'yunohost diagnosis show --issues' in der Kommandozeile ausführen."
|
||||
"diagnosis_display_tip": "Um die gefundenen Probleme zu sehen, kannst Du zum Diagnose-Bereich des webadmin gehen, oder 'yunohost diagnosis show --issues' in der Kommandozeile ausführen.",
|
||||
"backup_archive_corrupted": "Das Backup-Archiv '{archive}' scheint beschädigt: {error}",
|
||||
"backup_archive_cant_retrieve_info_json": "Die Informationen für das Archiv '{archive}' konnten nicht geladen werden... Die Datei info.json wurde nicht gefunden (oder ist kein gültiges json).",
|
||||
"app_packaging_format_not_supported": "Diese App kann nicht installiert werden da das Paketformat nicht von der Yunohost-Version unterstützt wird. Denken Sie darüber nach das System zu aktualisieren."
|
||||
}
|
||||
|
|
188
locales/en.json
188
locales/en.json
|
@ -29,25 +29,26 @@
|
|||
"app_manifest_invalid": "Something is wrong with the app manifest: {error}",
|
||||
"app_not_upgraded": "The app '{failed_app}' failed to upgrade, and as a consequence the following apps' upgrades have been cancelled: {apps}",
|
||||
"app_not_correctly_installed": "{app:s} seems to be incorrectly installed",
|
||||
"app_not_installed": "Could not find the app '{app:s}' in the list of installed apps: {all_apps}",
|
||||
"app_not_installed": "Could not find {app:s} in the list of installed apps: {all_apps}",
|
||||
"app_not_properly_removed": "{app:s} has not been properly removed",
|
||||
"app_removed": "{app:s} removed",
|
||||
"app_requirements_checking": "Checking required packages for {app}…",
|
||||
"app_requirements_checking": "Checking required packages for {app}...",
|
||||
"app_requirements_unmeet": "Requirements are not met for {app}, the package {pkgname} ({version}) must be {spec}",
|
||||
"app_remove_after_failed_install": "Removing the app following the installation failure…",
|
||||
"app_remove_after_failed_install": "Removing the app following the installation failure...",
|
||||
"app_sources_fetch_failed": "Could not fetch sources files, is the URL correct?",
|
||||
"app_start_install": "Installing the app '{app}'…",
|
||||
"app_start_remove": "Removing the app '{app}'…",
|
||||
"app_start_backup": "Collecting files to be backed up for the app '{app}'…",
|
||||
"app_start_restore": "Restoring the app '{app}'…",
|
||||
"app_start_install": "Installing {app}...",
|
||||
"app_start_remove": "Removing {app}...",
|
||||
"app_start_backup": "Collecting files to be backed up for {app}...",
|
||||
"app_start_restore": "Restoring {app}...",
|
||||
"app_unknown": "Unknown app",
|
||||
"app_unsupported_remote_type": "Unsupported remote type used for the app",
|
||||
"app_upgrade_several_apps": "The following apps will be upgraded: {apps}",
|
||||
"app_upgrade_app_name": "Now upgrading {app}…",
|
||||
"app_upgrade_app_name": "Now upgrading {app}...",
|
||||
"app_upgrade_failed": "Could not upgrade {app:s}: {error}",
|
||||
"app_upgrade_script_failed": "An error occurred inside the app upgrade script",
|
||||
"app_upgrade_some_app_failed": "Some apps could not be upgraded",
|
||||
"app_upgraded": "{app:s} upgraded",
|
||||
"app_packaging_format_not_supported": "This app cannot be installed because its packaging format is not supported by your YunoHost version. You should probably consider upgrading your system.",
|
||||
"apps_already_up_to_date": "All apps are already up-to-date",
|
||||
"apps_catalog_init_success": "App catalog system initialized!",
|
||||
"apps_catalog_updating": "Updating application catalog…",
|
||||
|
@ -63,22 +64,22 @@
|
|||
"ask_new_path": "New path",
|
||||
"ask_password": "Password",
|
||||
"backup_abstract_method": "This backup method has yet to be implemented",
|
||||
"backup_actually_backuping": "Creating a backup archive from the collected files…",
|
||||
"backup_app_failed": "Could not back up the app '{app:s}'",
|
||||
"backup_applying_method_borg": "Sending all files to backup into borg-backup repository…",
|
||||
"backup_applying_method_copy": "Copying all files to backup…",
|
||||
"backup_applying_method_custom": "Calling the custom backup method '{method:s}'…",
|
||||
"backup_applying_method_tar": "Creating the backup TAR archive…",
|
||||
"backup_archive_app_not_found": "Could not find the app '{app:s}' in the backup archive",
|
||||
"backup_actually_backuping": "Creating a backup archive from the collected files...",
|
||||
"backup_app_failed": "Could not back up {app:s}",
|
||||
"backup_applying_method_borg": "Sending all files to backup into borg-backup repository...",
|
||||
"backup_applying_method_copy": "Copying all files to backup...",
|
||||
"backup_applying_method_custom": "Calling the custom backup method '{method:s}'...",
|
||||
"backup_applying_method_tar": "Creating the backup TAR archive...",
|
||||
"backup_archive_app_not_found": "Could not find {app:s} in the backup archive",
|
||||
"backup_archive_broken_link": "Could not access the backup archive (broken link to {path:s})",
|
||||
"backup_archive_name_exists": "A backup archive with this name already exists.",
|
||||
"backup_archive_name_unknown": "Unknown local backup archive named '{name:s}'",
|
||||
"backup_archive_open_failed": "Could not open the backup archive",
|
||||
"backup_archive_cant_retrieve_info_json": "Could not load infos for archive '{archive}' ... The info.json cannot be retrieved (or is not a valid json).",
|
||||
"backup_archive_cant_retrieve_info_json": "Could not load infos for archive '{archive}'... The info.json cannot be retrieved (or is not a valid json).",
|
||||
"backup_archive_corrupted": "It looks like the backup archive '{archive}' is corrupted : {error}",
|
||||
"backup_archive_system_part_not_available": "System part '{part:s}' unavailable in this backup",
|
||||
"backup_archive_writing_error": "Could not add the files '{source:s}' (named in the archive '{dest:s}') to be backed up into the compressed archive '{archive:s}'",
|
||||
"backup_ask_for_copying_if_needed": "Do you want to perform the backup using {size:s} MB temporarily? (This way is used since some files could not be prepared using a more efficient method.)",
|
||||
"backup_ask_for_copying_if_needed": "Do you want to perform the backup using {size:s}MB temporarily? (This way is used since some files could not be prepared using a more efficient method.)",
|
||||
"backup_borg_not_implemented": "The Borg backup method is not yet implemented",
|
||||
"backup_cant_mount_uncompress_archive": "Could not mount the uncompressed archive as write protected",
|
||||
"backup_cleaning_failed": "Could not clean up the temporary backup folder",
|
||||
|
@ -97,20 +98,19 @@
|
|||
"backup_method_copy_finished": "Backup copy finalized",
|
||||
"backup_method_custom_finished": "Custom backup method '{method:s}' finished",
|
||||
"backup_method_tar_finished": "TAR backup archive created",
|
||||
"backup_mount_archive_for_restore": "Preparing archive for restoration…",
|
||||
"backup_mount_archive_for_restore": "Preparing archive for restoration...",
|
||||
"backup_no_uncompress_archive_dir": "There is no such uncompressed archive directory",
|
||||
"backup_nothings_done": "Nothing to save",
|
||||
"backup_output_directory_forbidden": "Pick a different output directory. Backups cannot be created in /bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var or /home/yunohost.backup/archives sub-folders",
|
||||
"backup_output_directory_not_empty": "You should pick an empty output directory",
|
||||
"backup_output_directory_required": "You must provide an output directory for the backup",
|
||||
"backup_output_symlink_dir_broken": "Your archive directory '{path:s}' is a broken symlink. Maybe you forgot to re/mount or plug in the storage medium it points to.",
|
||||
"backup_permission": "Backup permission for app {app:s}",
|
||||
"backup_php5_to_php7_migration_may_fail": "Could not convert your archive to support PHP 7, you may be unable to restore your PHP apps (reason: {error:s})",
|
||||
"backup_running_hooks": "Running backup hooks…",
|
||||
"backup_permission": "Backup permission for {app:s}",
|
||||
"backup_running_hooks": "Running backup hooks...",
|
||||
"backup_system_part_failed": "Could not backup the '{part:s}' system part",
|
||||
"backup_unable_to_organize_files": "Could not use the quick method to organize files in the archive",
|
||||
"backup_with_no_backup_script_for_app": "The app '{app:s}' has no backup script. Ignoring.",
|
||||
"backup_with_no_restore_script_for_app": "The '{app:s}' has no restoration script, you will not be able to automatically restore the backup of this app.",
|
||||
"backup_with_no_restore_script_for_app": "{app:s} has no restoration script, you will not be able to automatically restore the backup of this app.",
|
||||
"certmanager_acme_not_configured_for_domain": "The ACME challenge cannot be ran for {domain} right now because its nginx conf lacks the corresponding code snippet... Please make sure that your nginx configuration is up to date using `yunohost tools regen-conf nginx --dry-run --with-diff`.",
|
||||
"certmanager_attempt_to_renew_nonLE_cert": "The certificate for the domain '{domain:s}' is not issued by Let's Encrypt. Cannot renew it automatically!",
|
||||
"certmanager_attempt_to_renew_valid_cert": "The certificate for the domain '{domain:s}' is not about to expire! (You may use --force if you know what you're doing)",
|
||||
|
@ -120,16 +120,15 @@
|
|||
"certmanager_cert_install_success_selfsigned": "Self-signed certificate now installed for the domain '{domain:s}'",
|
||||
"certmanager_cert_renew_success": "Let's Encrypt certificate renewed for the domain '{domain:s}'",
|
||||
"certmanager_cert_signing_failed": "Could not sign the new certificate",
|
||||
"certmanager_certificate_fetching_or_enabling_failed": "Trying to use the new certificate for {domain:s} did not work…",
|
||||
"certmanager_certificate_fetching_or_enabling_failed": "Trying to use the new certificate for {domain:s} did not work...",
|
||||
"certmanager_couldnt_fetch_intermediate_cert": "Timed out when trying to fetch intermediate certificate from Let's Encrypt. Certificate installation/renewal aborted—please try again later.",
|
||||
"certmanager_domain_not_diagnosed_yet": "There is no diagnosis result for domain {domain} yet. Please re-run a diagnosis for categories 'DNS records' and 'Web' in the diagnosis section to check if the domain is ready for Let's Encrypt. (Or if you know what you are doing, use '--no-checks' to turn off those checks.)",
|
||||
"certmanager_domain_cert_not_selfsigned": "The certificate for domain {domain:s} is not self-signed. Are you sure you want to replace it? (Use '--force' to do so.)",
|
||||
"certmanager_domain_dns_ip_differs_from_public_ip": "The DNS 'A' record for the domain '{domain:s}' is different from this server's IP. If you recently modified your A record, please wait for it to propagate (some DNS propagation checkers are available online). (If you know what you are doing, use '--no-checks' to turn off those checks.)",
|
||||
"certmanager_domain_http_not_working": "It seems the domain {domain:s} cannot be accessed through HTTP. Check that your DNS and NGINX configuration is correct",
|
||||
"certmanager_domain_dns_ip_differs_from_public_ip": "The DNS records for domain '{domain:s}' is different from this server's IP. Please check the 'DNS records' (basic) category in the diagnosis for more info. If you recently modified your A record, please wait for it to propagate (some DNS propagation checkers are available online). (If you know what you are doing, use '--no-checks' to turn off those checks.)",
|
||||
"certmanager_domain_http_not_working": "Domain {domain:s} does not seem to be accessible through HTTP. Please check the 'Web' category in the diagnosis for more info. (If you know what you are doing, use '--no-checks' to turn off those checks.)",
|
||||
"certmanager_domain_unknown": "Unknown domain '{domain:s}'",
|
||||
"certmanager_error_no_A_record": "No DNS 'A' record found for '{domain:s}'. You need to make your domain name point to your machine to be able to install a Let's Encrypt certificate. (If you know what you are doing, use '--no-checks' to turn off those checks.)",
|
||||
"certmanager_warning_subdomain_dns_record": "Subdomain '{subdomain:s}' does not resolve to the same IP address as '{domain:s}'. Some features will not be available until you fix this and regenerate the certificate.",
|
||||
"certmanager_hit_rate_limit": "Too many certificates already issued for this exact set of domains {domain:s} recently. Please try again later. See https://letsencrypt.org/docs/rate-limits/ for more details",
|
||||
"certmanager_http_check_timeout": "Timed out when server tried to contact itself through HTTP using a public IP address (domain '{domain:s}' with IP '{ip:s}'). You may be experiencing a hairpinning issue, or the firewall/router ahead of your server is misconfigured.",
|
||||
"certmanager_no_cert_file": "Could not read the certificate file for the domain {domain:s} (file: {file:s})",
|
||||
"certmanager_self_ca_conf_file_not_found": "Could not find configuration file for self-signing authority (file: {file:s})",
|
||||
"certmanager_unable_to_parse_self_CA_name": "Could not parse name of self-signing authority (file: {file:s})",
|
||||
|
@ -155,9 +154,9 @@
|
|||
"diagnosis_everything_ok": "Everything looks good for {category}!",
|
||||
"diagnosis_failed": "Failed to fetch diagnosis result for category '{category}': {error}",
|
||||
"diagnosis_no_cache": "No diagnosis cache yet for category '{category}'",
|
||||
"diagnosis_ip_connected_ipv4": "The server is connected to the Internet through IPv4 !",
|
||||
"diagnosis_ip_connected_ipv4": "The server is connected to the Internet through IPv4!",
|
||||
"diagnosis_ip_no_ipv4": "The server does not have working IPv4.",
|
||||
"diagnosis_ip_connected_ipv6": "The server is connected to the Internet through IPv6 !",
|
||||
"diagnosis_ip_connected_ipv6": "The server is connected to the Internet through IPv6!",
|
||||
"diagnosis_ip_no_ipv6": "The server does not have working IPv6.",
|
||||
"diagnosis_ip_no_ipv6_tip": "Having a working IPv6 is not mandatory for your server to work, but it is better for the health of the Internet as a whole. IPv6 should usually be automatically configured by the system or your provider if it's available. Otherwise, you might need to configure a few things manually as explained in the documentation here: <a href='https://yunohost.org/#/ipv6'>https://yunohost.org/#/ipv6</a>. If you cannot enable IPv6 or if it seems too technical for you, you can also safely ignore this warning.",
|
||||
"diagnosis_ip_global": "Global IP: <code>{global}</code>",
|
||||
|
@ -171,8 +170,9 @@
|
|||
"diagnosis_dns_good_conf": "DNS records are correctly configured for domain {domain} (category {category})",
|
||||
"diagnosis_dns_bad_conf": "Some DNS records are missing or incorrect for domain {domain} (category {category})",
|
||||
"diagnosis_dns_missing_record": "According to the recommended DNS configuration, you should add a DNS record with the following info.<br>Type: <code>{type}</code><br>Name: <code>{name}</code><br>Value: <code>{value}</code>",
|
||||
"diagnosis_dns_discrepancy": "The following DNS record does not seem to follow the recommended configuration:<br>Type: <code>{type}</code><br>Name: <code>{name}</code><br>Current value: <code>{current}</code><br>Excepted value: <code>{value}</code>",
|
||||
"diagnosis_dns_discrepancy": "The following DNS record does not seem to follow the recommended configuration:<br>Type: <code>{type}</code><br>Name: <code>{name}</code><br>Current value: <code>{current}</code><br>Expected value: <code>{value}</code>",
|
||||
"diagnosis_dns_point_to_doc": "Please check the documentation at <a href='https://yunohost.org/dns_config'>https://yunohost.org/dns_config</a> if you need help about configuring DNS records.",
|
||||
"diagnosis_dns_try_dyndns_update_force": "This domain's DNS configuration should automatically be managed by Yunohost. If that's not the case, you can try to force an update using <cmd>yunohost dyndns update --force</cmd>.",
|
||||
"diagnosis_domain_expiration_not_found": "Unable to check the expiration date for some domains",
|
||||
"diagnosis_domain_not_found_details": "The domain {domain} doesn't exist in WHOIS database or is expired!",
|
||||
"diagnosis_domain_expiration_not_found_details": "The WHOIS information for domain {domain} doesn't seem to contain the information about the expiration date?",
|
||||
|
@ -193,6 +193,7 @@
|
|||
"diagnosis_swap_none": "The system has no swap at all. You should consider adding at least {recommended} of swap to avoid situations where the system runs out of memory.",
|
||||
"diagnosis_swap_notsomuch": "The system has only {total} swap. You should consider having at least {recommended} to avoid situations where the system runs out of memory.",
|
||||
"diagnosis_swap_ok": "The system has {total} of swap!",
|
||||
"diagnosis_swap_tip": "Please be careful and aware that if the server is hosting swap on an SD card or SSD storage, it may drastically reduce the life expectancy of the device`.",
|
||||
"diagnosis_mail_outgoing_port_25_ok": "The SMTP mail server is able to send emails (outgoing port 25 is not blocked).",
|
||||
"diagnosis_mail_outgoing_port_25_blocked": "The SMTP mail server cannot send emails to other servers because outgoing port 25 is blocked in IPv{ipversion}.",
|
||||
"diagnosis_mail_outgoing_port_25_blocked_details": "You should first try to unblock outgoing port 25 in your internet router interface or your hosting provider interface. (Some hosting provider may require you to send them a support ticket for this).",
|
||||
|
@ -224,7 +225,6 @@
|
|||
"diagnosis_regenconf_allgood": "All configurations files are in line with the recommended configuration!",
|
||||
"diagnosis_regenconf_manually_modified": "Configuration file <code>{file}</code> appears to have been manually modified.",
|
||||
"diagnosis_regenconf_manually_modified_details": "This is probably OK if you know what you're doing! YunoHost will stop updating this file automatically... But beware that YunoHost upgrades could contain important recommended changes. If you want to, you can inspect the differences with <cmd>yunohost tools regen-conf {category} --dry-run --with-diff</cmd> and force the reset to the recommended configuration with <cmd>yunohost tools regen-conf {category} --force</cmd>",
|
||||
"diagnosis_security_all_good": "No critical security vulnerability was found.",
|
||||
"diagnosis_security_vulnerable_to_meltdown": "You appear vulnerable to the Meltdown criticial security vulnerability",
|
||||
"diagnosis_security_vulnerable_to_meltdown_details": "To fix this, you should upgrade your system and reboot to load the new linux kernel (or contact your server provider if this doesn't work). See https://meltdownattack.com/ for more infos.",
|
||||
"diagnosis_description_basesystem": "Base system",
|
||||
|
@ -236,7 +236,6 @@
|
|||
"diagnosis_description_web": "Web",
|
||||
"diagnosis_description_mail": "Email",
|
||||
"diagnosis_description_regenconf": "System configurations",
|
||||
"diagnosis_description_security": "Security checks",
|
||||
"diagnosis_ports_could_not_diagnose": "Could not diagnose if ports are reachable from outside in IPv{ipversion}.",
|
||||
"diagnosis_ports_could_not_diagnose_details": "Error: {error}",
|
||||
"diagnosis_ports_unreachable": "Port {port} is not reachable from outside.",
|
||||
|
@ -271,12 +270,12 @@
|
|||
"domain_dyndns_root_unknown": "Unknown DynDNS root domain",
|
||||
"domain_exists": "The domain already exists",
|
||||
"domain_hostname_failed": "Could not set new hostname. This might cause an issue later (it might be fine).",
|
||||
"domain_uninstall_app_first": "One or more apps are installed on this domain. Please uninstall them before proceeding to domain removal",
|
||||
"domain_uninstall_app_first": "Those applications are still installed on your domain: {apps}. Please uninstall them before proceeding to domain removal",
|
||||
"domain_unknown": "Unknown domain",
|
||||
"domains_available": "Available domains:",
|
||||
"done": "Done",
|
||||
"downloading": "Downloading…",
|
||||
"dpkg_is_broken": "You cannot do this right now because dpkg/APT (the system package managers) seems to be in a broken state… You can try to solve this issue by connecting through SSH and running `sudo dpkg --configure -a`.",
|
||||
"dpkg_is_broken": "You cannot do this right now because dpkg/APT (the system package managers) seems to be in a broken state… You can try to solve this issue by connecting through SSH and running `sudo apt install --fix-broken` and/or `sudo dpkg --configure -a`.",
|
||||
"dpkg_lock_not_available": "This command can't be run right now because another program seems to be using the lock of dpkg (the system package manager)",
|
||||
"dyndns_could_not_check_provide": "Could not check if {provider:s} can provide {domain:s}.",
|
||||
"dyndns_could_not_check_available": "Could not check if {domain:s} is available on {provider:s}.",
|
||||
|
@ -285,7 +284,7 @@
|
|||
"dyndns_cron_removed": "DynDNS cron job removed",
|
||||
"dyndns_ip_update_failed": "Could not update IP address to DynDNS",
|
||||
"dyndns_ip_updated": "Updated your IP on DynDNS",
|
||||
"dyndns_key_generating": "Generating DNS key… It may take a while.",
|
||||
"dyndns_key_generating": "Generating DNS key... It may take a while.",
|
||||
"dyndns_key_not_found": "DNS key not found for the domain",
|
||||
"dyndns_no_domain_registered": "No domain registered with DynDNS",
|
||||
"dyndns_provider_unreachable": "Unable to reach DynDNS provider {provider}: either your YunoHost is not correctly connected to the internet or the dynette server is down.",
|
||||
|
@ -293,9 +292,9 @@
|
|||
"dyndns_registration_failed": "Could not register DynDNS domain: {error:s}",
|
||||
"dyndns_domain_not_provided": "DynDNS provider {provider:s} cannot provide domain {domain:s}.",
|
||||
"dyndns_unavailable": "The domain '{domain:s}' is unavailable.",
|
||||
"executing_command": "Executing command '{command:s}'…",
|
||||
"executing_script": "Executing script '{script:s}'…",
|
||||
"extracting": "Extracting…",
|
||||
"executing_command": "Executing command '{command:s}'...",
|
||||
"executing_script": "Executing script '{script:s}'...",
|
||||
"extracting": "Extracting...",
|
||||
"experimental_feature": "Warning: This feature is experimental and not considered stable, you should not use it unless you know what you are doing.",
|
||||
"field_invalid": "Invalid field '{:s}'",
|
||||
"file_does_not_exist": "The file {path:s} does not exist.",
|
||||
|
@ -327,7 +326,7 @@
|
|||
"good_practices_about_user_password": "You are now about to define a new user password. The password should be at least 8 characters long—though it is good practice to use a longer password (i.e. a passphrase) and/or to a variation of characters (uppercase, lowercase, digits and special characters).",
|
||||
"group_already_exist": "Group {group} already exists",
|
||||
"group_already_exist_on_system": "Group {group} already exists in the system groups",
|
||||
"group_already_exist_on_system_but_removing_it": "Group {group} already exists in the system groups, but YunoHost will remove it…",
|
||||
"group_already_exist_on_system_but_removing_it": "Group {group} already exists in the system groups, but YunoHost will remove it...",
|
||||
"group_created": "Group '{group}' created",
|
||||
"group_creation_failed": "Could not create the group '{group}': {error}",
|
||||
"group_cannot_edit_all_users": "The group 'all_users' cannot be edited manually. It is a special group meant to contain all users registered in YunoHost",
|
||||
|
@ -406,72 +405,42 @@
|
|||
"mail_unavailable": "This e-mail address is reserved and shall be automatically allocated to the very first user",
|
||||
"main_domain_change_failed": "Unable to change the main domain",
|
||||
"main_domain_changed": "The main domain has been changed",
|
||||
"migrate_tsig_end": "Migration to HMAC-SHA-512 finished",
|
||||
"migrate_tsig_failed": "Could not migrate the DynDNS domain '{domain}' to HMAC-SHA-512, rolling back. Error: {error_code}, {error}",
|
||||
"migrate_tsig_start": "Insufficiently secure key algorithm detected for TSIG signature of the domain '{domain}', initiating migration to the more secure HMAC-SHA-512",
|
||||
"migrate_tsig_wait": "Waiting three minutes for the DynDNS server to take the new key into account…",
|
||||
"migrate_tsig_wait_2": "2min…",
|
||||
"migrate_tsig_wait_3": "1min…",
|
||||
"migrate_tsig_wait_4": "30 seconds…",
|
||||
"migrate_tsig_not_needed": "You do not appear to use a DynDNS domain, so no migration is needed.",
|
||||
"migration_description_0001_change_cert_group_to_sslcert": "Change certificates group permissions from 'metronome' to 'ssl-cert'",
|
||||
"migration_description_0002_migrate_to_tsig_sha256": "Improve security of DynDNS TSIG updates by using SHA-512 instead of MD5",
|
||||
"migration_description_0003_migrate_to_stretch": "Upgrade the system to Debian Stretch and YunoHost 3.0",
|
||||
"migration_description_0004_php5_to_php7_pools": "Reconfigure the PHP pools to use PHP 7 instead of 5",
|
||||
"migration_description_0005_postgresql_9p4_to_9p6": "Migrate databases from PostgreSQL 9.4 to 9.6",
|
||||
"migration_description_0006_sync_admin_and_root_passwords": "Synchronize admin and root passwords",
|
||||
"migration_description_0007_ssh_conf_managed_by_yunohost_step1": "Let the SSH configuration be managed by YunoHost (step 1, automatic)",
|
||||
"migration_description_0008_ssh_conf_managed_by_yunohost_step2": "Let the SSH configuration be managed by YunoHost (step 2, manual)",
|
||||
"migration_description_0009_decouple_regenconf_from_services": "Decouple the regen-conf mechanism from services",
|
||||
"migration_description_0010_migrate_to_apps_json": "Remove deprecated apps catalogs and use the new unified 'apps.json' list instead (outdated, replaced by migration 13)",
|
||||
"migration_description_0011_setup_group_permission": "Set up user groups and permissions for apps and services",
|
||||
"migration_description_0012_postgresql_password_to_md5_authentication": "Force PostgreSQL authentication to use MD5 for local connections",
|
||||
"migration_description_0013_futureproof_apps_catalog_system": "Migrate to the new future-proof apps catalog system",
|
||||
"migration_description_0014_remove_app_status_json": "Remove legacy status.json app files",
|
||||
"migration_0003_start": "Starting migration to Stretch. The logs will be available in {logfile}.",
|
||||
"migration_0003_patching_sources_list": "Patching the sources.lists…",
|
||||
"migration_0003_main_upgrade": "Starting main upgrade…",
|
||||
"migration_0003_fail2ban_upgrade": "Starting the Fail2Ban upgrade…",
|
||||
"migration_0003_restoring_origin_nginx_conf": "Your file /etc/nginx/nginx.conf was edited somehow. The migration is going to reset it to its original state first… The previous file will be available as {backup_dest}.",
|
||||
"migration_0003_yunohost_upgrade": "Starting the YunoHost package upgrade… The migration will end, but the actual upgrade will happen immediately afterwards. After the operation is complete, you might have to log in to the webadmin page again.",
|
||||
"migration_0003_not_jessie": "The current Debian distribution is not Jessie!",
|
||||
"migration_0003_system_not_fully_up_to_date": "Your system is not fully up-to-date. Please perform a regular upgrade before running the migration to Stretch.",
|
||||
"migration_0003_still_on_jessie_after_main_upgrade": "Something went wrong during the main upgrade: Is the system still on Jessie‽ To investigate the issue, please look at {log}:s…",
|
||||
"migration_0003_general_warning": "Please note that this migration is a delicate operation. The YunoHost team did its best to review and test it, but the migration might still break parts of the system or its apps.\n\nTherefore, it is recommended to:\n - Perform a backup of any critical data or app. More info on https://yunohost.org/backup;\n - Be patient after launching the migration: Depending on your Internet connection and hardware, it might take up to a few hours for everything to upgrade.\n\nAdditionally, the port for SMTP, used by external e-mail clients (like Thunderbird or K9-Mail) was changed from 465 (SSL/TLS) to 587 (STARTTLS). The old port (465) will automatically be closed, and the new port (587) will be opened in the firewall. You and your users *will* have to adapt the configuration of your e-mail clients accordingly.",
|
||||
"migration_0003_problematic_apps_warning": "Please note that the following possibly problematic installed apps were detected. It looks like those were not installed from an app catalog, or are not flagged as 'working'. Consequently, it cannot be guaranteed that they will still work after the upgrade: {problematic_apps}",
|
||||
"migration_0003_modified_files": "Please note that the following files were found to be manually modified and might be overwritten following the upgrade: {manually_modified_files}",
|
||||
"migration_0005_postgresql_94_not_installed": "PostgreSQL was not installed on your system. Nothing to do.",
|
||||
"migration_0005_postgresql_96_not_installed": "PostgreSQL 9.4 is installed, but not postgresql 9.6‽ Something weird might have happened on your system :(…",
|
||||
"migration_0005_not_enough_space": "Make sufficient space available in {path} to run the migration.",
|
||||
"migration_0006_disclaimer": "YunoHost now expects the admin and root passwords to be synchronized. This migration replaces your root password with the admin password.",
|
||||
"migration_0007_cancelled": "Could not improve the way your SSH configuration is managed.",
|
||||
"migration_0007_cannot_restart": "SSH can't be restarted after trying to cancel migration number 6.",
|
||||
"migration_0008_general_disclaimer": "To improve the security of your server, it is recommended to let YunoHost manage the SSH configuration. Your current SSH setup differs from the recommendation. If you let YunoHost reconfigure it, the way you connect to your server through SSH will change thusly:",
|
||||
"migration_0008_port": "• You will have to connect using port 22 instead of your current custom SSH port. Feel free to reconfigure it;",
|
||||
"migration_0008_root": "• You will not be able to connect as root through SSH. Instead you should use the admin user;",
|
||||
"migration_0008_dsa": "• The DSA key will be turned off. Hence, you might need to invalidate a spooky warning from your SSH client, and recheck the fingerprint of your server;",
|
||||
"migration_0008_warning": "If you understand those warnings and want YunoHost to override your current configuration, run the migration. Otherwise, you can also skip the migration, though it is not recommended.",
|
||||
"migration_0008_no_warning": "Overriding your SSH configuration should be safe, though this cannot be promised! Run the migration to override it. Otherwise, you can also skip the migration, though it is not recommended.",
|
||||
"migration_0009_not_needed": "This migration already happened somehow… (?) Skipping.",
|
||||
"migration_0011_backup_before_migration": "Creating a backup of LDAP database and apps settings prior to the actual migration.",
|
||||
"migration_0011_can_not_backup_before_migration": "The backup of the system could not be completed before the migration failed. Error: {error:s}",
|
||||
"migration_description_0015_migrate_to_buster": "Upgrade the system to Debian Buster and YunoHost 4.x",
|
||||
"migration_description_0016_php70_to_php73_pools": "Migrate php7.0-fpm 'pool' conf files to php7.3",
|
||||
"migration_description_0017_postgresql_9p6_to_11": "Migrate databases from PostgreSQL 9.6 to 11",
|
||||
"migration_description_0018_xtable_to_nftable": "Migrate old network traffic rules to the new nftable system",
|
||||
"migration_0011_create_group": "Creating a group for each user…",
|
||||
"migration_0011_done": "Migration completed. You are now able to manage usergroups.",
|
||||
"migration_0011_slapd_config_will_be_overwritten": "It looks like you manually edited the slapd configuration. For this critical migration, YunoHost needs to force the update of the slapd configuration. The original files will be backuped in {conf_backup_folder}.",
|
||||
"migration_0011_LDAP_update_failed": "Could not update LDAP. Error: {error:s}",
|
||||
"migration_0011_migrate_permission": "Migrating permissions from apps settings to LDAP…",
|
||||
"migration_0011_migration_failed_trying_to_rollback": "Could not migrate… trying to roll back the system.",
|
||||
"migration_0011_rollback_success": "System rolled back.",
|
||||
"migration_0011_update_LDAP_database": "Updating LDAP database…",
|
||||
"migration_0011_update_LDAP_schema": "Updating LDAP schema…",
|
||||
"migration_0011_migrate_permission": "Migrating permissions from apps settings to LDAP...",
|
||||
"migration_0011_update_LDAP_database": "Updating LDAP database...",
|
||||
"migration_0011_update_LDAP_schema": "Updating LDAP schema...",
|
||||
"migration_0011_failed_to_remove_stale_object": "Could not remove stale object {dn}: {error}",
|
||||
"migration_0015_start" : "Starting migration to Buster",
|
||||
"migration_0015_patching_sources_list": "Patching the sources.lists...",
|
||||
"migration_0015_main_upgrade": "Starting main upgrade...",
|
||||
"migration_0015_still_on_stretch_after_main_upgrade": "Something went wrong during the main upgrade, the system appears to still be on Debian Stretch",
|
||||
"migration_0015_yunohost_upgrade" : "Starting YunoHost core upgrade...",
|
||||
"migration_0015_not_stretch" : "The current Debian distribution is not Stretch!",
|
||||
"migration_0015_not_enough_free_space" : "Free space is pretty low in /var/! You should have at least 1GB free to run this migration.",
|
||||
"migration_0015_system_not_fully_up_to_date": "Your system is not fully up-to-date. Please perform a regular upgrade before running the migration to Buster.",
|
||||
"migration_0015_general_warning": "Please note that this migration is a delicate operation. The YunoHost team did its best to review and test it, but the migration might still break parts of the system or its apps.\n\nTherefore, it is recommended to:\n - Perform a backup of any critical data or app. More info on https://yunohost.org/backup;\n - Be patient after launching the migration: Depending on your Internet connection and hardware, it might take up to a few hours for everything to upgrade.",
|
||||
"migration_0015_problematic_apps_warning": "Please note that the following possibly problematic installed apps were detected. It looks like those were not installed from the YunoHost app catalog, or are not flagged as 'working'. Consequently, it cannot be guaranteed that they will still work after the upgrade: {problematic_apps}",
|
||||
"migration_0015_modified_files": "Please note that the following files were found to be manually modified and might be overwritten following the upgrade: {manually_modified_files}",
|
||||
"migration_0015_specific_upgrade": "Starting upgrade of system packages that needs to be upgrade independently...",
|
||||
"migration_0015_cleaning_up": "Cleaning up cache and packages not useful anymore...",
|
||||
"migration_0015_weak_certs": "The following certificates were found to still use weak signature algorithms and have to be upgraded to be compatible with the next version of nginx: {certs}",
|
||||
"migration_0017_postgresql_96_not_installed": "PostgreSQL was not installed on your system. Nothing to do.",
|
||||
"migration_0017_postgresql_11_not_installed": "PostgreSQL 9.6 is installed, but not postgresql 11‽ Something weird might have happened on your system :(...",
|
||||
"migration_0017_not_enough_space": "Make sufficient space available in {path} to run the migration.",
|
||||
"migration_0018_failed_to_migrate_iptables_rules": "Failed to migrate legacy iptables rules to nftables: {error}",
|
||||
"migration_0018_failed_to_reset_legacy_rules": "Failed to reset legacy iptables rules: {error}",
|
||||
"migrations_already_ran": "Those migrations are already done: {ids}",
|
||||
"migrations_cant_reach_migration_file": "Could not access migrations files at the path '%s'",
|
||||
"migrations_dependencies_not_satisfied": "Run these migrations: '{dependencies_id}', before migration {id}.",
|
||||
"migrations_failed_to_load_migration": "Could not load migration {id}: {error}",
|
||||
"migrations_exclusive_options": "'--auto', '--skip', and '--force-rerun' are mutually exclusive options.",
|
||||
"migrations_list_conflict_pending_done": "You cannot use both '--previous' and '--done' at the same time.",
|
||||
"migrations_loading_migration": "Loading migration {id}…",
|
||||
"migrations_loading_migration": "Loading migration {id}...",
|
||||
"migrations_migration_has_failed": "Migration {id} did not complete, aborting. Error: {exception}",
|
||||
"migrations_must_provide_explicit_targets": "You must provide explicit targets when using '--skip' or '--force-rerun'",
|
||||
"migrations_need_to_accept_disclaimer": "To run the migration {id}, your must accept the following disclaimer:\n---\n{disclaimer}\n---\nIf you accept to run the migration, please re-run the command with the option '--accept-disclaimer'.",
|
||||
|
@ -479,14 +448,12 @@
|
|||
"migrations_no_such_migration": "There is no migration called '{id}'",
|
||||
"migrations_not_pending_cant_skip": "Those migrations are not pending, so cannot be skipped: {ids}",
|
||||
"migrations_pending_cant_rerun": "Those migrations are still pending, so cannot be run again: {ids}",
|
||||
"migrations_running_forward": "Running migration {id}…",
|
||||
"migrations_skip_migration": "Skipping migration {id}…",
|
||||
"migrations_running_forward": "Running migration {id}...",
|
||||
"migrations_skip_migration": "Skipping migration {id}...",
|
||||
"migrations_success_forward": "Migration {id} completed",
|
||||
"migrations_to_be_ran_manually": "Migration {id} has to be run manually. Please go to Tools → Migrations on the webadmin page, or run `yunohost tools migrations migrate`.",
|
||||
"no_internet_connection": "The server is not connected to the Internet",
|
||||
"not_enough_disk_space": "Not enough free space on '{path:s}'",
|
||||
"operation_interrupted": "The operation was manually interrupted?",
|
||||
"package_unknown": "Unknown package '{pkgname}'",
|
||||
"packages_upgrade_failed": "Could not upgrade all the packages",
|
||||
"password_listed": "This password is among the most used passwords in the world. Please choose something more unique.",
|
||||
"password_too_simple_1": "The password needs to be at least 8 characters long",
|
||||
|
@ -535,9 +502,11 @@
|
|||
"regenconf_would_be_updated": "The configuration would have been updated for category '{category}'",
|
||||
"regenconf_dry_pending_applying": "Checking pending configuration which would have been applied for category '{category}'…",
|
||||
"regenconf_failed": "Could not regenerate the configuration for category(s): {categories}",
|
||||
"regenconf_pending_applying": "Applying pending configuration for category '{category}'…",
|
||||
"regenconf_pending_applying": "Applying pending configuration for category '{category}'...",
|
||||
"regenconf_need_to_explicitly_specify_ssh": "The ssh configuration has been manually modified, but you need to explicitly specify category 'ssh' with --force to actually apply the changes.",
|
||||
"restore_already_installed_app": "An app with the ID '{app:s}' is already installed",
|
||||
"restore_app_failed": "Could not restore the app '{app:s}'",
|
||||
"restore_already_installed_apps": "The following apps can't be restored because they are already installed: {apps}",
|
||||
"restore_app_failed": "Could not restore {app:s}",
|
||||
"restore_cleaning_failed": "Could not clean up the temporary restoration directory",
|
||||
"restore_complete": "Restored",
|
||||
"restore_confirm_yunohost_installed": "Do you really want to restore an already installed system? [{answers:s}]",
|
||||
|
@ -569,8 +538,7 @@
|
|||
"service_description_metronome": "Manage XMPP instant messaging accounts",
|
||||
"service_description_mysql": "Stores app data (SQL database)",
|
||||
"service_description_nginx": "Serves or provides access to all the websites hosted on your server",
|
||||
"service_description_nslcd": "Handles YunoHost user shell connection",
|
||||
"service_description_php7.0-fpm": "Runs apps written in PHP with NGINX",
|
||||
"service_description_php7.3-fpm": "Runs apps written in PHP with NGINX",
|
||||
"service_description_postfix": "Used to send and receive e-mails",
|
||||
"service_description_redis-server": "A specialized database used for rapid data access, task queue, and communication between programs",
|
||||
"service_description_rspamd": "Filters spam, and other e-mail related features",
|
||||
|
@ -600,7 +568,7 @@
|
|||
"ssowat_conf_updated": "SSOwat configuration updated",
|
||||
"system_upgraded": "System upgraded",
|
||||
"system_username_exists": "Username already exists in the list of system users",
|
||||
"this_action_broke_dpkg": "This action broke dpkg/APT (the system package managers)… You can try to solve this issue by connecting through SSH and running `sudo dpkg --configure -a`.",
|
||||
"this_action_broke_dpkg": "This action broke dpkg/APT (the system package managers)... You can try to solve this issue by connecting through SSH and running `sudo apt install --fix-broken` and/or `sudo dpkg --configure -a`.",
|
||||
"tools_upgrade_at_least_one": "Please specify '--apps', or '--system'",
|
||||
"tools_upgrade_cant_both": "Cannot upgrade both system and apps at the same time",
|
||||
"tools_upgrade_cant_hold_critical_packages": "Could not hold critical packages…",
|
||||
|
@ -610,15 +578,15 @@
|
|||
"tools_upgrade_special_packages": "Now upgrading 'special' (yunohost-related) packages…",
|
||||
"tools_upgrade_special_packages_explanation": "The special upgrade will continue in the background. Please don't start any other actions on your server for the next ~10 minutes (depending on hardware speed). After this, you may have to re-log in to the webadmin. The upgrade log will be available in Tools → Log (in the webadmin) or using 'yunohost log list' (from the command-line).",
|
||||
"tools_upgrade_special_packages_completed": "YunoHost package upgrade completed.\nPress [Enter] to get the command line back",
|
||||
"unbackup_app": "App '{app:s}' will not be saved",
|
||||
"unbackup_app": "{app:s} will not be saved",
|
||||
"unexpected_error": "Something unexpected went wrong: {error}",
|
||||
"unlimit": "No quota",
|
||||
"unrestore_app": "App '{app:s}' will not be restored",
|
||||
"unrestore_app": "{app:s} will not be restored",
|
||||
"update_apt_cache_failed": "Could not to update the cache of APT (Debian's package manager). Here is a dump of the sources.list lines, which might help identify problematic lines: \n{sourceslist}",
|
||||
"update_apt_cache_warning": "Something went wrong while updating the cache of APT (Debian's package manager). Here is a dump of the sources.list lines, which might help identify problematic lines: \n{sourceslist}",
|
||||
"updating_apt_cache": "Fetching available upgrades for system packages…",
|
||||
"updating_apt_cache": "Fetching available upgrades for system packages...",
|
||||
"upgrade_complete": "Upgrade complete",
|
||||
"upgrading_packages": "Upgrading packages…",
|
||||
"upgrading_packages": "Upgrading packages...",
|
||||
"upnp_dev_not_found": "No UPnP device found",
|
||||
"upnp_disabled": "UPnP turned off",
|
||||
"upnp_enabled": "UPnP turned on",
|
||||
|
@ -637,7 +605,7 @@
|
|||
"yunohost_ca_creation_failed": "Could not create certificate authority",
|
||||
"yunohost_ca_creation_success": "Local certification authority created.",
|
||||
"yunohost_configured": "YunoHost is now configured",
|
||||
"yunohost_installing": "Installing YunoHost…",
|
||||
"yunohost_installing": "Installing YunoHost...",
|
||||
"yunohost_not_installed": "YunoHost is not correctly installed. Please run 'yunohost tools postinstall'",
|
||||
"yunohost_postinstall_end_tip": "The post-install completed! To finalize your setup, please consider:\n - adding a first user through the 'Users' section of the webadmin (or 'yunohost user create <username>' in command-line);\n - diagnose potential issues through the 'Diagnosis' section of the webadmin (or 'yunohost diagnosis run' in command-line);\n - reading the 'Finalizing your setup' and 'Getting to know Yunohost' parts in the admin documentation: https://yunohost.org/admindoc."
|
||||
}
|
||||
|
|
|
@ -248,7 +248,7 @@
|
|||
"ldap_init_failed_to_create_admin": "LDAP-iniciato ne povis krei administran uzanton",
|
||||
"backup_output_directory_required": "Vi devas provizi elirejan dosierujon por la sekurkopio",
|
||||
"tools_upgrade_cant_unhold_critical_packages": "Ne povis malŝalti kritikajn pakojn…",
|
||||
"log_link_to_log": "Plena ŝtipo de ĉi tiu operacio: '<a href=\"#/tools/logs/{name}\" style=\"text-decoration:underline\"> {desc} </a>'",
|
||||
"log_link_to_log": "Plena ŝtipo de ĉi tiu operacio: '<a href=\"#/tools/logs/{name}\" style=\"text-decoration:underline\">{desc} </a>'",
|
||||
"global_settings_cant_serialize_settings": "Ne eblis serialigi datumojn pri agordoj, motivo: {reason:s}",
|
||||
"backup_running_hooks": "Kurado de apogaj hokoj …",
|
||||
"certmanager_domain_unknown": "Nekonata domajno '{domain:s}'",
|
||||
|
@ -351,7 +351,7 @@
|
|||
"dyndns_ip_update_failed": "Ne povis ĝisdatigi IP-adreson al DynDNS",
|
||||
"migration_description_0004_php5_to_php7_pools": "Rekonfigu la PHP-naĝejojn por uzi PHP 7 anstataŭ 5",
|
||||
"ssowat_conf_updated": "SSOwat-agordo ĝisdatigita",
|
||||
"log_link_to_failed_log": "Ne povis plenumi la operacion '{desc}'. Bonvolu provizi la plenan protokolon de ĉi tiu operacio per <a href=\"#/tools/logs/{name}\"> alklakante ĉi tie </a> por akiri helpon",
|
||||
"log_link_to_failed_log": "Ne povis plenumi la operacion '{desc}'. Bonvolu provizi la plenan protokolon de ĉi tiu operacio per <a href=\"#/tools/logs/{name}\">alklakante ĉi tie </a> por akiri helpon",
|
||||
"user_home_creation_failed": "Ne povis krei dosierujon \"home\" por uzanto",
|
||||
"pattern_backup_archive_name": "Devas esti valida dosiernomo kun maksimume 30 signoj, alfanombraj kaj -_. signoj nur",
|
||||
"restore_cleaning_failed": "Ne eblis purigi la adresaron de provizora restarigo",
|
||||
|
@ -508,15 +508,15 @@
|
|||
"diagnosis_basesystem_ynh_main_version": "Servilo funkcias YunoHost {main_version} ({repo})",
|
||||
"diagnosis_basesystem_ynh_inconsistent_versions": "Vi prizorgas malkonsekvencajn versiojn de la YunoHost-pakoj... plej probable pro malsukcesa aŭ parta ĝisdatigo.",
|
||||
"diagnosis_display_tip_web": "Vi povas iri al la sekcio Diagnozo (en la hejmekrano) por vidi la trovitajn problemojn.",
|
||||
"diagnosis_cache_still_valid": "(Kaŝmemoro ankoraŭ validas por {category} diagnozo. Ankoraŭ ne re-diagnoza!)",
|
||||
"diagnosis_cache_still_valid": "(La kaŝmemoro ankoraŭ validas por {category} diagnozo. Vi ankoraŭ ne diagnozas ĝin!)",
|
||||
"diagnosis_cant_run_because_of_dep": "Ne eblas fari diagnozon por {category} dum estas gravaj problemoj rilataj al {dep}.",
|
||||
"diagnosis_display_tip_cli": "Vi povas aranĝi 'yunohost diagnosis show --issues' por aperigi la trovitajn problemojn.",
|
||||
"diagnosis_failed_for_category": "Diagnozo malsukcesis por kategorio '{category}': {error}",
|
||||
"app_upgrade_script_failed": "Eraro okazis en la skripto pri ĝisdatiga programo",
|
||||
"diagnosis_diskusage_verylow": "Stokado {mountpoint} (sur aparato {device)) restas nur {free} ({free_percent}%) spaco. Vi vere konsideru purigi iom da spaco.",
|
||||
"diagnosis_diskusage_verylow": "Stokado <code>{mountpoint}</code> (sur aparato <code> {device} </code>) nur restas {free} ({free_percent}%) spaco restanta (el {total}). Vi vere konsideru purigi iom da spaco !",
|
||||
"diagnosis_ram_verylow": "La sistemo nur restas {available} ({available_percent}%) RAM! (el {total})",
|
||||
"diagnosis_mail_outgoing_port_25_blocked": "Eliranta haveno 25 ŝajnas esti blokita. Vi devas provi malŝlosi ĝin en via agorda panelo de provizanto (aŭ gastiganto). Dume la servilo ne povos sendi retpoŝtojn al aliaj serviloj.",
|
||||
"diagnosis_http_bad_status_code": "La diagnoza sistemo ne povis atingi vian servilon. Povas esti, ke alia maŝino respondis anstataŭ via servilo. Vi devus kontroli, ke vi ĝuste redonas la havenon 80, ke via agordo de nginx estas ĝisdatigita kaj ke reverso-prokuro ne interbatalas.",
|
||||
"diagnosis_http_bad_status_code": "Ĝi aspektas kiel alia maŝino (eble via interreta enkursigilo) respondita anstataŭ via servilo.<br>1. La plej ofta kaŭzo por ĉi tiu afero estas, ke la haveno 80 (kaj 443) <a href='https://yunohost.org/isp_box_config'> ne estas ĝuste senditaj al via servilo </a>.<br>2. Pri pli kompleksaj agordoj: certigu, ke neniu fajroŝirmilo aŭ reverso-prokuro ne interbatalas.",
|
||||
"main_domain_changed": "La ĉefa domajno estis ŝanĝita",
|
||||
"yunohost_postinstall_end_tip": "La post-instalado finiĝis! Por fini vian agordon, bonvolu konsideri:\n - aldonado de unua uzanto tra la sekcio 'Uzantoj' de la retadreso (aŭ 'uzanto de yunohost kreu <uzantnomon>' en komandlinio);\n - diagnozi eblajn problemojn per la sekcio 'Diagnozo' de la reteja administrado (aŭ 'diagnoza yunohost-ekzekuto' en komandlinio);\n - legante la partojn 'Finigi vian agordon' kaj 'Ekkoni Yunohost' en la administra dokumentado: https://yunohost.org/admindoc.",
|
||||
"migration_description_0014_remove_app_status_json": "Forigi heredajn dosierojn",
|
||||
|
@ -526,21 +526,21 @@
|
|||
"diagnosis_ip_no_ipv6": "La servilo ne havas funkciantan IPv6.",
|
||||
"diagnosis_ip_not_connected_at_all": "La servilo tute ne ŝajnas esti konektita al la Interreto !?",
|
||||
"diagnosis_ip_dnsresolution_working": "Rezolucio pri domajna nomo funkcias !",
|
||||
"diagnosis_ip_weird_resolvconf": "DNS-rezolucio ŝajnas funkcii, sed atentu, ke vi ŝajnas uzi kutimon /etc/resolv.conf.",
|
||||
"diagnosis_ip_weird_resolvconf_details": "Anstataŭe, ĉi tiu dosiero estu ligilo kun /etc/resolvconf/run/resolv.conf mem montrante al 127.0.0.1 (dnsmasq). La efektivaj solvantoj devas agordi en /etc/resolv.dnsmasq.conf.",
|
||||
"diagnosis_dns_good_conf": "Bona DNS-agordo por domajno {domain} (kategorio {category})",
|
||||
"diagnosis_dns_bad_conf": "Malbona aŭ mankas DNS-agordo por domajno {domain} (kategorio {category})",
|
||||
"diagnosis_ip_weird_resolvconf": "DNS-rezolucio ŝajnas funkcii, sed ŝajnas ke vi uzas kutiman <code>/etc/resolv.conf </code>.",
|
||||
"diagnosis_ip_weird_resolvconf_details": "La dosiero <code>/etc/resolv.conf</code> devas esti ligilo al <code> /etc/resolvconf/run/resolv.conf </code> indikante <code> 127.0.0.1 </code> (dnsmasq). Se vi volas permane agordi DNS-solvilojn, bonvolu redakti <code> /etc/resolv.dnsmasq.conf </code>.",
|
||||
"diagnosis_dns_good_conf": "DNS-registroj estas ĝuste agorditaj por domajno {domain} (kategorio {category})",
|
||||
"diagnosis_dns_bad_conf": "Iuj DNS-registroj mankas aŭ malĝustas por domajno {domain} (kategorio {category})",
|
||||
"diagnosis_ram_ok": "La sistemo ankoraŭ havas {available} ({available_percent}%) RAM forlasita de {total}.",
|
||||
"diagnosis_swap_none": "La sistemo tute ne havas interŝanĝon. Vi devus pripensi aldoni almenaŭ {recommended} da interŝanĝo por eviti situaciojn en kiuj la sistemo restas sen memoro.",
|
||||
"diagnosis_swap_notsomuch": "La sistemo havas nur {total}-interŝanĝon. Vi konsideru havi almenaŭ {recommended} por eviti situaciojn en kiuj la sistemo restas sen memoro.",
|
||||
"diagnosis_regenconf_manually_modified_details": "Ĉi tio probable estas bona tiel longe kiel vi scias kion vi faras;)!",
|
||||
"diagnosis_regenconf_manually_modified_details": "Ĉi tio probable estas bona, se vi scias, kion vi faras! YunoHost ĉesigos ĝisdatigi ĉi tiun dosieron aŭtomate ... Sed atentu, ke YunoHost-ĝisdatigoj povus enhavi gravajn rekomendajn ŝanĝojn. Se vi volas, vi povas inspekti la diferencojn per <cmd>yyunohost tools regen-conf {category} --dry-run --with-diff</cmd> kaj devigi la reset al la rekomendita agordo per <cmd>yunohost tools regen-conf {category} --force</cmd>",
|
||||
"diagnosis_regenconf_manually_modified_debian": "Agordodosiero {file} estis modifita permane kompare kun la defaŭlta Debian.",
|
||||
"diagnosis_regenconf_manually_modified_debian_details": "Ĉi tio probable estas bona, sed devas observi ĝin...",
|
||||
"diagnosis_security_all_good": "Neniu kritika sekureca vundebleco estis trovita.",
|
||||
"diagnosis_security_vulnerable_to_meltdown": "Vi ŝajnas vundebla al la kritiko-vundebleco de Meltdown",
|
||||
"diagnosis_no_cache": "Neniu diagnoza kaŝmemoro por kategorio '{category}'",
|
||||
"diagnosis_ip_broken_dnsresolution": "Rezolucio pri domajna nomo rompiĝas pro iu kialo... Ĉu fajroŝirmilo blokas DNS-petojn ?",
|
||||
"diagnosis_ip_broken_resolvconf": "Rezolucio pri domajna nomo ŝajnas esti rompita en via servilo, kiu ŝajnas rilata al /etc/resolv.conf ne notante 127.0.0.1.",
|
||||
"diagnosis_ip_broken_resolvconf": "Rezolucio pri domajna nomo estas rompita en via servilo, kiu ŝajnas rilata al <code>/etc/resolv.conf</code> ne montrante al <code>127.0.0.1 </code>.",
|
||||
"diagnosis_dns_missing_record": "Laŭ la rekomendita DNS-agordo, vi devas aldoni DNS-registron kun\ntipo: {type}\nnomo: {name}\nvaloro: {value}",
|
||||
"diagnosis_dns_discrepancy": "La DNS-registro kun tipo {type} kaj nomo {name} ne kongruas kun la rekomendita agordo.\nNuna valoro: {current}\nEsceptita valoro: {value}",
|
||||
"diagnosis_services_conf_broken": "Agordo estas rompita por servo {service} !",
|
||||
|
@ -549,7 +549,7 @@
|
|||
"diagnosis_swap_ok": "La sistemo havas {total} da interŝanĝoj!",
|
||||
"diagnosis_mail_ougoing_port_25_ok": "Eliranta haveno 25 ne estas blokita kaj retpoŝto povas esti sendita al aliaj serviloj.",
|
||||
"diagnosis_regenconf_allgood": "Ĉiuj agordaj dosieroj kongruas kun la rekomendita agordo!",
|
||||
"diagnosis_regenconf_manually_modified": "Agordodosiero {file} estis permane modifita.",
|
||||
"diagnosis_regenconf_manually_modified": "Agordodosiero <code>{file}</code> ŝajnas esti permane modifita.",
|
||||
"diagnosis_description_ip": "Interreta konektebleco",
|
||||
"diagnosis_description_dnsrecords": "Registroj DNS",
|
||||
"diagnosis_description_services": "Servo kontrolas staton",
|
||||
|
@ -557,27 +557,27 @@
|
|||
"diagnosis_description_security": "Sekurecaj kontroloj",
|
||||
"diagnosis_ports_could_not_diagnose": "Ne povis diagnozi, ĉu haveblaj havenoj de ekstere.",
|
||||
"diagnosis_ports_could_not_diagnose_details": "Eraro: {error}",
|
||||
"diagnosis_services_bad_status_tip": "Vi povas provi rekomenci la servon, kaj se ĝi ne funkcias, trarigardu la servajn protokolojn uzante 'yunohost service log {service}' aŭ tra la sekcio 'Servoj' de la retadreso.",
|
||||
"diagnosis_services_bad_status_tip": "Vi povas provi <a href='#/services/{service}'>rekomenci la servon </a>, kaj se ĝi ne funkcias, rigardu <a href='#/services/{service}'>La servaj registroj en reteja</a> (el la komandlinio, vi povas fari tion per <cmd>yunohost service restart {service}</cmd> kaj<cmd>yunohost service log {service}</cmd>).",
|
||||
"diagnosis_security_vulnerable_to_meltdown_details": "Por ripari tion, vi devas ĝisdatigi vian sistemon kaj rekomenci por ŝarĝi la novan linux-kernon (aŭ kontaktu vian servilan provizanton se ĉi tio ne funkcias). Vidu https://meltdownattack.com/ por pliaj informoj.",
|
||||
"diagnosis_description_basesystem": "Baza sistemo",
|
||||
"diagnosis_description_regenconf": "Sistemaj agordoj",
|
||||
"main_domain_change_failed": "Ne eblas ŝanĝi la ĉefan domajnon",
|
||||
"log_domain_main_domain": "Faru de '{}' la ĉefa domajno",
|
||||
"diagnosis_http_timeout": "Tempolimigita dum provado kontakti vian servilon de ekstere. Ĝi ŝajnas esti neatingebla. Vi devus kontroli, ke vi ĝuste redonas la havenon 80, ke nginx funkcias kaj ke fajroŝirmilo ne interbatalas.",
|
||||
"diagnosis_http_timeout": "Tempolimigita dum provado kontakti vian servilon de ekstere. Ĝi ŝajnas esti neatingebla.<br>1. La plej ofta kaŭzo por ĉi tiu afero estas, ke la haveno 80 (kaj 443) <a href='https://yunohost.org/isp_box_config'>ne estas ĝuste senditaj al via servilo</a>.<br>2. Vi ankaŭ devas certigi, ke la servo nginx funkcias<br>3. Pri pli kompleksaj agordoj: certigu, ke neniu fajroŝirmilo aŭ reverso-prokuro ne interbatalas.",
|
||||
"diagnosis_http_connection_error": "Rilata eraro: ne povis konektiĝi al la petita domajno, tre probable ĝi estas neatingebla.",
|
||||
"migration_description_0013_futureproof_apps_catalog_system": "Migru al la nova katalogosistemo pri estontecaj programoj",
|
||||
"diagnosis_ignored_issues": "(+ {nb_ignored} ignorataj aferoj))",
|
||||
"diagnosis_found_errors": "Trovis {errors} signifa(j) afero(j) rilata al {category}!",
|
||||
"diagnosis_found_errors_and_warnings": "Trovis {errors} signifaj problemo (j) (kaj {warnings} averto) rilataj al {category}!",
|
||||
"diagnosis_diskusage_low": "Stokado {mountpoint} (sur aparato {device)) restas nur {free} ({free_percent}%) spaco. Estu zorgema.",
|
||||
"diagnosis_diskusage_ok": "Stokado {mountpoint} (sur aparato {device) ankoraŭ restas {free} ({free_percent}%) spaco!",
|
||||
"diagnosis_diskusage_low": "Stokado <code>{mountpoint}</code> (sur aparato <code>{device}</code>) nur restas {free} ({free_percent}%) spaco restanta (el {total}). Estu zorgema.",
|
||||
"diagnosis_diskusage_ok": "Stokado <code>{mountpoint}</code> (sur aparato <code>{device}</code>) ankoraŭ restas {free} ({free_percent}%) spaco (el {total})!",
|
||||
"global_settings_setting_pop3_enabled": "Ebligu la protokolon POP3 por la poŝta servilo",
|
||||
"diagnosis_unknown_categories": "La jenaj kategorioj estas nekonataj: {categories}",
|
||||
"diagnosis_services_running": "Servo {service} funkcias!",
|
||||
"diagnosis_ports_unreachable": "Haveno {port} ne atingeblas de ekstere.",
|
||||
"diagnosis_ports_ok": "Haveno {port} atingeblas de ekstere.",
|
||||
"diagnosis_ports_needed_by": "Eksponi ĉi tiun havenon necesas por {category} funkcioj (servo {service})",
|
||||
"diagnosis_ports_forwarding_tip": "Por solvi ĉi tiun problemon, vi plej verŝajne bezonas agordi havenon en via interreta enkursigilo kiel priskribite en https://yunohost.org/isp_box_config",
|
||||
"diagnosis_ports_forwarding_tip": "Por solvi ĉi tiun problemon, vi plej verŝajne devas agordi la plusendon de haveno en via interreta enkursigilo kiel priskribite en <a href='https://yunohost.org/isp_box_config'>https://yunohost.org/isp_box_config</a>",
|
||||
"diagnosis_http_could_not_diagnose": "Ne povis diagnozi, ĉu atingeblas domajno de ekstere.",
|
||||
"diagnosis_http_could_not_diagnose_details": "Eraro: {error}",
|
||||
"diagnosis_http_ok": "Domajno {domain} atingebla per HTTP de ekster la loka reto.",
|
||||
|
@ -598,5 +598,43 @@
|
|||
"diagnosis_basesystem_hardware_board": "Servilo-tabulo-modelo estas {model}",
|
||||
"diagnosis_description_web": "Reta",
|
||||
"domain_cannot_add_xmpp_upload": "Vi ne povas aldoni domajnojn per 'xmpp-upload'. Ĉi tiu speco de nomo estas rezervita por la XMPP-alŝuta funkcio integrita en YunoHost.",
|
||||
"group_already_exist_on_system_but_removing_it": "Grupo {group} jam ekzistas en la sistemaj grupoj, sed YunoHost forigos ĝin …"
|
||||
"group_already_exist_on_system_but_removing_it": "Grupo {group} jam ekzistas en la sistemaj grupoj, sed YunoHost forigos ĝin …",
|
||||
"diagnosis_mail_outgoing_port_25_blocked_relay_vpn": "Iuj provizantoj ne lasos vin malŝlosi elirantan havenon 25 ĉar ili ne zorgas pri Neta Neŭtraleco.<br>- Iuj el ili provizas la alternativon de <a href='https://yunohost.org/#/smtp_relay'>uzante retpoŝtan servilon</a> kvankam ĝi implicas, ke la relajso povos spioni vian retpoŝtan trafikon.<br>- Amika privateco estas uzi VPN * kun dediĉita publika IP * por pretervidi ĉi tiun specon. de limoj. Vidu <a href='https://yunohost.org/#/vpn_avantage'>https://yunohost.org/#/vpn_avantage</a><br>- Vi ankaŭ povas konsideri ŝanĝi al <a href='https://yunohost.org/#/isp'>pli neta neŭtraleco-amika provizanto</a>",
|
||||
"diagnosis_mail_fcrdns_nok_details": "Vi unue provu agordi la inversan DNS kun <code>{ehlo_domain}</code> en via interreta enkursigilo aŭ en via retprovizanta interfaco. (Iuj gastigantaj provizantoj eble postulas, ke vi sendu al ili subtenan bileton por ĉi tio).",
|
||||
"diagnosis_mail_fcrdns_nok_alternatives_4": "Iuj provizantoj ne lasos vin agordi vian inversan DNS (aŭ ilia funkcio povus esti rompita ...). Se vi spertas problemojn pro tio, konsideru jenajn solvojn:<br>- Iuj ISP provizas la alternativon de <a href='https://yunohost.org/#/smtp_relay'>uzante retpoŝtan servilon</a> kvankam ĝi implicas, ke la relajso povos spioni vian retpoŝtan trafikon.<br>- Interreta privateco estas uzi VPN * kun dediĉita publika IP * por preterpasi ĉi tiajn limojn. Vidu <a href='https://yunohost.org/#/vpn_avantage'>https://yunohost.org/#/vpn_avantage</a><br>- Finfine eblas ankaŭ <a href='https://yunohost.org/#/isp'>ŝanĝo de provizanto</a>",
|
||||
"diagnosis_display_tip": "Por vidi la trovitajn problemojn, vi povas iri al la sekcio pri Diagnozo de la reteja administrado, aŭ funkcii \"yunohost diagnosis show --issues\" el la komandlinio.",
|
||||
"diagnosis_ip_global": "Tutmonda IP: <code>{global} </code>",
|
||||
"diagnosis_ip_local": "Loka IP: <code>{local} </code>",
|
||||
"diagnosis_dns_point_to_doc": "Bonvolu kontroli la dokumentaron ĉe <a href='https://yunohost.org/dns_config'>https://yunohost.org/dns_config</a> se vi bezonas helpon pri agordo de DNS-registroj.",
|
||||
"diagnosis_mail_outgoing_port_25_ok": "La SMTP-poŝta servilo kapablas sendi retpoŝtojn (eliranta haveno 25 ne estas blokita).",
|
||||
"diagnosis_mail_outgoing_port_25_blocked_details": "Vi unue provu malŝlosi elirantan havenon 25 en via interreta enkursigilo aŭ en via retprovizanta interfaco. (Iuj gastigantaj provizantoj eble postulas, ke vi sendu al ili subtenan bileton por ĉi tio).",
|
||||
"diagnosis_mail_ehlo_unreachable": "La SMTP-poŝta servilo estas neatingebla de ekstere sur IPv {ipversion}. Ĝi ne povos ricevi retpoŝtojn.",
|
||||
"diagnosis_mail_ehlo_ok": "La SMTP-poŝta servilo atingeblas de ekstere kaj tial kapablas ricevi retpoŝtojn !",
|
||||
"diagnosis_mail_ehlo_unreachable_details": "Ne povis malfermi rilaton sur la haveno 25 al via servilo en IPv {ipversion}. Ĝi ŝajnas esti neatingebla.<br>1. La plej ofta kaŭzo por ĉi tiu afero estas, ke la haveno 25 <a href='https://yunohost.org/isp_box_config'>ne estas ĝuste sendita al via servilo </a>.<br>2. Vi ankaŭ devas certigi, ke servo-prefikso funkcias.<br>3. Pri pli kompleksaj agordoj: certigu, ke neniu fajroŝirmilo aŭ reverso-prokuro ne interbatalas.",
|
||||
"diagnosis_mail_ehlo_bad_answer": "Ne-SMTP-servo respondita sur la haveno 25 sur IPv {ipversion}",
|
||||
"diagnosis_mail_ehlo_bad_answer_details": "Povas esti ke alia maŝino respondas anstataŭ via servilo.",
|
||||
"diagnosis_mail_ehlo_wrong": "Malsama SMTP-poŝta servilo respondas pri IPv {ipversion}. Via servilo probable ne povos ricevi retpoŝtojn.",
|
||||
"diagnosis_mail_ehlo_wrong_details": "La EHLO ricevita de la fora diagnozilo en IPv {ipversion} diferencas de la domajno de via servilo.<br>Ricevita EHLO: <code>{wrong_ehlo} </code><br>Atendita: <code>{right_ehlo} </code><br>La plej ofta kaŭzo por ĉi tiu afero estas, ke la haveno 25 <a href='https://yunohost.org/isp_box_config'>ne estas ĝuste sendita al via servilo </a>. Alternative, certigu, ke neniu fajroŝirmilo aŭ reverso-prokuro ne interbatalas.",
|
||||
"diagnosis_mail_ehlo_could_not_diagnose": "Ne povis diagnozi ĉu postfiksa poŝta servilo atingebla de ekstere en IPv {ipversion}.",
|
||||
"diagnosis_mail_ehlo_could_not_diagnose_details": "Eraro: {error}",
|
||||
"diagnosis_mail_fcrdns_ok": "Via inversa DNS estas ĝuste agordita!",
|
||||
"diagnosis_mail_fcrdns_dns_missing": "Neniu inversa DNS estas difinita en IPv {ipversion}. Iuj retpoŝtoj povas malsukcesi liveri aŭ povus esti markitaj kiel spamo.",
|
||||
"diagnosis_mail_fcrdns_nok_alternatives_6": "Iuj provizantoj ne lasos vin agordi vian inversan DNS (aŭ ilia funkcio povus esti rompita ...). Se via inversa DNS estas ĝuste agordita por IPv4, vi povas provi malebligi la uzon de IPv6 kiam vi sendas retpoŝtojn per funkciado <cmd>yunohost-agordoj set smtp.allow_ipv6 -v off </cmd>. Noto: ĉi tiu lasta solvo signifas, ke vi ne povos sendi aŭ ricevi retpoŝtojn de la malmultaj IPv6-nur serviloj tie.",
|
||||
"diagnosis_mail_fcrdns_different_from_ehlo_domain": "La inversa DNS ne ĝuste agordis en IPv {ipversion}. Iuj retpoŝtoj povas malsukcesi liveri aŭ povus esti markitaj kiel spamo.",
|
||||
"diagnosis_mail_fcrdns_different_from_ehlo_domain_details": "Aktuala reverso DNS: <code>{rdns_domain}</code><br>Atendita valoro: <code>{ehlo_domain}</code>",
|
||||
"diagnosis_mail_blacklist_ok": "La IP kaj domajnoj uzataj de ĉi tiu servilo ne ŝajnas esti listigitaj nigre",
|
||||
"diagnosis_mail_blacklist_listed_by": "Via IP aŭ domajno <code>{item}</code> estas listigita en {blacklist_name}",
|
||||
"diagnosis_mail_blacklist_reason": "La negra listo estas: {reason}",
|
||||
"diagnosis_mail_blacklist_website": "Post identigi kial vi listigas kaj riparis ĝin, bonvolu peti forigi vian IP aŭ domenion sur {blacklist_website}",
|
||||
"diagnosis_mail_queue_ok": "{nb_pending} pritraktataj retpoŝtoj en la retpoŝtaj vostoj",
|
||||
"diagnosis_mail_queue_unavailable": "Ne povas konsulti multajn pritraktitajn retpoŝtojn en vosto",
|
||||
"diagnosis_mail_queue_unavailable_details": "Eraro: {error}",
|
||||
"diagnosis_mail_queue_too_big": "Tro multaj pritraktataj retpoŝtoj en retpoŝto ({nb_pending} retpoŝtoj)",
|
||||
"diagnosis_ports_partially_unreachable": "Haveno {port} ne atingebla de ekstere en IPv {failed}.",
|
||||
"diagnosis_http_hairpinning_issue": "Via loka reto ŝajne ne havas haŭtadon.",
|
||||
"diagnosis_http_hairpinning_issue_details": "Ĉi tio probable estas pro via ISP-skatolo / enkursigilo. Rezulte, homoj de ekster via loka reto povos aliri vian servilon kiel atendite, sed ne homoj de interne de la loka reto (kiel vi, probable?) Kiam uzas la domajnan nomon aŭ tutmondan IP. Eble vi povas plibonigi la situacion per rigardado al <a href='https://yunohost.org/dns_local_network'>https://yunohost.org/dns_local_network</a>",
|
||||
"diagnosis_http_partially_unreachable": "Domajno {domain} ŝajnas neatingebla per HTTP de ekster la loka reto en IPv {failed}, kvankam ĝi funkcias en IPv {passed}.",
|
||||
"diagnosis_http_nginx_conf_not_up_to_date": "La nginx-agordo de ĉi tiu domajno ŝajnas esti modifita permane, kaj malhelpas YunoHost diagnozi ĉu ĝi atingeblas per HTTP.",
|
||||
"diagnosis_http_nginx_conf_not_up_to_date_details": "Por solvi la situacion, inspektu la diferencon per la komandlinio per <cmd>yunohost tools regen-conf nginx --dry-run --with-diff</cmd> kaj se vi aranĝas, apliku la ŝanĝojn per <cmd>yunohost tools regen-conf nginx --force</cmd>.",
|
||||
"global_settings_setting_smtp_allow_ipv6": "Permesu la uzon de IPv6 por ricevi kaj sendi poŝton"
|
||||
}
|
||||
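The locale entries above are plain JSON key/value pairs whose values carry Python str.format()-style placeholders such as {domain:s}, {category} or {free_percent}, plus light HTML markup (<code>, <cmd>, <a href=...>) that the webadmin renders. As a rough illustration of how such a string gets interpolated (the translate() helper and the file path below are hypothetical; the real lookup goes through moulinette's m18n layer):

    import json

    def translate(key, locale_file="locales/eo.json", **kwargs):
        # Load the message catalog and fill in the named placeholders.
        with open(locale_file) as f:
            catalog = json.load(f)
        return catalog[key].format(**kwargs)

    # e.g. translate("diagnosis_ports_ok", port=80)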
|
|
|
@ -601,5 +601,9 @@
|
|||
"diagnosis_ports_forwarding_tip": "Para solucionar este incidente, debería configurar el \"port forwading\" en su router como especificado en https://yunohost.org/isp_box_config",
|
||||
"certmanager_warning_subdomain_dns_record": "El subdominio '{subdomain:s}' no se resuelve en la misma dirección IP que '{domain:s}'. Algunas funciones no estarán disponibles hasta que solucione esto y regenere el certificado.",
|
||||
"domain_cannot_add_xmpp_upload": "No puede agregar dominios que comiencen con 'xmpp-upload'. Este tipo de nombre está reservado para la función de carga XMPP integrada en YunoHost.",
|
||||
"yunohost_postinstall_end_tip": "¡La post-instalación completada! Para finalizar su configuración, considere:\n - agregar un primer usuario a través de la sección 'Usuarios' del webadmin (o 'yunohost user create <username>' en la línea de comandos);\n - diagnostique problemas potenciales a través de la sección 'Diagnóstico' de webadmin (o 'ejecución de diagnóstico yunohost' en la línea de comandos);\n - leyendo las partes 'Finalizando su configuración' y 'Conociendo a Yunohost' en la documentación del administrador: https://yunohost.org/admindoc."
|
||||
"yunohost_postinstall_end_tip": "¡La post-instalación completada! Para finalizar su configuración, considere:\n - agregar un primer usuario a través de la sección 'Usuarios' del webadmin (o 'yunohost user create <username>' en la línea de comandos);\n - diagnostique problemas potenciales a través de la sección 'Diagnóstico' de webadmin (o 'ejecución de diagnóstico yunohost' en la línea de comandos);\n - leyendo las partes 'Finalizando su configuración' y 'Conociendo a Yunohost' en la documentación del administrador: https://yunohost.org/admindoc.",
|
||||
"diagnosis_dns_point_to_doc": "Por favor, consulta la documentación en <a href='https://yunohost.org/dns_config'>https://yunohost.org/dns_config</a> si necesitas ayuda para configurar los registros DNS.",
|
||||
"diagnosis_ip_global": "IP Global: <code>{global}</code>",
|
||||
"diagnosis_mail_outgoing_port_25_ok": "El servidor de email SMTP puede mandar emails (puerto saliente 25 no está bloqueado).",
|
||||
"diagnosis_mail_outgoing_port_25_blocked_details": "Deberías intentar desbloquear el puerto 25 saliente en la interfaz de tu router o en la interfaz de tu provedor de hosting. (Algunos hosting pueden necesitar que les abras un ticket de soporte para esto)."
|
||||
}
|
||||
|
|
|
@ -54,7 +54,7 @@
|
|||
"domain_dyndns_already_subscribed": "Vous avez déjà souscris à un domaine DynDNS",
|
||||
"domain_dyndns_root_unknown": "Domaine DynDNS principal inconnu",
|
||||
"domain_exists": "Le domaine existe déjà",
|
||||
"domain_uninstall_app_first": "Une ou plusieurs applications sont installées sur ce domaine. Veuillez d’abord les désinstaller avant de supprimer ce domaine",
|
||||
"domain_uninstall_app_first": "Ces applications sont toujours installées sur votre domaine: {apps}. Veuillez d’abord les désinstaller avant de supprimer ce domaine",
|
||||
"domain_unknown": "Domaine inconnu",
|
||||
"done": "Terminé",
|
||||
"downloading": "Téléchargement en cours …",
|
||||
|
@ -89,7 +89,7 @@
|
|||
"mail_domain_unknown": "Le domaine '{domain:s}' de cette adresse de courriel n’est pas valide. Merci d’utiliser un domaine administré par ce serveur.",
|
||||
"mail_forward_remove_failed": "Impossible de supprimer le courriel de transfert '{mail:s}'",
|
||||
"main_domain_change_failed": "Impossible de modifier le domaine principal",
|
||||
"main_domain_changed": "Le domaine principal modifié",
|
||||
"main_domain_changed": "Le domaine principal a été modifié",
|
||||
"no_internet_connection": "Le serveur n’est pas connecté à Internet",
|
||||
"not_enough_disk_space": "L’espace disque est insuffisant sur '{path:s}'",
|
||||
"package_unknown": "Le paquet '{pkgname}' est inconnu",
|
||||
|
@ -166,9 +166,9 @@
|
|||
"certmanager_certificate_fetching_or_enabling_failed": "Il semble que l’activation du nouveau certificat pour {domain:s} a échoué …",
|
||||
"certmanager_attempt_to_renew_nonLE_cert": "Le certificat pour le domaine {domain:s} n’est pas émis par Let’s Encrypt. Impossible de le renouveler automatiquement !",
|
||||
"certmanager_attempt_to_renew_valid_cert": "Le certificat pour le domaine {domain:s} n’est pas sur le point d’expirer ! (Vous pouvez utiliser --force si vous savez ce que vous faites)",
|
||||
"certmanager_domain_http_not_working": "Il semble que le domaine {domain:s} ne soit pas accessible via HTTP. Veuillez vérifier que vos configuration DNS et Nginx sont correctes",
|
||||
"certmanager_domain_http_not_working": "Le domaine {domain:s} ne semble pas être accessible via HTTP. Merci de vérifier la catégorie 'Web' dans le diagnostic pour plus d'informations. (Ou si vous savez ce que vous faites, utilisez '--no-checks' pour désactiver la vérification.)",
|
||||
"certmanager_error_no_A_record": "Aucun enregistrement DNS 'A' n’a été trouvé pour {domain:s}. Vous devez faire pointer votre nom de domaine vers votre machine pour être en mesure d’installer un certificat Let’s Encrypt ! (Si vous savez ce que vous faites, utilisez --no-checks pour désactiver ces contrôles)",
|
||||
"certmanager_domain_dns_ip_differs_from_public_ip": "L’enregistrement DNS 'A' du domaine {domain:s} est différent de l’adresse IP de ce serveur. Si vous avez récemment modifié votre enregistrement 'A', veuillez attendre sa propagation (des vérificateurs de propagation DNS sont disponibles en ligne). (Si vous savez ce que vous faites, utilisez --no-checks pour désactiver ces contrôles)",
|
||||
"certmanager_domain_dns_ip_differs_from_public_ip": "L'enregistrement DNS du domaine {domain:s} est différent de l’adresse IP de ce serveur. Pour plus d'informations, veuillez consulter la catégorie \"Enregistrements DNS\" dans la section diagnostic. Si vous avez récemment modifié votre enregistrement 'A', veuillez attendre sa propagation (des vérificateurs de propagation DNS sont disponibles en ligne). (Si vous savez ce que vous faites, utilisez --no-checks pour désactiver ces contrôles)",
|
||||
"certmanager_cannot_read_cert": "Quelque chose s’est mal passé lors de la tentative d’ouverture du certificat actuel pour le domaine {domain:s} (fichier : {file:s}), la cause est : {reason:s}",
|
||||
"certmanager_cert_install_success_selfsigned": "Le certificat auto-signé est maintenant installé pour le domaine « {domain:s} »",
|
||||
"certmanager_cert_install_success": "Le certificat Let’s Encrypt est maintenant installé pour le domaine « {domain:s} »",
|
||||
|
@ -184,11 +184,11 @@
|
|||
"mailbox_used_space_dovecot_down": "Le service de courriel Dovecot doit être démarré si vous souhaitez voir l’espace disque occupé par la messagerie",
|
||||
"domains_available": "Domaines disponibles :",
|
||||
"backup_archive_broken_link": "Impossible d’accéder à l’archive de sauvegarde (lien invalide vers {path:s})",
|
||||
"certmanager_acme_not_configured_for_domain": "Le certificat du domaine {domain:s} ne semble pas être correctement installé. Veuillez d’abord exécuter cert-install.",
|
||||
"certmanager_acme_not_configured_for_domain": "Le challenge ACME n'a pas pu être validé pour le domaine {domain} pour le moment car le code de la configuration nginx est manquant... Merci de vérifier que votre configuration nginx est à jour avec la commande: `yunohost tools regen-conf nginx --dry-run --with-diff`.",
|
||||
"certmanager_http_check_timeout": "Expiration du délai lorsque le serveur a essayé de se contacter lui-même via HTTP en utilisant l’adresse IP public {ip:s} du domaine {domain:s}. Vous rencontrez peut-être un problème d’hairpinning ou alors le pare-feu/routeur en amont de votre serveur est mal configuré.",
|
||||
"certmanager_couldnt_fetch_intermediate_cert": "Expiration du délai lors de la tentative de récupération du certificat intermédiaire depuis Let’s Encrypt. L’installation ou le renouvellement du certificat a été annulé. Veuillez réessayer plus tard.",
|
||||
"domain_hostname_failed": "Échec de l’utilisation d’un nouveau nom d’hôte. Cela pourrait causer des soucis plus tard (cela n’en causera peut-être pas).",
|
||||
"yunohost_ca_creation_success": "L’autorité de certification locale créée.",
|
||||
"yunohost_ca_creation_success": "L'autorité de certification locale a été créée.",
|
||||
"app_already_installed_cant_change_url": "Cette application est déjà installée. L’URL ne peut pas être changé simplement par cette fonction. Vérifiez si cela est disponible avec `app changeurl`.",
|
||||
"app_change_url_failed_nginx_reload": "Le redémarrage de Nginx a échoué. Voici la sortie de 'nginx -t' :\n{nginx_errors:s}",
|
||||
"app_change_url_identical_domains": "L’ancien et le nouveau couple domaine/chemin_de_l’URL sont identiques pour ('{domain:s}{path:s}'), rien à faire.",
|
||||
|
@ -333,7 +333,7 @@
|
|||
"log_tools_shutdown": "Éteindre votre serveur",
|
||||
"log_tools_reboot": "Redémarrer votre serveur",
|
||||
"mail_unavailable": "Cette adresse de courriel est réservée et doit être automatiquement attribuée au tout premier utilisateur",
|
||||
"migration_description_0004_php5_to_php7_pools": "Reconfigurer les espaces utilisateurs PHP pour utiliser PHP 7 au lieu de PHP 5",
|
||||
"migration_description_0004_php5_to_php7_pools": "Reconfigurer l'ensemble PHP pour utiliser PHP 7 au lieu de PHP 5",
|
||||
"migration_description_0005_postgresql_9p4_to_9p6": "Migration des bases de données de PostgreSQL 9.4 vers PostgreSQL 9.6",
|
||||
"migration_0005_postgresql_94_not_installed": "PostgreSQL n’a pas été installé sur votre système. Rien à faire !",
|
||||
"migration_0005_postgresql_96_not_installed": "PostgreSQL 9.4 est installé, mais pas PostgreSQL 9.6 ‽ Quelque chose de bizarre aurait pu se produire sur votre système :(…",
|
||||
|
@ -364,7 +364,7 @@
|
|||
"confirm_app_install_warning": "Avertissement : cette application peut fonctionner mais n’est pas bien intégrée dans YunoHost. Certaines fonctionnalités telles que l’authentification unique et la sauvegarde/restauration peuvent ne pas être disponibles. L’installer quand même ? [{answers:s}] ",
|
||||
"confirm_app_install_danger": "DANGER ! Cette application est connue pour être encore expérimentale (si elle ne fonctionne pas explicitement) ! Vous ne devriez probablement PAS l’installer à moins de savoir ce que vous faites. AUCUN SUPPORT ne sera fourni si cette application ne fonctionne pas ou casse votre système … Si vous êtes prêt à prendre ce risque de toute façon, tapez '{answers:s}'",
|
||||
"confirm_app_install_thirdparty": "DANGER! Cette application ne fait pas partie du catalogue d'applications de Yunohost. L'installation d'applications tierces peut compromettre l'intégrité et la sécurité de votre système. Vous ne devriez probablement PAS l'installer à moins de savoir ce que vous faites. AUCUN SUPPORT ne sera fourni si cette application ne fonctionne pas ou casse votre système ... Si vous êtes prêt à prendre ce risque de toute façon, tapez '{answers:s}'",
|
||||
"dpkg_is_broken": "Vous ne pouvez pas faire ça maintenant car dpkg/apt (le gestionnaire de paquets du système) semble avoir laissé des choses non configurées. Vous pouvez essayer de résoudre ce problème en vous connectant via SSH et en exécutant `sudo dpkg --configure -a'.",
|
||||
"dpkg_is_broken": "Vous ne pouvez pas faire ça maintenant car dpkg/apt (le gestionnaire de paquets du système) semble avoir laissé des choses non configurées. Vous pouvez essayer de résoudre ce problème en vous connectant via SSH et en exécutant `sudo apt install --fix-broken` et/ou `sudo dpkg --configure -a'.",
|
||||
"dyndns_could_not_check_available": "Impossible de vérifier si {domain:s} est disponible chez {provider:s}.",
|
||||
"file_does_not_exist": "Le fichier dont le chemin est {path:s} n’existe pas.",
|
||||
"global_settings_setting_security_password_admin_strength": "Qualité du mot de passe administrateur",
|
||||
|
@ -390,9 +390,9 @@
|
|||
"service_restarted": "Le service « {service:s} » a été redémarré",
|
||||
"service_reload_or_restart_failed": "Impossible de recharger ou de redémarrer le service '{service:s}'\n\nJournaux historisés récents de ce service : {logs:s}",
|
||||
"service_reloaded_or_restarted": "Le service « {service:s} » a été rechargé ou redémarré",
|
||||
"this_action_broke_dpkg": "Cette action a laissé des paquets non configurés par dpkg/apt (les gestionnaires de paquets système). Vous pouvez essayer de résoudre ce problème en vous connectant via SSH et en exécutant `sudo dpkg --configure -a`.",
|
||||
"this_action_broke_dpkg": "Cette action a laissé des paquets non configurés par dpkg/apt (les gestionnaires de paquets système). Vous pouvez essayer de résoudre ce problème en vous connectant via SSH et en exécutant `sudo apt install --fix-broken` et/ou `sudo dpkg --configure -a`.",
|
||||
"app_action_cannot_be_ran_because_required_services_down": "Ces services requis doivent être en cours d’exécution pour exécuter cette action : {services}. Essayez de les redémarrer pour continuer (et éventuellement rechercher pourquoi ils sont en panne).",
|
||||
"admin_password_too_long": "Veuillez choisir un mot de passe de moins de 127 caractères",
|
||||
"admin_password_too_long": "Veuillez choisir un mot de passe comportant moins de 127 caractères",
|
||||
"log_regen_conf": "Régénérer les configurations du système '{}'",
|
||||
"migration_0009_not_needed": "Cette migration semble avoir déjà été jouée ? On l’ignore.",
|
||||
"regenconf_file_backed_up": "Le fichier de configuration '{conf}' a été sauvegardé sous '{backup}'",
|
||||
|
@ -426,7 +426,7 @@
|
|||
"tools_upgrade_special_packages_completed": "La mise à jour des paquets de YunoHost est finie !\nPressez [Entrée] pour revenir à la ligne de commande",
|
||||
"dpkg_lock_not_available": "Cette commande ne peut pas être exécutée pour le moment car un autre programme semble utiliser le verrou de dpkg (le gestionnaire de package système)",
|
||||
"tools_upgrade_cant_unhold_critical_packages": "Impossible de conserver les paquets critiques…",
|
||||
"tools_upgrade_special_packages_explanation": "La mise à niveau spéciale se poursuivra en arrière-plan. Veuillez ne pas lancer d'autres actions sur votre serveur pendant les 10 prochaines minutes (selon la vitesse du matériel). Après cela, vous devrez peut-être vous reconnecter à l'administrateur Web. Le journal de mise à niveau sera disponible dans Outils → Journal (dans le webadmin) ou en utilisant la « liste des journaux yunohost » (à partir de la ligne de commande).",
|
||||
"tools_upgrade_special_packages_explanation": "La mise à niveau spécifique à YunoHost se poursuivra en arrière-plan. Veuillez ne pas lancer d'autres actions sur votre serveur pendant les 10 prochaines minutes (selon la vitesse du matériel). Après cela, vous devrez peut-être vous reconnecter à l'administrateur Web. Le journal de mise à niveau sera disponible dans Outils → Journal (dans le webadmin) ou en utilisant la « liste des journaux yunohost » (à partir de la ligne de commande).",
|
||||
"update_apt_cache_failed": "Impossible de mettre à jour le cache APT (gestionnaire de paquets Debian). Voici un extrait du fichier sources.list qui pourrait vous aider à identifier les lignes problématiques :\n{sourceslist}",
|
||||
"update_apt_cache_warning": "Des erreurs se sont produites lors de la mise à jour du cache APT (gestionnaire de paquets Debian). Voici un extrait des lignes du fichier sources.list qui pourrait vous aider à identifier les lignes problématiques :\n{sourceslist}",
|
||||
"backup_permission": "Permission de sauvegarde pour l’application {app:s}",
|
||||
|
@ -509,7 +509,7 @@
|
|||
"diagnosis_ip_not_connected_at_all": "Le serveur ne semble pas du tout connecté à Internet !?",
|
||||
"diagnosis_ip_weird_resolvconf": "La résolution DNS semble fonctionner, mais il semble que vous utilisez un <code>/etc/resolv.conf</code> personnalisé.",
|
||||
"diagnosis_ip_weird_resolvconf_details": "Le fichier <code>/etc/resolv.conf</code> doit être un lien symbolique vers <code>/etc/resolvconf/run/resolv.conf</code> lui-même pointant vers <code>127.0.0.1</code> (dnsmasq). Si vous souhaitez configurer manuellement les résolveurs DNS, veuillez modifier <code>/etc/resolv.dnsmasq.conf</code>.",
|
||||
"diagnosis_dns_missing_record": "Selon la configuration DNS recommandée, vous devez ajouter un enregistrement DNS\nType : {type}\nNom : {name}\nValeur : {value}",
|
||||
"diagnosis_dns_missing_record": "Selon la configuration DNS recommandée, vous devez ajouter un enregistrement DNS<br>Type : <code>{type}</code><br>Nom : <code>{name}</code><br>Valeur: <code>{value}</code>",
|
||||
"diagnosis_diskusage_ok": "L’espace de stockage <code>{mountpoint}</code> (sur le périphérique <code>{device}</code>) a encore {free} ({free_percent}%) espace restant (sur {total}) !",
|
||||
"diagnosis_ram_ok": "Le système dispose encore de {available} ({available_percent}%) de RAM sur {total}.",
|
||||
"diagnosis_regenconf_allgood": "Tous les fichiers de configuration sont conformes à la configuration recommandée !",
|
||||
|
@ -535,7 +535,7 @@
|
|||
"diagnosis_ip_broken_resolvconf": "La résolution du nom de domaine semble être cassée sur votre serveur, ce qui semble lié au fait que <code>/etc/resolv.conf</code> ne pointe pas vers <code>127.0.0.1</code>.",
|
||||
"diagnosis_dns_good_conf": "Les enregistrements DNS sont correctement configurés pour le domaine {domain} (catégorie {category})",
|
||||
"diagnosis_dns_bad_conf": "Certains enregistrements DNS sont manquants ou incorrects pour le domaine {domain} (catégorie {category})",
|
||||
"diagnosis_dns_discrepancy": "L’enregistrement DNS de type {type} et nom {name} ne correspond pas à la configuration recommandée.\nValeur actuelle : {current}\nValeur attendue : {value}",
|
||||
"diagnosis_dns_discrepancy": "Cet enregistrement DNS ne semble pas correspondre à la configuration recommandée : <br>Type : <code>{type}</code><br>Nom : <code>{name}</code><br> La valeur actuelle est : <code>{current}</code><br> La valeur attendue est : <code>{value}</code>",
|
||||
"diagnosis_services_bad_status": "Le service {service} est {status} :-(",
|
||||
"diagnosis_diskusage_verylow": "L'espace de stockage <code>{mountpoint}</code> (sur l’appareil <code>{device}</code>) ne dispose que de {free} ({free_percent}%) espace restant (sur {total}). Vous devriez vraiment envisager de nettoyer de l’espace !",
|
||||
"diagnosis_diskusage_low": "L'espace de stockage <code>{mountpoint}</code> (sur l'appareil <code>{device}</code>) ne dispose que de {free} ({free_percent}%) espace restant (sur {total}). Faites attention.",
|
||||
|
@ -618,7 +618,7 @@
|
|||
"diagnosis_mail_queue_too_big": "Trop d’e-mails en attente dans la file d'attente ({nb_pending} e-mails)",
|
||||
"global_settings_setting_smtp_allow_ipv6": "Autoriser l'utilisation d’IPv6 pour recevoir et envoyer du courrier",
|
||||
"diagnosis_security_all_good": "Aucune vulnérabilité de sécurité critique n’a été trouvée.",
|
||||
"diagnosis_display_tip": "Pour voir les problèmes détectés, vous pouvez accéder à la section Diagnostic du webadmin ou exécuter « yunohost diagnostic show --issues » à partir de la ligne de commande.",
|
||||
"diagnosis_display_tip": "Pour voir les problèmes détectés, vous pouvez accéder à la section Diagnostic du webadmin ou exécuter « yunohost diagnosis show --issues » à partir de la ligne de commande.",
|
||||
"diagnosis_ip_global": "IP globale : <code>{global}</code>",
|
||||
"diagnosis_ip_local": "IP locale : <code>{local}</code>",
|
||||
"diagnosis_dns_point_to_doc": "Veuillez consulter la documentation sur <a href='https://yunohost.org/dns_config'>https://yunohost.org/dns_config</a> si vous avez besoin d’aide pour configurer les enregistrements DNS.",
|
||||
|
@ -637,5 +637,36 @@
|
|||
"diagnosis_http_hairpinning_issue_details": "C'est probablement à cause de la box/routeur de votre fournisseur d'accès internet. Par conséquent, les personnes extérieures à votre réseau local pourront accéder à votre serveur comme prévu, mais pas les personnes internes au réseau local (comme vous, probablement ?) si elles utilisent le nom de domaine ou l'IP globale. Vous pourrez peut-être améliorer la situation en consultant <a href='https://yunohost.org/dns_local_network'>https://yunohost.org/dns_local_network</a>",
|
||||
"diagnosis_http_partially_unreachable": "Le domaine {domain} semble inaccessible en HTTP depuis l’extérieur du réseau local en IPv{failed}, bien qu’il fonctionne en IPv{passed}.",
|
||||
"diagnosis_http_nginx_conf_not_up_to_date": "La configuration Nginx de ce domaine semble avoir été modifiée manuellement et empêche YunoHost de diagnostiquer si elle est accessible en HTTP.",
|
||||
"diagnosis_http_nginx_conf_not_up_to_date_details": "Pour corriger la situation, inspectez la différence avec la ligne de commande en utilisant les outils <cmd>yunohost regen-conf nginx --dry-run --with-diff</cmd> et si vous êtes d’accord, appliquez les modifications avec <cmd>yunohost tools regen-conf nginx --force</cmd>."
|
||||
"diagnosis_http_nginx_conf_not_up_to_date_details": "Pour corriger la situation, inspectez la différence avec la ligne de commande en utilisant les outils <cmd>yunohost tools regen-conf nginx --dry-run --with-diff</cmd> et si vous êtes d’accord, appliquez les modifications avec <cmd>yunohost tools regen-conf nginx --force</cmd>.",
|
||||
"backup_archive_cant_retrieve_info_json": "Impossible d'avoir des informations sur l'archive '{archive}' ... Le fichier info.json ne peut pas être trouvé (ou n'est pas un fichier json valide).",
|
||||
"backup_archive_corrupted": "Il semble que l'archive de la sauvegarde '{archive}' est corrompue : {error}",
|
||||
"diagnosis_ip_no_ipv6_tip": "L'utilisation de IPv6 n'est pas obligatoire pour le fonctionnement de votre serveur, mais cela contribue à la santé d'Internet dans son ensemble. IPv6 généralement configuré automatiquement par votre système ou votre FAI s'il est disponible. Autrement, vous devrez prendre quelque minutes pour le configurer manuellement à l'aide de cette documentation: <a href='https://yunohost.org/#/ipv6'>https://yunohost.org/#/ipv6</a>. Si vous ne pouvez pas activer IPv6 ou si c'est trop technique pour vous, vous pouvez aussi ignorer cet avertissement sans que cela pose problème.",
|
||||
"diagnosis_domain_expiration_not_found": "Impossible de vérifier la date d'expiration de certains domaines",
|
||||
"diagnosis_domain_expiration_not_found_details": "Les informations WHOIS pour le domaine {domain} ne semblent pas contenir les informations concernant la date d'expiration ?",
|
||||
"diagnosis_domain_not_found_details": "Le domaine {domain} n'existe pas dans la base de donnée WHOIS ou est expiré !",
|
||||
"diagnosis_domain_expiration_success": "Vos domaines sont enregistrés et ne vont pas expirer prochainement.",
|
||||
"diagnosis_domain_expiration_warning": "Certains domaines vont expirer prochainement !",
|
||||
"diagnosis_domain_expiration_error": "Certains domaines vont expirer TRÈS PROCHAINEMENT !",
|
||||
"diagnosis_domain_expires_in": "{domain} expire dans {days} jours.",
|
||||
"certmanager_domain_not_diagnosed_yet": "Il n'y a pas encore de résultat de diagnostic pour le domaine %s. Merci de relancer un diagnostic pour les catégories 'Enregistrements DNS' et 'Web' dans la section Diagnostique pour vérifier si le domaine est prêt pour Let's Encrypt. (Ou si vous savez ce que vous faites, utilisez '--no-checks' pour désactiver la vérification.)",
|
||||
"diagnosis_swap_tip": "Merci d'être prudent et conscient que si vous hébergez une partition SWAP sur une carte SD ou un disque SSD, cela risque de réduire drastiquement l’espérance de vie du périphérique.",
|
||||
"restore_already_installed_apps": "Les applications suivantes ne peuvent pas être restaurées car elles sont déjà installées : {apps}",
|
||||
"regenconf_need_to_explicitly_specify_ssh": "La configuration de ssh a été modifiée manuellement. Vous devez explicitement indiquer la mention --force à \"ssh\" pour appliquer les changements.",
|
||||
"migration_0015_cleaning_up": "Nettoyage du cache et des paquets qui ne sont plus utiles …",
|
||||
"migration_0015_specific_upgrade": "Commencement de la mise à jour des paquets du système qui doivent être mis à jour séparément …",
|
||||
"migration_0015_modified_files": "Veuillez noter que les fichiers suivants ont été modifiés manuellement et pourraient être écrasés à la suite de la mise à niveau : {manually_modified_files}",
|
||||
"migration_0015_problematic_apps_warning": "Veuillez noter que des applications qui peuvent poser problèmes ont été détectées. Il semble qu'elles n'aient pas été installées à partir du catalogue d'applications YunoHost, ou bien qu'elles ne soient pas signalées comme \"fonctionnelles\". Par conséquent, il n'est pas possible de garantir que les applications suivantes fonctionneront encore après la mise à niveau : {problematic_apps}",
|
||||
"migration_0015_general_warning": "Veuillez noter que cette migration est une opération délicate. L'équipe YunoHost a fait de son mieux pour la revérifier et la tester, mais la migration pourrait quand même casser des éléments du système ou de ses applications.\n\nIl est donc recommandé :\n…- de faire une sauvegarde de toute donnée ou application critique. Plus d'informations ici https://yunohost.org/backup ;\n…- d'être patient après le lancement de la migration. Selon votre connexion internet et votre matériel, la mise à niveau peut prendre jusqu'à quelques heures.",
|
||||
"migration_0015_system_not_fully_up_to_date": "Votre système n'est pas entièrement à jour. Veuillez effectuer une mise à jour normale avant de lancer la migration vers Buster.",
|
||||
"migration_0015_not_enough_free_space": "L'espace libre est très faible dans /var/ ! Vous devriez avoir au moins 1 Go de libre pour effectuer cette migration.",
|
||||
"migration_0015_not_stretch": "La distribution Debian actuelle n'est pas Stretch !",
|
||||
"migration_0015_yunohost_upgrade": "Démarrage de la mise à jour de YunoHost …",
|
||||
"migration_0015_still_on_stretch_after_main_upgrade": "Quelque chose s'est mal passé lors de la mise à niveau, le système semble toujours être sous Debian Stretch",
|
||||
"migration_0015_main_upgrade": "Démarrage de la mise à niveau générale …",
|
||||
"migration_0015_patching_sources_list": "Mise à jour du fichier sources.lists …",
|
||||
"migration_0015_start": "Démarrage de la migration vers Buster",
|
||||
"migration_description_0015_migrate_to_buster": "Mise à niveau du système vers Debian Buster et YunoHost 4.x",
|
||||
"diagnosis_dns_try_dyndns_update_force": "La configuration DNS de ce domaine devrait être automatiquement gérée par Yunohost. Si ce n'est pas le cas, vous pouvez essayer de forcer une mise à jour en utilisant <cmd>yunohost dyndns update --force</cmd>.",
|
||||
"app_packaging_format_not_supported": "Cette application ne peut pas être installée car son format n'est pas pris en charge par votre version de Yunohost. Vous devriez probablement envisager de mettre à jour votre système.",
|
||||
"migration_0015_weak_certs": "Il a été constaté que les certificats suivants utilisent encore des algorithmes de signature peu robustes et doivent être mis à jour pour être compatibles avec la prochaine version de nginx : {certs}"
|
||||
}
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
{
|
||||
"app_already_installed": "{app:s} è già installata",
|
||||
"app_extraction_failed": "Impossibile estrarre i file di installazione",
|
||||
"app_not_installed": "{app:s} non è installata",
|
||||
"app_not_installed": "Impossibile trovare l'applicazione '{app:s}' nell'elenco delle applicazioni installate: {all_apps}",
|
||||
"app_unknown": "Applicazione sconosciuta",
|
||||
"ask_email": "Indirizzo email",
|
||||
"ask_password": "Password",
|
||||
|
@ -27,16 +27,16 @@
|
|||
"user_deleted": "L'utente è stato cancellato",
|
||||
"admin_password": "Password dell'amministrazione",
|
||||
"admin_password_change_failed": "Impossibile cambiare la password",
|
||||
"admin_password_changed": "La password dell'amministrazione è stata cambiata",
|
||||
"app_install_files_invalid": "Non sono validi i file di installazione",
|
||||
"app_manifest_invalid": "Manifesto dell'applicazione non valido: {error}",
|
||||
"admin_password_changed": "La password d'amministrazione è stata cambiata",
|
||||
"app_install_files_invalid": "Questi file non possono essere installati",
|
||||
"app_manifest_invalid": "C'è qualcosa di scorretto nel manifesto dell'applicazione: {error}",
|
||||
"app_not_correctly_installed": "{app:s} sembra di non essere installata correttamente",
|
||||
"app_not_properly_removed": "{app:s} non è stata correttamente rimossa",
|
||||
"action_invalid": "L'azione '{action:s}' non è valida",
|
||||
"app_removed": "{app:s} è stata rimossa",
|
||||
"app_sources_fetch_failed": "Impossibile riportare i file sorgenti",
|
||||
"app_upgrade_failed": "Impossibile aggiornare {app:s}",
|
||||
"app_upgraded": "{app:s} è stata aggiornata",
|
||||
"app_removed": "{app:s} rimossa",
|
||||
"app_sources_fetch_failed": "Impossibile riportare i file sorgenti, l'URL è corretto?",
|
||||
"app_upgrade_failed": "Impossibile aggiornare {app:s}: {error}",
|
||||
"app_upgraded": "{app:s} aggiornata",
|
||||
"app_requirements_checking": "Controllo i pacchetti richiesti per {app}…",
|
||||
"app_requirements_unmeet": "Requisiti non soddisfatti per {app}, il pacchetto {pkgname} ({version}) deve essere {spec}",
|
||||
"ask_firstname": "Nome",
|
||||
|
@ -45,8 +45,8 @@
|
|||
"ask_new_admin_password": "Nuova password dell'amministrazione",
|
||||
"backup_app_failed": "Non è possibile fare il backup dell'applicazione '{app:s}'",
|
||||
"backup_archive_app_not_found": "L'applicazione '{app:s}' non è stata trovata nel archivio di backup",
|
||||
"app_argument_choice_invalid": "Scelta non valida per l'argomento '{name:s}', deve essere uno di {choices:s}",
|
||||
"app_argument_invalid": "Valore non valido per '{name:s}': {error:s}",
|
||||
"app_argument_choice_invalid": "Usa una delle seguenti scelte '{choices:s}' per il parametro '{name:s}'",
|
||||
"app_argument_invalid": "Scegli un valore valido per il parametro '{name:s}': {error:s}",
|
||||
"app_argument_required": "L'argomento '{name:s}' è requisito",
|
||||
"app_id_invalid": "Identificativo dell'applicazione non valido",
|
||||
"app_unsupported_remote_type": "Il tipo remoto usato per l'applicazione non è supportato",
|
||||
|
@ -171,15 +171,15 @@
|
|||
"certmanager_attempt_to_renew_nonLE_cert": "Il certificato per il dominio {domain:s} non è emesso da Let's Encrypt. Impossibile rinnovarlo automaticamente!",
|
||||
"certmanager_attempt_to_renew_valid_cert": "Il certificato per il dominio {domain:s} non è a scadere! Usa --force per ignorare",
|
||||
"certmanager_domain_http_not_working": "Sembra che non sia possibile accedere al dominio {domain:s} attraverso HTTP. Verifica la configurazione del DNS e di nginx",
|
||||
"app_already_installed_cant_change_url": "Questa applicazione è già installata. L'URL non può essere cambiato solo da questa funzione. Guarda se `app changeurl` è disponibile.",
|
||||
"app_already_installed_cant_change_url": "Questa applicazione è già installata. L'URL non può essere cambiato solo da questa funzione. Controlla se `app changeurl` è disponibile.",
|
||||
"app_already_up_to_date": "{app:s} è già aggiornata",
|
||||
"app_change_url_failed_nginx_reload": "Riavvio di nginx fallito. Questo è il risultato di 'nginx -t':\n{nginx_errors:s}",
|
||||
"app_change_url_failed_nginx_reload": "Non riesco a riavviare NGINX. Questo è il risultato di 'nginx -t':\n{nginx_errors:s}",
|
||||
"app_change_url_identical_domains": "Il vecchio ed il nuovo dominio/percorso_url sono identici ('{domain:s}{path:s}'), nessuna operazione necessaria.",
|
||||
"app_change_url_no_script": "L'applicazione '{app_name:s}' non supporta ancora la modifica dell'URL. Forse dovresti aggiornare l'applicazione.",
|
||||
"app_change_url_success": "URL dell'applicazione {app:s} cambiato con successo in {domain:s}{path:s}",
|
||||
"app_make_default_location_already_used": "Impostazione dell'applicazione '{app}' come predefinita del dominio {domain} non riuscita perchè è già stata impostata per l'altra applicazione '{other_app}'",
|
||||
"app_location_unavailable": "Questo URL non è disponibile o va in conflitto con la/le applicazione/i già installata/e:\n{apps:s}",
|
||||
"app_upgrade_app_name": "Aggiornando l'applicazione {app}…",
|
||||
"app_change_url_no_script": "L'applicazione '{app_name:s}' non supporta ancora la modifica dell'URL. Forse dovresti aggiornarla.",
|
||||
"app_change_url_success": "L'URL dell'applicazione {app:s} è stato cambiato in {domain:s}{path:s}",
|
||||
"app_make_default_location_already_used": "Impostazione dell'applicazione '{app}' come predefinita del dominio non riuscita perché il dominio {domain} è già in uso per l'altra applicazione '{other_app}'",
|
||||
"app_location_unavailable": "Questo URL non è più disponibile o va in conflitto con la/le applicazione/i già installata/e:\n{apps:s}",
|
||||
"app_upgrade_app_name": "Aggiornamento dell'applicazione {app}…",
|
||||
"app_upgrade_some_app_failed": "Impossibile aggiornare alcune applicazioni",
|
||||
"backup_abstract_method": "Questo metodo di backup non è ancora stato implementato",
|
||||
"backup_applying_method_borg": "Inviando tutti i file da salvare nel backup nel deposito borg-backup…",
|
||||
|
@ -212,11 +212,11 @@
|
|||
"certmanager_cert_install_success": "Certificato Let's Encrypt per il dominio {domain:s} installato con successo!",
|
||||
"aborting": "Annullamento.",
|
||||
"admin_password_too_long": "Per favore scegli una password più corta di 127 caratteri",
|
||||
"app_not_upgraded": "Le seguenti app non sono state aggiornate: {apps}",
|
||||
"app_start_install": "Installando l'applicazione {app}…",
|
||||
"app_start_remove": "Rimuovendo l'applicazione {app}…",
|
||||
"app_start_backup": "Raccogliendo file da salvare nel backup per {app}…",
|
||||
"app_start_restore": "Ripristinando l'applicazione {app}…",
|
||||
"app_not_upgraded": "Impossibile aggiornare le applicazioni '{failed_app}' e di conseguenza l'aggiornamento delle seguenti applicazione è stato cancellato: {apps}",
|
||||
"app_start_install": "Installando l'applicazione '{app}'…",
|
||||
"app_start_remove": "Rimozione dell'applicazione {app}…",
|
||||
"app_start_backup": "Raccogliendo file da salvare nel backup per '{app}'…",
|
||||
"app_start_restore": "Ripristino dell'applicazione '{app}'…",
|
||||
"app_upgrade_several_apps": "Le seguenti app saranno aggiornate : {apps}",
|
||||
"ask_new_domain": "Nuovo dominio",
|
||||
"ask_new_path": "Nuovo percorso",
|
||||
|
@ -233,7 +233,7 @@
|
|||
"password_too_simple_4": "La password deve essere lunga almeno 12 caratteri e contenere numeri, maiuscole e minuscole",
|
||||
"users_available": "Utenti disponibili:",
|
||||
"yunohost_ca_creation_success": "L'autorità di certificazione locale è stata creata.",
|
||||
"app_action_cannot_be_ran_because_required_services_down": "Questa app richiede alcuni servizi che attualmente non sono attivi. Prima di continuare, dovresti provare a riavviare i seguenti servizi (e possibilmente capire perchè questi non siano attivi) : {services}",
|
||||
"app_action_cannot_be_ran_because_required_services_down": "I seguenti servizi dovrebbero essere in funzione per completare questa azione: {services}. Prova a riavviarli per proseguire (e possibilmente cercare di capire come ma non funzionano più).",
|
||||
"backup_output_symlink_dir_broken": "Hai un collegamento errato alla tua cartella di archiviazione '{path:s}'. Potresti avere delle impostazioni particolari per salvare i tuoi dati su un altro spazio, in questo caso probabilmente ti sei scordato di rimontare o collegare il tuo hard disk o la chiavetta usb.",
|
||||
"certmanager_conflicting_nginx_file": "Impossibile preparare il dominio per il controllo ACME: il file di configurazione nginx {filepath:s} è in conflitto e dovrebbe essere prima rimosso",
|
||||
"certmanager_couldnt_fetch_intermediate_cert": "Tempo scaduto durante il tentativo di recupero di un certificato intermedio da Let's Encrypt. Installazione/rinnovo non riuscito - per favore riprova più tardi.",
|
||||
|
@ -264,7 +264,7 @@
|
|||
"global_settings_reset_success": "Successo. Le tue impostazioni precedenti sono state salvate in {path:s}",
|
||||
"global_settings_setting_example_bool": "Esempio di opzione booleana",
|
||||
"global_settings_setting_example_enum": "Esempio di opzione enum",
|
||||
"already_up_to_date": "Niente da fare! Tutto è già aggiornato!",
|
||||
"already_up_to_date": "Niente da fare. Tutto è già aggiornato.",
|
||||
"global_settings_setting_example_int": "Esempio di opzione int",
|
||||
"global_settings_setting_example_string": "Esempio di opzione string",
|
||||
"global_settings_setting_security_nginx_compatibility": "Bilanciamento tra compatibilità e sicurezza per il server web nginx. Riguarda gli algoritmi di cifratura (e altri aspetti legati alla sicurezza)",
|
||||
|
@ -335,5 +335,10 @@
|
|||
"migration_0003_not_jessie": "La distribuzione attuale non è Jessie!",
|
||||
"migration_0003_system_not_fully_up_to_date": "Il tuo sistema non è completamente aggiornato. Per favore prima esegui un aggiornamento normale prima di migrare a stretch.",
|
||||
"this_action_broke_dpkg": "Questa azione ha danneggiato dpkg/apt (i gestori di pacchetti del sistema)… Puoi provare a risolvere questo problema connettendoti via SSH ed eseguendo `sudo dpkg --configure -a`.",
|
||||
"app_action_broke_system": "Questa azione sembra avere roto servizi importanti: {services}"
|
||||
}
|
||||
"app_action_broke_system": "Questa azione sembra avere rotto questi servizi importanti: {services}",
|
||||
"app_remove_after_failed_install": "Rimozione dell'applicazione a causa del fallimento dell'installazione…",
|
||||
"app_install_script_failed": "Si è verificato un errore nello script di installazione dell'applicazione",
|
||||
"app_install_failed": "Impossibile installare {app}:{error}",
|
||||
"app_full_domain_unavailable": "Spiacente, questa app deve essere installata su un proprio dominio, ma altre applicazioni sono state installate sul dominio '{domain}'. Dovresti invece usare un sotto-dominio dedicato per questa app.",
|
||||
"app_upgrade_script_failed": "È stato trovato un errore nello script di aggiornamento dell'applicazione"
|
||||
}
|
||||
|
|
|
@ -472,7 +472,7 @@
|
|||
"migrations_not_pending_cant_skip": "Aquestas migracions son pas en espèra, las podètz pas doncas ignorar : {ids}",
|
||||
"app_action_broke_system": "Aquesta accion sembla aver copat de servicis importants : {services}",
|
||||
"diagnosis_display_tip_web": "Podètz anar a la seccion Diagnostic (dins l’ecran d’acuèlh) per veire los problèmas trobats.",
|
||||
"diagnosis_ip_no_ipv6": "Lo servidor a pas d’adreça IPv5 activa.",
|
||||
"diagnosis_ip_no_ipv6": "Lo servidor a pas d’adreça IPv6 activa.",
|
||||
"diagnosis_ip_not_connected_at_all": "Lo servidor sembla pas connectat a Internet ?!",
|
||||
"diagnosis_security_all_good": "Cap de vulnerabilitat de seguretat critica pas trobada.",
|
||||
"diagnosis_description_regenconf": "Configuracion sistèma",
|
||||
|
@ -537,7 +537,7 @@
|
|||
"group_cannot_be_deleted": "Lo grop « {group} » pòt pas èsser suprimit manualament.",
|
||||
"diagnosis_found_warnings": "Trobat {warnings} element(s) que se poirián melhorar per {category}.",
|
||||
"diagnosis_dns_missing_record": "Segon la configuracion DNS recomandada, vos calriá ajustar un enregistrament DNS\ntipe: {type}\nnom: {name}\nvalor: {value}",
|
||||
"diagnosis_dns_discrepancy": "Segon la configuracion DNS recomandada, la valor per l’enregistrament DNS\ntipe: {type}\nnom: {name}\ndeuriá èsser: {current}\nallòc de: {value}",
|
||||
"diagnosis_dns_discrepancy": "La configuracion DNS seguenta sembla pas la configuracion recomandada : <br>Tipe : <code>{type}</code><br>Nom : <code>{name}</code><br>Valors actualas :<code> {current]</code><br>Valor esperada : <code>{value}</code>",
|
||||
"diagnosis_regenconf_manually_modified_debian_details": "Es pas problematic, mas car téner d’agacher...",
|
||||
"diagnosis_ports_could_not_diagnose": "Impossible de diagnosticar se los pòrts son accessibles de l’exterior.",
|
||||
"diagnosis_ports_could_not_diagnose_details": "Error : {error}",
|
||||
|
@ -578,5 +578,9 @@
|
|||
"diagnosis_mail_ehlo_could_not_diagnose_details": "Error : {error}",
|
||||
"diagnosis_mail_queue_unavailable_details": "Error : {error}",
|
||||
"diagnosis_basesystem_hardware": "L’arquitectura del servidor es {virt} {arch}",
|
||||
"diagnosis_basesystem_hardware_board": "Lo modèl de carta del servidor es {model}"
|
||||
"diagnosis_basesystem_hardware_board": "Lo modèl de carta del servidor es {model}",
|
||||
"backup_archive_corrupted": "Sembla que l’archiu de la salvagarda « {archive} » es corromput : {error}",
|
||||
"diagnosis_domain_expires_in": "{domain} expiraà d’aquí {days} jorns.",
|
||||
"migration_0015_cleaning_up": "Netejatge de la memòria cache e dels paquets pas mai necessaris…",
|
||||
"restore_already_installed_apps": "Restauracion impossibla de las aplicacions seguentas que son ja installadas : {apps}"
|
||||
}
|
||||
|
|
|
@ -10,3 +10,5 @@ markers =
|
|||
with_legacy_app_installed
|
||||
with_backup_recommended_app_installed_with_ynh_restore
|
||||
with_permission_app_installed
|
||||
filterwarnings =
|
||||
ignore::urllib3.exceptions.InsecureRequestWarning
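A minimal sketch of how one of the markers registered above is presumably consumed in the test suite; the test name and body below are illustrative and not taken from this changeset:

import pytest

@pytest.mark.with_permission_app_installed
def test_permission_app_is_listed():
    # A conftest fixture is assumed to react to this marker and install the
    # permission test app before the test body runs.
    assert True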
|
|
@ -0,0 +1,205 @@
|
|||
#! /usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
import moulinette
|
||||
from moulinette import m18n
|
||||
from moulinette.utils.log import configure_logging
|
||||
from moulinette.interfaces.cli import colorize, get_locale
|
||||
|
||||
|
||||
def is_installed():
|
||||
return os.path.isfile('/etc/yunohost/installed')
|
||||
|
||||
|
||||
def cli(debug, quiet, output_as, timeout, args, parser):
|
||||
|
||||
init_logging(interface="cli", debug=debug, quiet=quiet)
|
||||
|
||||
# Check that YunoHost is installed
|
||||
if not is_installed():
|
||||
check_command_is_valid_before_postinstall(args)
|
||||
|
||||
ret = moulinette.cli(
|
||||
args,
|
||||
output_as=output_as,
|
||||
timeout=timeout,
|
||||
top_parser=parser
|
||||
)
|
||||
sys.exit(ret)
|
||||
|
||||
|
||||
def api(debug, host, port):
|
||||
|
||||
init_logging(interface="api", debug=debug)
|
||||
|
||||
def is_installed_api():
|
||||
return {'installed': is_installed()}
|
||||
|
||||
# FIXME : someday, maybe find a way to disable route /postinstall if
|
||||
# postinstall already done ...
|
||||
|
||||
ret = moulinette.api(
|
||||
host=host,
|
||||
port=port,
|
||||
routes={('GET', '/installed'): is_installed_api},
|
||||
)
|
||||
sys.exit(ret)
|
||||
|
||||
|
||||
def check_command_is_valid_before_postinstall(args):
|
||||
|
||||
allowed_if_not_postinstalled = ['tools postinstall',
|
||||
'tools versions',
|
||||
'backup list',
|
||||
'backup restore',
|
||||
'log display']
|
||||
|
||||
if (len(args) < 2 or (args[0] + ' ' + args[1] not in allowed_if_not_postinstalled)):
|
||||
init_i18n()
|
||||
print(colorize(m18n.g('error'), 'red') + " " + m18n.n('yunohost_not_installed'))
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def init(interface="cli", debug=False, quiet=False, logdir="/var/log/yunohost"):
|
||||
"""
|
||||
This is a small util function ONLY meant to be used to initialize a Yunohost
|
||||
context when ran from tests or from scripts.
|
||||
"""
|
||||
init_logging(interface=interface, debug=debug, quiet=quiet, logdir=logdir)
|
||||
init_i18n()
|
||||
from moulinette.core import MoulinetteLock
|
||||
lock = MoulinetteLock("yunohost", timeout=30)
|
||||
lock.acquire()
|
||||
return lock
|
||||
|
||||
|
||||
def init_i18n():
|
||||
# This should only be called when not willing to go through moulinette.cli
|
||||
# or moulinette.api but still willing to call m18n.n/g...
|
||||
m18n.load_namespace('yunohost')
|
||||
m18n.set_locale(get_locale())
|
||||
|
||||
|
||||
def init_logging(interface="cli",
|
||||
debug=False,
|
||||
quiet=False,
|
||||
logdir="/var/log/yunohost"):
|
||||
|
||||
logfile = os.path.join(logdir, "yunohost-%s.log" % interface)
|
||||
|
||||
if not os.path.isdir(logdir):
|
||||
os.makedirs(logdir, 0750)
|
||||
|
||||
# ####################################################################### #
|
||||
# Logging configuration for CLI (or any other interface than api...) #
|
||||
# ####################################################################### #
|
||||
if interface != "api":
|
||||
configure_logging({
|
||||
'version': 1,
|
||||
'disable_existing_loggers': True,
|
||||
'formatters': {
|
||||
'tty-debug': {
|
||||
'format': '%(relativeCreated)-4d %(fmessage)s'
|
||||
},
|
||||
'precise': {
|
||||
'format': '%(asctime)-15s %(levelname)-8s %(name)s %(funcName)s - %(fmessage)s'
|
||||
},
|
||||
},
|
||||
'filters': {
|
||||
'action': {
|
||||
'()': 'moulinette.utils.log.ActionFilter',
|
||||
},
|
||||
},
|
||||
'handlers': {
|
||||
'tty': {
|
||||
'level': 'DEBUG' if debug else 'INFO',
|
||||
'class': 'moulinette.interfaces.cli.TTYHandler',
|
||||
'formatter': 'tty-debug' if debug else '',
|
||||
},
|
||||
'file': {
|
||||
'class': 'logging.FileHandler',
|
||||
'formatter': 'precise',
|
||||
'filename': logfile,
|
||||
'filters': ['action'],
|
||||
},
|
||||
},
|
||||
'loggers': {
|
||||
'yunohost': {
|
||||
'level': 'DEBUG',
|
||||
'handlers': ['file', 'tty'] if not quiet else ['file'],
|
||||
'propagate': False,
|
||||
},
|
||||
'moulinette': {
|
||||
'level': 'DEBUG',
|
||||
'handlers': [],
|
||||
'propagate': True,
|
||||
},
|
||||
'moulinette.interface': {
|
||||
'level': 'DEBUG',
|
||||
'handlers': ['file', 'tty'] if not quiet else ['file'],
|
||||
'propagate': False,
|
||||
},
|
||||
},
|
||||
'root': {
|
||||
'level': 'DEBUG',
|
||||
'handlers': ['file', 'tty'] if debug else ['file'],
|
||||
},
|
||||
})
|
||||
# ####################################################################### #
|
||||
# Logging configuration for API #
|
||||
# ####################################################################### #
|
||||
else:
|
||||
configure_logging({
|
||||
'version': 1,
|
||||
'disable_existing_loggers': True,
|
||||
'formatters': {
|
||||
'console': {
|
||||
'format': '%(relativeCreated)-5d %(levelname)-8s %(name)s %(funcName)s - %(fmessage)s'
|
||||
},
|
||||
'precise': {
|
||||
'format': '%(asctime)-15s %(levelname)-8s %(name)s %(funcName)s - %(fmessage)s'
|
||||
},
|
||||
},
|
||||
'filters': {
|
||||
'action': {
|
||||
'()': 'moulinette.utils.log.ActionFilter',
|
||||
},
|
||||
},
|
||||
'handlers': {
|
||||
'api': {
|
||||
'level': 'DEBUG' if debug else 'INFO',
|
||||
'class': 'moulinette.interfaces.api.APIQueueHandler',
|
||||
},
|
||||
'file': {
|
||||
'class': 'logging.handlers.WatchedFileHandler',
|
||||
'formatter': 'precise',
|
||||
'filename': logfile,
|
||||
'filters': ['action'],
|
||||
},
|
||||
'console': {
|
||||
'class': 'logging.StreamHandler',
|
||||
'formatter': 'console',
|
||||
'stream': 'ext://sys.stdout',
|
||||
'filters': ['action'],
|
||||
},
|
||||
},
|
||||
'loggers': {
|
||||
'yunohost': {
|
||||
'level': 'DEBUG',
|
||||
'handlers': ['file', 'api'] + ['console'] if debug else [],
|
||||
'propagate': False,
|
||||
},
|
||||
'moulinette': {
|
||||
'level': 'DEBUG',
|
||||
'handlers': [],
|
||||
'propagate': True,
|
||||
},
|
||||
},
|
||||
'root': {
|
||||
'level': 'DEBUG',
|
||||
'handlers': ['file'] + ['console'] if debug else [],
|
||||
},
|
||||
})
|
|
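A minimal usage sketch for the init() helper defined above, assuming this module is importable as the yunohost package and that MoulinetteLock exposes release() to pair with the acquire() call shown:

import yunohost

# Sets up logging and i18n, then takes the global moulinette lock so that no
# other yunohost operation can run concurrently with this script.
lock = yunohost.init(interface="cli", debug=True)
try:
    from yunohost.domain import domain_list
    print(domain_list()["domains"])
finally:
    lock.release()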
@ -35,14 +35,13 @@ import subprocess
|
|||
import glob
|
||||
import urllib
|
||||
from collections import OrderedDict
|
||||
from datetime import datetime
|
||||
|
||||
from moulinette import msignals, m18n, msettings
|
||||
from moulinette.utils.log import getActionLogger
|
||||
from moulinette.utils.network import download_json
|
||||
from moulinette.utils.filesystem import read_file, read_json, read_toml, read_yaml, write_to_file, write_to_json, write_to_yaml, chmod, chown, mkdir
|
||||
|
||||
from yunohost.service import service_log, service_status, _run_service_command
|
||||
from yunohost.service import service_status, _run_service_command
|
||||
from yunohost.utils import packages
|
||||
from yunohost.utils.error import YunohostError
|
||||
from yunohost.log import is_unit_operation, OperationLogger
|
||||
|
@ -172,7 +171,7 @@ def app_info(app, full=False):
|
|||
ret["manifest"] = local_manifest
|
||||
ret['settings'] = settings
|
||||
|
||||
absolute_app_name = app if "__" not in app else app[:app.index('__')] # idk this is the name of the app even for multiinstance apps (so wordpress__2 -> wordpress)
|
||||
absolute_app_name, _ = _parse_app_instance_name(app)
|
||||
ret["from_catalog"] = _load_apps_catalog()["apps"].get(absolute_app_name, {})
|
||||
ret['upgradable'] = _app_upgradable(ret)
|
||||
ret['supports_change_url'] = os.path.exists(os.path.join(APPS_SETTING_PATH, app, "scripts", "change_url"))
|
||||
|
@ -452,6 +451,7 @@ def app_upgrade(app=[], url=None, file=None):
|
|||
"""
|
||||
from yunohost.hook import hook_add, hook_remove, hook_exec, hook_callback
|
||||
from yunohost.permission import permission_sync_to_user
|
||||
from yunohost.regenconf import manually_modified_files
|
||||
|
||||
apps = app
|
||||
# If no app is specified, upgrade all apps
|
||||
|
@ -514,11 +514,14 @@ def app_upgrade(app=[], url=None, file=None):
|
|||
env_dict["YNH_APP_INSTANCE_NAME"] = app_instance_name
|
||||
env_dict["YNH_APP_INSTANCE_NUMBER"] = str(app_instance_nb)
|
||||
|
||||
# We'll check that the app didn't brutally edit some system configuration
|
||||
manually_modified_files_before_install = manually_modified_files()
|
||||
|
||||
# Attempt to patch legacy helpers ...
|
||||
_patch_legacy_helpers(extracted_app_folder)
|
||||
|
||||
# Apply dirty patch to make php5 apps compatible with php7
|
||||
_patch_php5(extracted_app_folder)
|
||||
_patch_legacy_php_versions(extracted_app_folder)
|
||||
|
||||
# Start register change on system
|
||||
related_to = [('app', app_instance_name)]
|
||||
|
@ -550,7 +553,7 @@ def app_upgrade(app=[], url=None, file=None):
|
|||
# Something wrong happened in Yunohost's code (most probably hook_exec)
|
||||
except Exception:
|
||||
import traceback
|
||||
error = m18n.n('unexpected_error', error=u"\n" + traceback.format_exc())
|
||||
error = m18n.n('unexpected_error', error="\n" + traceback.format_exc())
|
||||
logger.error(m18n.n("app_install_failed", app=app_instance_name, error=error))
|
||||
failure_message_with_debug_instructions = operation_logger.error(error)
|
||||
finally:
|
||||
|
@ -564,6 +567,12 @@ def app_upgrade(app=[], url=None, file=None):
|
|||
logger.error(m18n.n("app_upgrade_failed", app=app_instance_name, error=str(e)))
|
||||
failure_message_with_debug_instructions = operation_logger.error(str(e))
|
||||
|
||||
# We'll check that the app didn't brutally edit some system configuration
|
||||
manually_modified_files_after_install = manually_modified_files()
|
||||
manually_modified_files_by_app = set(manually_modified_files_after_install) - set(manually_modified_files_before_install)
|
||||
if manually_modified_files_by_app:
|
||||
logger.error("Packagers /!\\ This app manually modified some system configuration files! This should not happen! If you need to do so, you should implement a proper conf_regen hook. Those configuration were affected:\n - " + '\n -'.join(manually_modified_files_by_app))
|
||||
|
||||
# If upgrade failed or broke the system,
|
||||
# raise an error and interrupt all other pending upgrades
|
||||
if upgrade_failed or broke_the_system:
|
||||
|
@ -627,6 +636,7 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu
|
|||
from yunohost.hook import hook_add, hook_remove, hook_exec, hook_callback
|
||||
from yunohost.log import OperationLogger
|
||||
from yunohost.permission import user_permission_list, permission_create, permission_url, permission_delete, permission_sync_to_user, user_permission_update
|
||||
from yunohost.regenconf import manually_modified_files
|
||||
|
||||
# Fetch or extract sources
|
||||
if not os.path.exists(INSTALL_TMP):
|
||||
|
@ -714,27 +724,30 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu
|
|||
args_dict = {} if not args else \
|
||||
dict(urlparse.parse_qsl(args, keep_blank_values=True))
|
||||
args_odict = _parse_args_from_manifest(manifest, 'install', args=args_dict)
|
||||
args_list = [value[0] for value in args_odict.values()]
|
||||
args_list.append(app_instance_name)
|
||||
|
||||
# Validate domain / path availability for webapps
|
||||
_validate_and_normalize_webpath(manifest, args_odict, extracted_app_folder)
|
||||
|
||||
# build arg list
|
||||
args_list = [value[0] for value in args_odict.values()]
|
||||
args_list.append(app_instance_name)
|
||||
|
||||
# Attempt to patch legacy helpers ...
|
||||
_patch_legacy_helpers(extracted_app_folder)
|
||||
|
||||
# Apply dirty patch to make php5 apps compatible with php7
|
||||
_patch_php5(extracted_app_folder)
|
||||
_patch_legacy_php_versions(extracted_app_folder)
|
||||
|
||||
# Prepare env. var. to pass to script
|
||||
env_dict = _make_environment_dict(args_odict)
|
||||
env_dict["YNH_APP_ID"] = app_id
|
||||
env_dict["YNH_APP_INSTANCE_NAME"] = app_instance_name
|
||||
env_dict["YNH_APP_INSTANCE_NUMBER"] = str(instance_number)
|
||||
|
||||
# Start register change on system
|
||||
operation_logger.extra.update({'env': env_dict})
|
||||
|
||||
# We'll check that the app didn't brutally edit some system configuration
|
||||
manually_modified_files_before_install = manually_modified_files()
|
||||
|
||||
# Tell the operation_logger to redact all password-type args
|
||||
# Also redact the % escaped version of the password that might appear in
|
||||
# the 'args' section of metadata (relevant for password with non-alphanumeric char)
|
||||
|
@ -805,19 +818,25 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu
|
|||
# Something wrong happened in Yunohost's code (most probably hook_exec)
|
||||
except Exception as e:
|
||||
import traceback
|
||||
error = m18n.n('unexpected_error', error=u"\n" + traceback.format_exc())
|
||||
error = m18n.n('unexpected_error', error="\n" + traceback.format_exc())
|
||||
logger.error(m18n.n("app_install_failed", app=app_id, error=error))
|
||||
failure_message_with_debug_instructions = operation_logger.error(error)
|
||||
finally:
|
||||
# Whatever happened (install success or failure) we check if it broke the system
|
||||
# and warn the user about it
|
||||
try:
|
||||
broke_the_system = False
|
||||
_assert_system_is_sane_for_app(manifest, "post")
|
||||
except Exception as e:
|
||||
broke_the_system = True
|
||||
logger.error(m18n.n("app_install_failed", app=app_id, error=str(e)))
|
||||
failure_message_with_debug_instructions = operation_logger.error(str(e))
|
||||
# If success so far, validate that app didn't break important stuff
|
||||
if not install_failed:
|
||||
try:
|
||||
broke_the_system = False
|
||||
_assert_system_is_sane_for_app(manifest, "post")
|
||||
except Exception as e:
|
||||
broke_the_system = True
|
||||
logger.error(m18n.n("app_install_failed", app=app_id, error=str(e)))
|
||||
failure_message_with_debug_instructions = operation_logger.error(str(e))
|
||||
|
||||
# We'll check that the app didn't brutally edit some system configuration
|
||||
manually_modified_files_after_install = manually_modified_files()
|
||||
manually_modified_files_by_app = set(manually_modified_files_after_install) - set(manually_modified_files_before_install)
|
||||
if manually_modified_files_by_app:
|
||||
logger.error("Packagers /!\\ This app manually modified some system configuration files! This should not happen! If you need to do so, you should implement a proper conf_regen hook. Those configuration were affected:\n - " + '\n -'.join(manually_modified_files_by_app))
|
||||
|
||||
# If the install failed or broke the system, we remove it
|
||||
if install_failed or broke_the_system:
|
||||
|
@ -846,6 +865,7 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu
|
|||
os.path.join(extracted_app_folder, 'scripts/remove'),
|
||||
args=[app_instance_name], env=env_dict_remove
|
||||
)[0]
|
||||
|
||||
# Here again, calling hook_exec could fail miserably, or get
|
||||
# manually interrupted (by mistake or because script was stuck)
|
||||
# In that case we still want to proceed with the rest of the
|
||||
|
@ -853,7 +873,7 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu
|
|||
except (KeyboardInterrupt, EOFError, Exception):
|
||||
remove_retcode = -1
|
||||
import traceback
|
||||
logger.error(m18n.n('unexpected_error', error=u"\n" + traceback.format_exc()))
|
||||
logger.error(m18n.n('unexpected_error', error="\n" + traceback.format_exc()))
|
||||
|
||||
# Remove all permission in LDAP
|
||||
for permission_name in user_permission_list()["permissions"].keys():
|
||||
|
@ -926,12 +946,12 @@ def dump_app_log_extract_for_debugging(operation_logger):
|
|||
r"ynh_script_progression"
|
||||
]
|
||||
|
||||
filters = [re.compile(f) for f in filters]
|
||||
filters = [re.compile(f_) for f_ in filters]
|
||||
|
||||
lines_to_display = []
|
||||
for line in lines:
|
||||
|
||||
if not ": " in line.strip():
|
||||
if ": " not in line.strip():
|
||||
continue
|
||||
|
||||
# A line typically looks like
|
||||
|
@ -1013,7 +1033,7 @@ def app_remove(operation_logger, app):
|
|||
|
||||
# Apply dirty patch to make php5 apps compatible with php7 (e.g. the remove
|
||||
# script might date back from jessie install)
|
||||
_patch_php5(app_setting_path)
|
||||
_patch_legacy_php_versions(app_setting_path)
|
||||
|
||||
manifest = _get_manifest_of_app(app_setting_path)
|
||||
|
||||
|
@ -1042,7 +1062,7 @@ def app_remove(operation_logger, app):
|
|||
except (KeyboardInterrupt, EOFError, Exception):
|
||||
ret = -1
|
||||
import traceback
|
||||
logger.error(m18n.n('unexpected_error', error=u"\n" + traceback.format_exc()))
|
||||
logger.error(m18n.n('unexpected_error', error="\n" + traceback.format_exc()))
|
||||
|
||||
if ret == 0:
|
||||
logger.success(m18n.n('app_removed', app=app))
|
||||
|
@ -1339,7 +1359,7 @@ def app_ssowatconf():
|
|||
url = _sanitized_absolute_url(perm_info["url"])
|
||||
perm_info["url"] = url
|
||||
if "visitors" in perm_info["allowed"]:
|
||||
if url not in unprotected_urls:
|
||||
if url not in unprotected_urls and url not in skipped_urls:
|
||||
unprotected_urls.append(url)
|
||||
|
||||
# Legacy stuff : we remove now protected-urls that might have been declared as unprotected earlier...
|
||||
|
@ -1866,6 +1886,9 @@ def _get_app_settings(app_id):
|
|||
with open(os.path.join(
|
||||
APPS_SETTING_PATH, app_id, 'settings.yml')) as f:
|
||||
settings = yaml.load(f)
|
||||
# If label contains unicode char, this may later trigger issues when building strings...
|
||||
# FIXME: this should be propagated to read_yaml so that this fix applies everywhere I think...
|
||||
settings = {k:_encode_string(v) for k,v in settings.items()}
|
||||
if app_id == settings['id']:
|
||||
return settings
|
||||
except (IOError, TypeError, KeyError):
|
||||
|
@ -2177,12 +2200,14 @@ def _fetch_app_from_git(app):
|
|||
else:
|
||||
app_dict = _load_apps_catalog()["apps"]
|
||||
|
||||
if app not in app_dict:
|
||||
app_id, _ = _parse_app_instance_name(app)
|
||||
|
||||
if app_id not in app_dict:
|
||||
raise YunohostError('app_unknown')
|
||||
elif 'git' not in app_dict[app]:
|
||||
elif 'git' not in app_dict[app_id]:
|
||||
raise YunohostError('app_unsupported_remote_type')
|
||||
|
||||
app_info = app_dict[app]
|
||||
app_info = app_dict[app_id]
|
||||
app_info['manifest']['lastUpdate'] = app_info['lastUpdate']
|
||||
manifest = app_info['manifest']
|
||||
url = app_info['git']['url']
|
||||
|
@ -2321,6 +2346,11 @@ def _encode_string(value):
|
|||
|
||||
def _check_manifest_requirements(manifest, app_instance_name):
|
||||
"""Check if required packages are met from the manifest"""
|
||||
|
||||
packaging_format = int(manifest.get('packaging_format', 0))
|
||||
if packaging_format not in [0, 1]:
|
||||
raise YunohostError("app_packaging_format_not_supported")
|
||||
|
||||
requirements = manifest.get('requirements', dict())
|
||||
|
||||
if not requirements:
|
||||
|
@ -2331,6 +2361,7 @@ def _check_manifest_requirements(manifest, app_instance_name):
|
|||
# Iterate over requirements
|
||||
for pkgname, spec in requirements.items():
|
||||
if not packages.meets_version_specifier(pkgname, spec):
|
||||
version = packages.ynh_packages_version()[pkgname]["version"]
|
||||
raise YunohostError('app_requirements_unmeet',
|
||||
pkgname=pkgname, version=version,
|
||||
spec=spec, app=app_instance_name)
|
||||
|
@ -2382,62 +2413,70 @@ def _parse_args_for_action(action, args={}):
|
|||
return _parse_args_in_yunohost_format(args, action_args)
|
||||
|
||||
|
||||
def _parse_args_in_yunohost_format(args, action_args):
|
||||
"""Parse arguments store in either manifest.json or actions.json
|
||||
def _parse_args_in_yunohost_format(user_answers, argument_questions):
|
||||
"""Parse arguments store in either manifest.json or actions.json or from a
|
||||
config panel against the user answers when they are present.
|
||||
|
||||
Keyword arguments:
|
||||
user_answers -- a dictionnary of arguments from the user (generally
|
||||
empty in CLI, filed from the admin interface)
|
||||
argument_questions -- the arguments description store in yunohost
|
||||
format from actions.json/toml, manifest.json/toml
|
||||
or config_panel.json/toml
|
||||
"""
|
||||
from yunohost.domain import domain_list, _get_maindomain
|
||||
from yunohost.user import user_info, user_list
|
||||
from yunohost.user import user_list
|
||||
|
||||
args_dict = OrderedDict()
|
||||
parsed_answers_dict = OrderedDict()
|
||||
|
||||
for arg in action_args:
|
||||
arg_name = arg['name']
|
||||
arg_type = arg.get('type', 'string')
|
||||
arg_default = arg.get('default', None)
|
||||
arg_choices = arg.get('choices', [])
|
||||
arg_value = None
|
||||
for question in argument_questions:
|
||||
question_name = question['name']
|
||||
question_type = question.get('type', 'string')
|
||||
question_default = question.get('default', None)
|
||||
question_choices = question.get('choices', [])
|
||||
question_value = None
|
||||
|
||||
# Transpose default value for boolean type and set it to
|
||||
# false if not defined.
|
||||
if arg_type == 'boolean':
|
||||
arg_default = 1 if arg_default else 0
|
||||
if question_type == 'boolean':
|
||||
question_default = 1 if question_default else 0
|
||||
|
||||
# do not print for webadmin
|
||||
if arg_type == 'display_text' and msettings.get('interface') != 'api':
|
||||
print(_value_for_locale(arg['ask']))
|
||||
if question_type == 'display_text' and msettings.get('interface') != 'api':
|
||||
print(_value_for_locale(question['ask']))
|
||||
continue
|
||||
|
||||
# Attempt to retrieve argument value
|
||||
if arg_name in args:
|
||||
arg_value = args[arg_name]
|
||||
if question_name in user_answers:
|
||||
question_value = user_answers[question_name]
|
||||
else:
|
||||
if 'ask' in arg:
|
||||
if 'ask' in question:
|
||||
# Retrieve proper ask string
|
||||
ask_string = _value_for_locale(arg['ask'])
|
||||
text_for_user_input_in_cli = _value_for_locale(question['ask'])
|
||||
|
||||
# Append extra strings
|
||||
if arg_type == 'boolean':
|
||||
ask_string += ' [yes | no]'
|
||||
elif arg_choices:
|
||||
ask_string += ' [{0}]'.format(' | '.join(arg_choices))
|
||||
if question_type == 'boolean':
|
||||
text_for_user_input_in_cli += ' [yes | no]'
|
||||
elif question_choices:
|
||||
text_for_user_input_in_cli += ' [{0}]'.format(' | '.join(question_choices))
|
||||
|
||||
if arg_default is not None:
|
||||
if arg_type == 'boolean':
|
||||
ask_string += ' (default: {0})'.format("yes" if arg_default == 1 else "no")
|
||||
if question_default is not None:
|
||||
if question_type == 'boolean':
|
||||
text_for_user_input_in_cli += ' (default: {0})'.format("yes" if question_default == 1 else "no")
|
||||
else:
|
||||
ask_string += ' (default: {0})'.format(arg_default)
|
||||
text_for_user_input_in_cli += ' (default: {0})'.format(question_default)
|
||||
|
||||
# Check for a password argument
|
||||
is_password = True if arg_type == 'password' else False
|
||||
is_password = True if question_type == 'password' else False
|
||||
|
||||
if arg_type == 'domain':
|
||||
arg_default = _get_maindomain()
|
||||
ask_string += ' (default: {0})'.format(arg_default)
|
||||
if question_type == 'domain':
|
||||
question_default = _get_maindomain()
|
||||
text_for_user_input_in_cli += ' (default: {0})'.format(question_default)
|
||||
msignals.display(m18n.n('domains_available'))
|
||||
for domain in domain_list()['domains']:
|
||||
msignals.display("- {}".format(domain))
|
||||
|
||||
elif arg_type == 'user':
|
||||
elif question_type == 'user':
|
||||
msignals.display(m18n.n('users_available'))
|
||||
users = user_list()['users']
|
||||
for user in users.keys():
|
||||
|
@ -2449,66 +2488,66 @@ def _parse_args_in_yunohost_format(args, action_args):
|
|||
arg_default = user
|
||||
ask_string += ' (default: {0})'.format(arg_default)
|
||||
|
||||
elif arg_type == 'password':
|
||||
elif question_type == 'password':
|
||||
msignals.display(m18n.n('good_practices_about_user_password'))
|
||||
|
||||
try:
|
||||
input_string = msignals.prompt(ask_string, is_password)
|
||||
input_string = msignals.prompt(text_for_user_input_in_cli, is_password)
|
||||
except NotImplementedError:
|
||||
input_string = None
|
||||
if (input_string == '' or input_string is None) \
|
||||
and arg_default is not None:
|
||||
arg_value = arg_default
|
||||
and question_default is not None:
|
||||
question_value = question_default
|
||||
else:
|
||||
arg_value = input_string
|
||||
elif arg_default is not None:
|
||||
arg_value = arg_default
|
||||
question_value = input_string
|
||||
elif question_default is not None:
|
||||
question_value = question_default
|
||||
|
||||
# If the value is empty (none or '')
|
||||
# then check if arg is optional or not
|
||||
if arg_value is None or arg_value == '':
|
||||
if arg.get("optional", False):
|
||||
# then check if question is optional or not
|
||||
if question_value is None or question_value == '':
|
||||
if question.get("optional", False):
|
||||
# Argument is optional, keep an empty value
|
||||
# and that's all for this arg !
|
||||
args_dict[arg_name] = ('', arg_type)
|
||||
# and that's all for this question!
|
||||
parsed_answers_dict[question_name] = ('', question_type)
|
||||
continue
|
||||
else:
|
||||
# The argument is required !
|
||||
raise YunohostError('app_argument_required', name=arg_name)
|
||||
raise YunohostError('app_argument_required', name=question_name)
|
||||
|
||||
# Validate argument choice
|
||||
if arg_choices and arg_value not in arg_choices:
|
||||
raise YunohostError('app_argument_choice_invalid', name=arg_name, choices=', '.join(arg_choices))
|
||||
if question_choices and question_value not in question_choices:
|
||||
raise YunohostError('app_argument_choice_invalid', name=question_name, choices=', '.join(question_choices))
|
||||
|
||||
# Validate argument type
|
||||
if arg_type == 'domain':
|
||||
if arg_value not in domain_list()['domains']:
|
||||
raise YunohostError('app_argument_invalid', name=arg_name, error=m18n.n('domain_unknown'))
|
||||
elif arg_type == 'user':
|
||||
if not arg_value in user_list()["users"].keys():
|
||||
raise YunohostError('app_argument_invalid', name=arg_name, error=m18n.n('user_unknown', user=arg_value))
|
||||
elif arg_type == 'app':
|
||||
if not _is_installed(arg_value):
|
||||
raise YunohostError('app_argument_invalid', name=arg_name, error=m18n.n('app_unknown'))
|
||||
elif arg_type == 'boolean':
|
||||
if isinstance(arg_value, bool):
|
||||
arg_value = 1 if arg_value else 0
|
||||
if question_type == 'domain':
|
||||
if question_value not in domain_list()['domains']:
|
||||
raise YunohostError('app_argument_invalid', name=question_name, error=m18n.n('domain_unknown'))
|
||||
elif question_type == 'user':
|
||||
if question_value not in user_list()["users"].keys():
|
||||
raise YunohostError('app_argument_invalid', name=question_name, error=m18n.n('user_unknown', user=question_value))
|
||||
elif question_type == 'app':
|
||||
if not _is_installed(question_value):
|
||||
raise YunohostError('app_argument_invalid', name=question_name, error=m18n.n('app_unknown'))
|
||||
elif question_type == 'boolean':
|
||||
if isinstance(question_value, bool):
|
||||
question_value = 1 if question_value else 0
|
||||
else:
|
||||
if str(arg_value).lower() in ["1", "yes", "y"]:
|
||||
arg_value = 1
|
||||
elif str(arg_value).lower() in ["0", "no", "n"]:
|
||||
arg_value = 0
|
||||
if str(question_value).lower() in ["1", "yes", "y"]:
|
||||
question_value = 1
|
||||
elif str(question_value).lower() in ["0", "no", "n"]:
|
||||
question_value = 0
|
||||
else:
|
||||
raise YunohostError('app_argument_choice_invalid', name=arg_name, choices='yes, no, y, n, 1, 0')
|
||||
elif arg_type == 'password':
|
||||
raise YunohostError('app_argument_choice_invalid', name=question_name, choices='yes, no, y, n, 1, 0')
|
||||
elif question_type == 'password':
|
||||
forbidden_chars = "{}"
|
||||
if any(char in arg_value for char in forbidden_chars):
|
||||
if any(char in question_value for char in forbidden_chars):
|
||||
raise YunohostError('pattern_password_app', forbidden_chars=forbidden_chars)
|
||||
from yunohost.utils.password import assert_password_is_strong_enough
|
||||
assert_password_is_strong_enough('user', arg_value)
|
||||
args_dict[arg_name] = (arg_value, arg_type)
|
||||
assert_password_is_strong_enough('user', question_value)
|
||||
parsed_answers_dict[question_name] = (question_value, question_type)
|
||||
|
||||
return args_dict
|
||||
return parsed_answers_dict
|
||||
|
||||
|
||||
def _validate_and_normalize_webpath(manifest, args_dict, app_folder):
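A self-contained sketch of the boolean-normalization rule used by _parse_args_in_yunohost_format() above (an illustrative helper, not the actual yunohost function); every parsed question ends up in parsed_answers_dict as a (value, type) tuple keyed by the question name:

def normalize_boolean_answer(value):
    """Map the accepted spellings of a boolean answer to 1/0, as above."""
    if isinstance(value, bool):
        return 1 if value else 0
    if str(value).lower() in ("1", "yes", "y"):
        return 1
    if str(value).lower() in ("0", "no", "n"):
        return 0
    raise ValueError("expected one of: yes, no, y, n, 1, 0")

assert normalize_boolean_answer("Yes") == 1
assert normalize_boolean_answer(False) == 0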
|
||||
|
@ -2651,12 +2690,6 @@ def _read_apps_catalog_list():
|
|||
Read the json corresponding to the list of apps catalogs
|
||||
"""
|
||||
|
||||
# Legacy code - can be removed after moving to buster (if the migration got merged before buster)
|
||||
if os.path.exists('/etc/yunohost/appslists.json'):
|
||||
from yunohost.tools import _get_migration_by_name
|
||||
migration = _get_migration_by_name("futureproof_apps_catalog_system")
|
||||
migration.run()
|
||||
|
||||
try:
|
||||
list_ = read_yaml(APPS_CATALOG_CONF)
|
||||
# Support the case where file exists but is empty
|
||||
|
@ -2793,21 +2826,6 @@ def is_true(arg):
|
|||
return True if arg else False
|
||||
|
||||
|
||||
def random_password(length=8):
|
||||
"""
|
||||
Generate a random string
|
||||
|
||||
Keyword arguments:
|
||||
length -- The string length to generate
|
||||
|
||||
"""
|
||||
import string
|
||||
import random
|
||||
|
||||
char_set = string.ascii_uppercase + string.digits + string.ascii_lowercase
|
||||
return ''.join([random.SystemRandom().choice(char_set) for x in range(length)])
|
||||
|
||||
|
||||
def unstable_apps():
|
||||
|
||||
output = []
|
||||
|
@ -2828,8 +2846,8 @@ def _assert_system_is_sane_for_app(manifest, when):
|
|||
|
||||
# Some apps use php-fpm or php5-fpm which is now php7.0-fpm
|
||||
def replace_alias(service):
|
||||
if service in ["php-fpm", "php5-fpm"]:
|
||||
return "php7.0-fpm"
|
||||
if service in ["php-fpm", "php5-fpm", "php7.0-fpm"]:
|
||||
return "php7.3-fpm"
|
||||
else:
|
||||
return service
|
||||
services = [replace_alias(s) for s in services]
|
||||
|
@ -2837,7 +2855,7 @@ def _assert_system_is_sane_for_app(manifest, when):
|
|||
# We only check those, mostly to ignore "custom" services
|
||||
# (added by apps) and because those are the most popular
|
||||
# services
|
||||
service_filter = ["nginx", "php7.0-fpm", "mysql", "postfix"]
|
||||
service_filter = ["nginx", "php7.3-fpm", "mysql", "postfix"]
|
||||
services = [str(s) for s in services if s in service_filter]
|
||||
|
||||
if "nginx" not in services:
|
||||
|
@ -2862,11 +2880,24 @@ def _assert_system_is_sane_for_app(manifest, when):
|
|||
raise YunohostError("this_action_broke_dpkg")
|
||||
|
||||
|
||||
def _patch_php5(app_folder):
|
||||
LEGACY_PHP_VERSION_REPLACEMENTS = [
|
||||
("/etc/php5", "/etc/php/7.3"),
|
||||
("/etc/php/7.0", "/etc/php/7.3"),
|
||||
("/var/run/php5-fpm", "/var/run/php/php7.3-fpm"),
|
||||
("/var/run/php/php7.0-fpm", "/var/run/php/php7.3-fpm"),
|
||||
("php5", "php7.3"),
|
||||
("php7.0", "php7.3"),
|
||||
('phpversion="${phpversion:-7.0}"', 'phpversion="${phpversion:-7.3}"'), # Many helpers like the composer ones use 7.0 by default ...
|
||||
('"$phpversion" == "7.0"', '$(bc <<< "$phpversion >= 7.3") -eq 1') # patch ynh_install_php to refuse installing/removing php <= 7.3
|
||||
]
|
||||
|
||||
|
||||
def _patch_legacy_php_versions(app_folder):
|
||||
|
||||
files_to_patch = []
|
||||
files_to_patch.extend(glob.glob("%s/conf/*" % app_folder))
|
||||
files_to_patch.extend(glob.glob("%s/scripts/*" % app_folder))
|
||||
files_to_patch.extend(glob.glob("%s/scripts/*/*" % app_folder))
|
||||
files_to_patch.extend(glob.glob("%s/scripts/.*" % app_folder))
|
||||
files_to_patch.append("%s/manifest.json" % app_folder)
|
||||
files_to_patch.append("%s/manifest.toml" % app_folder)
|
||||
|
@ -2877,12 +2908,32 @@ def _patch_php5(app_folder):
|
|||
if not os.path.isfile(filename):
|
||||
continue
|
||||
|
||||
c = "sed -i -e 's@/etc/php5@/etc/php/7.0@g' " \
|
||||
"-e 's@/var/run/php5-fpm@/var/run/php/php7.0-fpm@g' " \
|
||||
"-e 's@php5@php7.0@g' " \
|
||||
"%s" % filename
|
||||
c = "sed -i " \
|
||||
+ "".join("-e 's@{pattern}@{replace}@g' ".format(pattern=p, replace=r) for p, r in LEGACY_PHP_VERSION_REPLACEMENTS) \
|
||||
+ "%s" % filename
|
||||
os.system(c)
|
||||
|
||||
|
||||
def _patch_legacy_php_versions_in_settings(app_folder):
|
||||
|
||||
settings = read_yaml(os.path.join(app_folder, 'settings.yml'))
|
||||
|
||||
if settings.get("fpm_config_dir") == "/etc/php/7.0/fpm":
|
||||
settings["fpm_config_dir"] = "/etc/php/7.3/fpm"
|
||||
if settings.get("fpm_service") == "php7.0-fpm":
|
||||
settings["fpm_service"] = "php7.3-fpm"
|
||||
if settings.get("phpversion") == "7.0":
|
||||
settings["phpversion"] = "7.3"
|
||||
|
||||
# We delete these checksums otherwise the file will appear as manually modified
|
||||
list_to_remove = ["checksum__etc_php_7.0_fpm_pool",
|
||||
"checksum__etc_nginx_conf.d"]
|
||||
settings = {k: v for k, v in settings.items()
|
||||
if not any(k.startswith(to_remove) for to_remove in list_to_remove)}
|
||||
|
||||
write_to_yaml(app_folder + '/settings.yml', settings)
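A hedged before/after illustration of what _patch_legacy_php_versions_in_settings() does to an app's settings.yml; the keys are the ones handled above, the values are made up:

before = {
    "fpm_config_dir": "/etc/php/7.0/fpm",
    "fpm_service": "php7.0-fpm",
    "phpversion": "7.0",
    "checksum__etc_php_7.0_fpm_pool_myapp.conf": "d41d8cd9...",
}
# After patching, the settings would read:
# {"fpm_config_dir": "/etc/php/7.3/fpm",
#  "fpm_service": "php7.3-fpm",
#  "phpversion": "7.3"}
# The checksum__etc_php_7.0_fpm_pool* and checksum__etc_nginx_conf.d* keys are
# dropped on purpose so regen-conf does not flag the files as manually modified.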
|
||||
|
||||
|
||||
def _patch_legacy_helpers(app_folder):
|
||||
|
||||
files_to_patch = []
|
||||
|
|
|
@ -43,7 +43,13 @@ from moulinette.utils.log import getActionLogger
|
|||
from moulinette.utils.filesystem import read_file, mkdir, write_to_yaml, read_yaml
|
||||
|
||||
from yunohost.app import (
|
||||
app_info, _is_installed, _parse_app_instance_name, _patch_php5, dump_app_log_extract_for_debugging, _patch_legacy_helpers
|
||||
app_info, _is_installed,
|
||||
_parse_app_instance_name,
|
||||
dump_app_log_extract_for_debugging,
|
||||
_patch_legacy_helpers,
|
||||
_patch_legacy_php_versions,
|
||||
_patch_legacy_php_versions_in_settings,
|
||||
LEGACY_PHP_VERSION_REPLACEMENTS
|
||||
)
|
||||
from yunohost.hook import (
|
||||
hook_list, hook_info, hook_callback, hook_exec, CUSTOM_HOOK_FOLDER
|
||||
|
@ -219,8 +225,8 @@ class BackupManager():
|
|||
backup_manager = BackupManager(name="mybackup", description="bkp things")
|
||||
|
||||
# Add backup method to apply
|
||||
backup_manager.add(BackupMethod.create('copy','/mnt/local_fs'))
|
||||
backup_manager.add(BackupMethod.create('tar','/mnt/remote_fs'))
|
||||
backup_manager.add(BackupMethod.create('copy', backup_manager, '/mnt/local_fs'))
|
||||
backup_manager.add(BackupMethod.create('tar', backup_manager, '/mnt/remote_fs'))
|
||||
|
||||
# Define targets to be backuped
|
||||
backup_manager.set_system_targets(["data"])
|
||||
|
@ -752,7 +758,7 @@ class BackupManager():
|
|||
|
||||
for method in self.methods:
|
||||
logger.debug(m18n.n('backup_applying_method_' + method.method_name))
|
||||
method.mount_and_backup(self)
|
||||
method.mount_and_backup()
|
||||
logger.debug(m18n.n('backup_method_' + method.method_name + '_finished'))
|
||||
|
||||
def _compute_backup_size(self):
|
||||
|
@ -851,7 +857,7 @@ class RestoreManager():
|
|||
self.info = backup_info(name, with_details=True)
|
||||
self.archive_path = self.info['path']
|
||||
self.name = name
|
||||
self.method = BackupMethod.create(method)
|
||||
self.method = BackupMethod.create(method, self)
|
||||
self.targets = BackupRestoreTargetsManager()
|
||||
|
||||
#
|
||||
|
@ -956,6 +962,9 @@ class RestoreManager():
|
|||
# These are the hooks on the current installation
|
||||
available_restore_system_hooks = hook_list("restore")["hooks"]
|
||||
|
||||
custom_restore_hook_folder = os.path.join(CUSTOM_HOOK_FOLDER, 'restore')
|
||||
filesystem.mkdir(custom_restore_hook_folder, 755, parents=True, force=True)
|
||||
|
||||
for system_part in target_list:
|
||||
# By default, we'll use the restore hooks on the current install
|
||||
# if available
|
||||
|
@ -967,24 +976,25 @@ class RestoreManager():
|
|||
continue
|
||||
|
||||
# Otherwise, attempt to find it (or them?) in the archive
|
||||
hook_paths = '{:s}/hooks/restore/*-{:s}'.format(self.work_dir, system_part)
|
||||
hook_paths = glob(hook_paths)
|
||||
|
||||
# If we didn't find it, we ain't gonna be able to restore it
|
||||
if len(hook_paths) == 0:
|
||||
if system_part not in self.info['system'] or\
|
||||
'paths' not in self.info['system'][system_part] or\
|
||||
len(self.info['system'][system_part]['paths']) == 0:
|
||||
logger.exception(m18n.n('restore_hook_unavailable', part=system_part))
|
||||
self.targets.set_result("system", system_part, "Skipped")
|
||||
continue
|
||||
|
||||
hook_paths = self.info['system'][system_part]['paths']
|
||||
hook_paths = [ 'hooks/restore/%s' % os.path.basename(p) for p in hook_paths ]
|
||||
|
||||
# Otherwise, add it from the archive to the system
|
||||
# FIXME: Refactor hook_add and use it instead
|
||||
custom_restore_hook_folder = os.path.join(CUSTOM_HOOK_FOLDER, 'restore')
|
||||
filesystem.mkdir(custom_restore_hook_folder, 755, True)
|
||||
for hook_path in hook_paths:
|
||||
logger.debug("Adding restoration script '%s' to the system "
|
||||
"from the backup archive '%s'", hook_path,
|
||||
self.archive_path)
|
||||
shutil.copy(hook_path, custom_restore_hook_folder)
|
||||
self.method.copy(hook_path, custom_restore_hook_folder)
|
||||
|
||||
def set_apps_targets(self, apps=[]):
|
||||
"""
|
||||
|
@ -1000,10 +1010,20 @@ class RestoreManager():
|
|||
logger.error(m18n.n('backup_archive_app_not_found',
|
||||
app=app))
|
||||
|
||||
self.targets.set_wanted("apps",
|
||||
apps,
|
||||
self.info['apps'].keys(),
|
||||
unknown_error)
|
||||
to_be_restored = self.targets.set_wanted("apps",
|
||||
apps,
|
||||
self.info['apps'].keys(),
|
||||
unknown_error)
|
||||
|
||||
# If all apps to restore are already installed, stop right here.
|
||||
# Otherwise, if at least one app can be restored, we keep going on
|
||||
# because those which can be restored will indeed be restored
|
||||
already_installed = [app for app in to_be_restored if _is_installed(app)]
|
||||
if already_installed != []:
|
||||
if already_installed == to_be_restored:
|
||||
raise YunohostError("restore_already_installed_apps", apps=', '.join(already_installed))
|
||||
else:
|
||||
logger.warning(m18n.n("restore_already_installed_apps", apps=', '.join(already_installed)))
|
||||
|
||||
#
|
||||
# Archive mounting #
|
||||
|
@ -1044,7 +1064,7 @@ class RestoreManager():
|
|||
|
||||
filesystem.mkdir(self.work_dir, parents=True)
|
||||
|
||||
self.method.mount(self)
|
||||
self.method.mount()
|
||||
|
||||
self._read_info_files()
|
||||
|
||||
|
@ -1127,7 +1147,7 @@ class RestoreManager():
|
|||
self._postinstall_if_needed()
|
||||
|
||||
# Apply dirty patch to redirect php5 file on php7
|
||||
self._patch_backup_csv_file()
|
||||
self._patch_legacy_php_versions_in_csv_file()
|
||||
|
||||
self._restore_system()
|
||||
self._restore_apps()
|
||||
|
@ -1136,9 +1156,9 @@ class RestoreManager():
|
|||
finally:
|
||||
self.clean()
|
||||
|
||||
def _patch_backup_csv_file(self):
|
||||
def _patch_legacy_php_versions_in_csv_file(self):
|
||||
"""
|
||||
Apply dirty patch to redirect php5 file on php7
|
||||
Apply dirty patch to redirect php5 and php7.0 files to php7.3
|
||||
"""
|
||||
|
||||
backup_csv = os.path.join(self.work_dir, 'backup.csv')
|
||||
|
@ -1146,32 +1166,27 @@ class RestoreManager():
|
|||
if not os.path.isfile(backup_csv):
|
||||
return
|
||||
|
||||
contains_php5 = False
|
||||
replaced_something = False
|
||||
with open(backup_csv) as csvfile:
|
||||
reader = csv.DictReader(csvfile, fieldnames=['source', 'dest'])
|
||||
newlines = []
|
||||
for row in reader:
|
||||
if 'php5' in row['source']:
|
||||
contains_php5 = True
|
||||
row['source'] = row['source'].replace('/etc/php5', '/etc/php/7.0') \
|
||||
.replace('/var/run/php5-fpm', '/var/run/php/php7.0-fpm') \
|
||||
.replace('php5', 'php7')
|
||||
for pattern, replace in LEGACY_PHP_VERSION_REPLACEMENTS:
|
||||
if pattern in row['source']:
|
||||
replaced_something = True
|
||||
row['source'] = row['source'].replace(pattern, replace)
|
||||
|
||||
newlines.append(row)
|
||||
|
||||
if not contains_php5:
|
||||
if not replaced_something:
|
||||
return
|
||||
|
||||
try:
|
||||
with open(backup_csv, 'w') as csvfile:
|
||||
writer = csv.DictWriter(csvfile,
|
||||
fieldnames=['source', 'dest'],
|
||||
quoting=csv.QUOTE_ALL)
|
||||
for row in newlines:
|
||||
writer.writerow(row)
|
||||
except (IOError, OSError, csv.Error) as e:
|
||||
logger.warning(m18n.n('backup_php5_to_php7_migration_may_fail',
|
||||
error=str(e)))
|
||||
with open(backup_csv, 'w') as csvfile:
|
||||
writer = csv.DictWriter(csvfile,
|
||||
fieldnames=['source', 'dest'],
|
||||
quoting=csv.QUOTE_ALL)
|
||||
for row in newlines:
|
||||
writer.writerow(row)
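A hedged example of the rewrite applied to a backup.csv row by the loop above; only the source column is patched, and the paths are illustrative:

row = {"source": "/etc/php5/fpm/pool.d/myapp.conf",
       "dest": "apps/myapp/backup/etc/php5/fpm/pool.d/myapp.conf"}
# After applying LEGACY_PHP_VERSION_REPLACEMENTS to row["source"]:
# {"source": "/etc/php/7.3/fpm/pool.d/myapp.conf",
#  "dest": "apps/myapp/backup/etc/php5/fpm/pool.d/myapp.conf"}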
|
||||
|
||||
def _restore_system(self):
|
||||
""" Restore user and system parts """
|
||||
|
@ -1230,12 +1245,11 @@ class RestoreManager():
|
|||
#
|
||||
# Legacy code
|
||||
if not "all_users" in user_group_list()["groups"].keys():
|
||||
from yunohost.tools import _get_migration_by_name
|
||||
setup_group_permission = _get_migration_by_name("setup_group_permission")
|
||||
from yunohost.utils.legacy import SetupGroupPermissions
|
||||
# Update LDAP schema restart slapd
|
||||
logger.info(m18n.n("migration_0011_update_LDAP_schema"))
|
||||
regen_conf(names=['slapd'], force=True)
|
||||
setup_group_permission.migrate_LDAP_db()
|
||||
SetupGroupPermissions.migrate_LDAP_db()
|
||||
|
||||
# Remove all permission for all app which is still in the LDAP
|
||||
for permission_name in user_permission_list(ignore_system_perms=True)["permissions"].keys():
|
||||
|
@ -1297,13 +1311,6 @@ class RestoreManager():
|
|||
else:
|
||||
shutil.copy2(s, d)
|
||||
|
||||
# Start register change on system
|
||||
related_to = [('app', app_instance_name)]
|
||||
operation_logger = OperationLogger('backup_restore_app', related_to)
|
||||
operation_logger.start()
|
||||
|
||||
logger.info(m18n.n("app_start_restore", app=app_instance_name))
|
||||
|
||||
# Check if the app is not already installed
|
||||
if _is_installed(app_instance_name):
|
||||
logger.error(m18n.n('restore_already_installed_app',
|
||||
|
@ -1311,6 +1318,13 @@ class RestoreManager():
|
|||
self.targets.set_result("apps", app_instance_name, "Error")
|
||||
return
|
||||
|
||||
# Start register change on system
|
||||
related_to = [('app', app_instance_name)]
|
||||
operation_logger = OperationLogger('backup_restore_app', related_to)
|
||||
operation_logger.start()
|
||||
|
||||
logger.info(m18n.n("app_start_restore", app=app_instance_name))
|
||||
|
||||
app_dir_in_archive = os.path.join(self.work_dir, 'apps', app_instance_name)
|
||||
app_backup_in_archive = os.path.join(app_dir_in_archive, 'backup')
|
||||
app_settings_in_archive = os.path.join(app_dir_in_archive, 'settings')
|
||||
|
@ -1320,7 +1334,8 @@ class RestoreManager():
|
|||
_patch_legacy_helpers(app_settings_in_archive)
|
||||
|
||||
# Apply dirty patch to make php5 apps compatible with php7
|
||||
_patch_php5(app_settings_in_archive)
|
||||
_patch_legacy_php_versions(app_settings_in_archive)
|
||||
_patch_legacy_php_versions_in_settings(app_settings_in_archive)
|
||||
|
||||
# Delete _common.sh file in backup
|
||||
common_file = os.path.join(app_backup_in_archive, '_common.sh')
|
||||
|
@ -1375,9 +1390,8 @@ class RestoreManager():
|
|||
else:
|
||||
# Otherwise, we need to migrate the legacy permissions of this
|
||||
# app (included in its settings.yml)
|
||||
from yunohost.tools import _get_migration_by_name
|
||||
setup_group_permission = _get_migration_by_name("setup_group_permission")
|
||||
setup_group_permission.migrate_app_permission(app=app_instance_name)
|
||||
from yunohost.utils.legacy import SetupGroupPermissions
|
||||
SetupGroupPermissions.migrate_app_permission(app=app_instance_name)
|
||||
|
||||
# Prepare env. var. to pass to script
|
||||
env_dict = self._get_env_var(app_instance_name)
|
||||
|
@ -1499,19 +1513,19 @@ class BackupMethod(object):
|
|||
method_name
|
||||
|
||||
Public methods:
|
||||
mount_and_backup(self, backup_manager)
|
||||
mount(self, restore_manager)
|
||||
mount_and_backup(self)
|
||||
mount(self)
|
||||
create(cls, method, **kwargs)
|
||||
|
||||
Usage:
|
||||
method = BackupMethod.create("tar")
|
||||
method.mount_and_backup(backup_manager)
|
||||
method = BackupMethod.create("tar", backup_manager)
|
||||
method.mount_and_backup()
|
||||
#or
|
||||
method = BackupMethod.create("copy")
|
||||
method.mount(restore_manager)
|
||||
method = BackupMethod.create("copy", restore_manager)
|
||||
method.mount()
|
||||
"""
|
||||
|
||||
def __init__(self, repo=None):
|
||||
def __init__(self, manager, repo=None):
|
||||
"""
|
||||
BackupMethod constructors
|
||||
|
||||
|
@ -1524,6 +1538,7 @@ class BackupMethod(object):
|
|||
BackupRepository object. If None, the default repo is used :
|
||||
/home/yunohost.backup/archives/
|
||||
"""
|
||||
self.manager = manager
|
||||
self.repo = ARCHIVES_PATH if repo is None else repo
|
||||
|
||||
@property
|
||||
|
@ -1569,18 +1584,13 @@ class BackupMethod(object):
|
|||
"""
|
||||
return False
|
||||
|
||||
def mount_and_backup(self, backup_manager):
|
||||
def mount_and_backup(self):
|
||||
"""
|
||||
Run the backup on files listed by the BackupManager instance
|
||||
|
||||
This method shouldn't be overrided, prefer overriding self.backup() and
|
||||
self.clean()
|
||||
|
||||
Args:
|
||||
backup_manager -- (BackupManager) A backup manager instance that has
|
||||
already done the files collection step.
|
||||
"""
|
||||
self.manager = backup_manager
|
||||
if self.need_mount():
|
||||
self._organize_files()
|
||||
|
||||
|
@ -1589,17 +1599,13 @@ class BackupMethod(object):
|
|||
finally:
|
||||
self.clean()
|
||||
|
||||
def mount(self, restore_manager):
|
||||
def mount(self):
|
||||
"""
|
||||
Mount the archive from RestoreManager instance in the working directory
|
||||
|
||||
This method should be extended.
|
||||
|
||||
Args:
|
||||
restore_manager -- (RestoreManager) A restore manager instance
|
||||
contains an archive to restore.
|
||||
"""
|
||||
self.manager = restore_manager
|
||||
pass
|
||||
|
||||
def clean(self):
|
||||
"""
|
||||
|
@ -1744,7 +1750,7 @@ class BackupMethod(object):
|
|||
shutil.copy(path['source'], dest)
|
||||
|
||||
@classmethod
|
||||
def create(cls, method, *args):
|
||||
def create(cls, method, manager, *args):
|
||||
"""
|
||||
Factory method to create instance of BackupMethod
|
||||
|
||||
|
@ -1760,7 +1766,7 @@ class BackupMethod(object):
|
|||
if not isinstance(method, basestring):
|
||||
methods = []
|
||||
for m in method:
|
||||
methods.append(BackupMethod.create(m, *args))
|
||||
methods.append(BackupMethod.create(m, manager, *args))
|
||||
return methods
|
||||
|
||||
bm_class = {
|
||||
|
@ -1769,9 +1775,9 @@ class BackupMethod(object):
|
|||
'borg': BorgBackupMethod
|
||||
}
|
||||
if method in ["copy", "tar", "borg"]:
|
||||
return bm_class[method](*args)
|
||||
return bm_class[method](manager, *args)
|
||||
else:
|
||||
return CustomBackupMethod(method=method, *args)
|
||||
return CustomBackupMethod(manager, method=method, *args)
|
||||
|
||||
|
||||
class CopyBackupMethod(BackupMethod):
|
||||
|
@ -1781,8 +1787,8 @@ class CopyBackupMethod(BackupMethod):
|
|||
could be the inverse for restoring
|
||||
"""
|
||||
|
||||
def __init__(self, repo=None):
|
||||
super(CopyBackupMethod, self).__init__(repo)
|
||||
def __init__(self, manager, repo=None):
|
||||
super(CopyBackupMethod, self).__init__(manager, repo)
|
||||
|
||||
@property
|
||||
def method_name(self):
|
||||
|
@ -1836,6 +1842,9 @@ class CopyBackupMethod(BackupMethod):
|
|||
"&&", "umount", "-R", self.work_dir])
|
||||
raise YunohostError('backup_cant_mount_uncompress_archive')
|
||||
|
||||
def copy(self, file, target):
|
||||
shutil.copy(file, target)
|
||||
|
||||
|
||||
class TarBackupMethod(BackupMethod):
|
||||
|
||||
|
@ -1843,8 +1852,8 @@ class TarBackupMethod(BackupMethod):
|
|||
This class compress all files to backup in archive.
|
||||
"""
|
||||
|
||||
def __init__(self, repo=None):
|
||||
super(TarBackupMethod, self).__init__(repo)
|
||||
def __init__(self, manager, repo=None):
|
||||
super(TarBackupMethod, self).__init__(manager, repo)
|
||||
|
||||
@property
|
||||
def method_name(self):
|
||||
|
@ -1904,7 +1913,7 @@ class TarBackupMethod(BackupMethod):
|
|||
if not os.path.isfile(link):
|
||||
os.symlink(self._archive_file, link)
|
||||
|
||||
def mount(self, restore_manager):
|
||||
def mount(self):
|
||||
"""
|
||||
Mount the archive. We avoid copy to be able to restore on system without
|
||||
too many space.
|
||||
|
@ -1914,9 +1923,10 @@ class TarBackupMethod(BackupMethod):
|
|||
backup_archive_corrupted -- Raised if the archive appears corrupted
|
||||
backup_archive_cant_retrieve_info_json -- If the info.json file can't be retrieved
|
||||
"""
|
||||
super(TarBackupMethod, self).mount(restore_manager)
|
||||
super(TarBackupMethod, self).mount()
|
||||
|
||||
# Check the archive can be open
|
||||
# Mount the tarball
|
||||
logger.debug(m18n.n("restore_extracting"))
|
||||
try:
|
||||
tar = tarfile.open(self._archive_file, "r:gz")
|
||||
except:
|
||||
|
@ -1929,15 +1939,7 @@ class TarBackupMethod(BackupMethod):
|
|||
except IOError as e:
|
||||
raise YunohostError("backup_archive_corrupted", archive=self._archive_file, error=str(e))
|
||||
|
||||
# FIXME : Is this really useful to close the archive just to
|
||||
# reopen it right after this with the same options ...?
|
||||
tar.close()
|
||||
|
||||
# Mount the tarball
|
||||
logger.debug(m18n.n("restore_extracting"))
|
||||
tar = tarfile.open(self._archive_file, "r:gz")
|
||||
|
||||
if "info.json" in files_in_archive:
|
||||
if "info.json" in tar.getnames():
|
||||
leading_dot = ""
|
||||
tar.extract('info.json', path=self.work_dir)
|
||||
elif "./info.json" in files_in_archive:
|
||||
|
@ -1992,7 +1994,15 @@ class TarBackupMethod(BackupMethod):
|
|||
]
|
||||
tar.extractall(members=subdir_and_files, path=self.work_dir)
|
||||
|
||||
# FIXME : Don't we want to close the tar archive here or at some point ?
|
||||
tar.close()
|
||||
|
||||
def copy(self, file, target):
|
||||
tar = tarfile.open(self._archive_file, "r:gz")
|
||||
file_to_extract = tar.getmember(file)
|
||||
# Remove the path
|
||||
file_to_extract.name = os.path.basename(file_to_extract.name)
|
||||
tar.extract(file_to_extract, path=target)
|
||||
tar.close()
|
||||
|
||||
|
||||
class BorgBackupMethod(BackupMethod):
|
||||
|
@ -2011,6 +2021,9 @@ class BorgBackupMethod(BackupMethod):
|
|||
def mount(self, mnt_path):
|
||||
raise YunohostError('backup_borg_not_implemented')
|
||||
|
||||
def copy(self, file, target):
|
||||
raise YunohostError('backup_borg_not_implemented')
|
||||
|
||||
|
||||
class CustomBackupMethod(BackupMethod):
|
||||
|
||||
|
@ -2020,8 +2033,8 @@ class CustomBackupMethod(BackupMethod):
|
|||
/etc/yunohost/hooks.d/backup_method/
|
||||
"""
|
||||
|
||||
def __init__(self, repo=None, method=None, **kwargs):
|
||||
super(CustomBackupMethod, self).__init__(repo)
|
||||
def __init__(self, manager, repo=None, method=None, **kwargs):
|
||||
super(CustomBackupMethod, self).__init__(manager, repo)
|
||||
self.args = kwargs
|
||||
self.method = method
|
||||
self._need_mount = None
|
||||
|
@ -2062,14 +2075,14 @@ class CustomBackupMethod(BackupMethod):
|
|||
if ret_failed:
|
||||
raise YunohostError('backup_custom_backup_error')
|
||||
|
||||
def mount(self, restore_manager):
|
||||
def mount(self):
|
||||
"""
|
||||
Launch a custom script to mount the custom archive
|
||||
|
||||
Exceptions:
|
||||
backup_custom_mount_error -- Raised if the custom script failed
|
||||
"""
|
||||
super(CustomBackupMethod, self).mount(restore_manager)
|
||||
super(CustomBackupMethod, self).mount()
|
||||
ret = hook_callback('backup_method', [self.method],
|
||||
args=self._get_args('mount'))
|
||||
|
||||
|
@ -2160,9 +2173,9 @@ def backup_create(name=None, description=None, methods=[],
|
|||
|
||||
# Add backup methods
|
||||
if output_directory:
|
||||
methods = BackupMethod.create(methods, output_directory)
|
||||
methods = BackupMethod.create(methods, backup_manager, output_directory)
|
||||
else:
|
||||
methods = BackupMethod.create(methods)
|
||||
methods = BackupMethod.create(methods, backup_manager)
|
||||
|
||||
for method in methods:
|
||||
backup_manager.add(method)
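A hedged end-to-end sketch of the new calling convention, where the manager is passed to the BackupMethod factory instead of to mount_and_backup(); names and targets mirror the BackupManager docstring and are illustrative:

manager = BackupManager(name="nightly", description="nightly backup")
for method in BackupMethod.create(["tar"], manager):
    manager.add(method)
manager.set_system_targets(["data"])
# Assuming the usual sequence used by backup_create():
manager.collect_files()
manager.backup()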
|
||||
|
|
|
@ -29,7 +29,6 @@ import pwd
|
|||
import grp
|
||||
import smtplib
|
||||
import subprocess
|
||||
import dns.resolver
|
||||
import glob
|
||||
|
||||
from datetime import datetime
|
||||
|
@ -42,6 +41,7 @@ from yunohost.vendor.acme_tiny.acme_tiny import get_crt as sign_certificate
|
|||
from yunohost.utils.error import YunohostError
|
||||
from yunohost.utils.network import get_public_ip
|
||||
|
||||
from yunohost.diagnosis import Diagnoser
|
||||
from yunohost.service import _run_service_command
|
||||
from yunohost.regenconf import regen_conf
|
||||
from yunohost.log import OperationLogger
|
||||
|
@ -68,18 +68,6 @@ PRODUCTION_CERTIFICATION_AUTHORITY = "https://acme-v02.api.letsencrypt.org"
|
|||
|
||||
INTERMEDIATE_CERTIFICATE_URL = "https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem"
|
||||
|
||||
DNS_RESOLVERS = [
|
||||
# FFDN DNS resolvers
|
||||
# See https://www.ffdn.org/wiki/doku.php?id=formations:dns
|
||||
"80.67.169.12", # FDN
|
||||
"80.67.169.40", #
|
||||
"89.234.141.66", # ARN
|
||||
"141.255.128.100", # Aquilenet
|
||||
"141.255.128.101",
|
||||
"89.234.186.18", # Grifon
|
||||
"80.67.188.188" # LDN
|
||||
]
|
||||
|
||||
#
|
||||
# Front-end stuff #
|
||||
#
|
||||
|
@@ -115,10 +103,16 @@ def certificate_status(domain_list, full=False):
        if not full:
            del status["subject"]
            del status["CA_name"]
            del status["ACME_eligible"]
            status["CA_type"] = status["CA_type"]["verbose"]
            status["summary"] = status["summary"]["verbose"]

        if full:
            try:
                _check_domain_is_ready_for_ACME(domain)
                status["ACME_eligible"] = True
            except:
                status["ACME_eligible"] = False

        del status["domain"]
        certificates[domain] = status
@@ -272,30 +266,36 @@ def _certificate_install_letsencrypt(domain_list, force=False, no_checks=False,
    # Actual install steps
    for domain in domain_list:

        operation_logger = OperationLogger('letsencrypt_cert_install', [('domain', domain)],
                                           args={'force': force, 'no_checks': no_checks,
                                                 'staging': staging})
        if not no_checks:
            try:
                _check_domain_is_ready_for_ACME(domain)
            except Exception as e:
                logger.error(e)
                continue

        logger.info(
            "Now attempting install of certificate for domain %s!", domain)

        operation_logger = OperationLogger('letsencrypt_cert_install', [('domain', domain)],
                                           args={'force': force, 'no_checks': no_checks,
                                                 'staging': staging})
        operation_logger.start()

        try:
            if not no_checks:
                _check_domain_is_ready_for_ACME(domain)

            operation_logger.start()

            _fetch_and_enable_new_certificate(domain, staging, no_checks=no_checks)
        except Exception as e:
            msg = "Certificate installation for %s failed !\nException: %s" % (domain, e)
            logger.error(msg)
            operation_logger.error(msg)
            if no_checks:
                logger.error("Please consider checking the 'DNS records' (basic) and 'Web' categories of the diagnosis to check for possible issues that may prevent installing a Let's Encrypt certificate on domain %s." % domain)
        else:
            _install_cron(no_checks=no_checks)

            logger.success(
                m18n.n("certmanager_cert_install_success", domain=domain))

            operation_logger.success()
        except Exception as e:
            _display_debug_information(domain)
            msg = "Certificate installation for %s failed !\nException: %s" % (domain, e)
            logger.error(msg)
            operation_logger.error(msg)


def certificate_renew(domain_list, force=False, no_checks=False, email=False, staging=False):
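The install hunk above (and the renew hunk below) converge on the same control flow: the OperationLogger is created and started up front, the ACME readiness check moves inside a single try block, and failures are reported through both the action logger and the operation logger. Here is a condensed, self-contained sketch of that flow; FakeOperationLogger and the two helper functions are placeholders, not certmanager.py code.

# --- illustrative sketch, not part of the diff ---
import logging
logger = logging.getLogger("sketch")

class FakeOperationLogger(object):
    """Tiny stand-in for yunohost.log.OperationLogger, for illustration only."""
    def __init__(self, name, related_to, args=None):
        self.name = name
    def start(self):
        logger.info("operation %s started", self.name)
    def error(self, msg):
        logger.error("operation %s failed: %s", self.name, msg)
    def success(self):
        logger.info("operation %s succeeded", self.name)

def check_domain_is_ready(domain):
    pass  # placeholder for _check_domain_is_ready_for_ACME

def fetch_and_enable_certificate(domain):
    pass  # placeholder for _fetch_and_enable_new_certificate

def process_domain(domain, no_checks=False):
    operation_logger = FakeOperationLogger('letsencrypt_cert_install',
                                           [('domain', domain)],
                                           args={'no_checks': no_checks})
    operation_logger.start()
    try:
        if not no_checks:
            check_domain_is_ready(domain)
        fetch_and_enable_certificate(domain)
    except Exception as e:
        msg = "Certificate operation for %s failed !\nException: %s" % (domain, e)
        logger.error(msg)
        operation_logger.error(msg)
    else:
        operation_logger.success()

process_domain("domain.tld")
# --- end of sketch ---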
@@ -366,32 +366,34 @@ def certificate_renew(domain_list, force=False, no_checks=False, email=False, st
    # Actual renew steps
    for domain in domain_list:

        operation_logger = OperationLogger('letsencrypt_cert_renew', [('domain', domain)],
                                           args={'force': force, 'no_checks': no_checks,
                                                 'staging': staging, 'email': email})
        if not no_checks:
            try:
                _check_domain_is_ready_for_ACME(domain)
            except Exception as e:
                logger.error(e)
                if email:
                    logger.error("Sending email with details to root ...")
                    _email_renewing_failed(domain, e)
                continue

        logger.info(
            "Now attempting renewing of certificate for domain %s !", domain)

        operation_logger = OperationLogger('letsencrypt_cert_renew', [('domain', domain)],
                                           args={'force': force, 'no_checks': no_checks,
                                                 'staging': staging, 'email': email})
        operation_logger.start()

        try:
            if not no_checks:
                _check_domain_is_ready_for_ACME(domain)

            operation_logger.start()

            _fetch_and_enable_new_certificate(domain, staging, no_checks=no_checks)

            logger.success(
                m18n.n("certmanager_cert_renew_success", domain=domain))

            operation_logger.success()

        except Exception as e:
            import traceback
            from StringIO import StringIO
            stack = StringIO()
            traceback.print_exc(file=stack)
            msg = "Certificate renewing for %s failed !" % (domain)
            if no_checks:
                msg += "\nPlease consider checking the 'DNS records' (basic) and 'Web' categories of the diagnosis to check for possible issues that may prevent installing a Let's Encrypt certificate on domain %s." % domain
            logger.error(msg)
            operation_logger.error(msg)
            logger.error(stack.getvalue())

@@ -399,7 +401,11 @@ def certificate_renew(domain_list, force=False, no_checks=False, email=False, st

            if email:
                logger.error("Sending email with details to root ...")
                _email_renewing_failed(domain, e, stack.getvalue())
                _email_renewing_failed(domain, msg + "\n" + e, stack.getvalue())
        else:
            logger.success(
                m18n.n("certmanager_cert_renew_success", domain=domain))
            operation_logger.success()

#
# Back-end stuff #
@@ -431,7 +437,7 @@ def _install_cron(no_checks=False):
    _set_permissions(cron_job_file, "root", "root", 0o755)


def _email_renewing_failed(domain, exception_message, stack):
def _email_renewing_failed(domain, exception_message, stack=""):
    from_ = "certmanager@%s (Certificate Manager)" % domain
    to_ = "root"
    subject_ = "Certificate renewing attempt for %s failed!" % domain

@@ -526,7 +532,6 @@ def _fetch_and_enable_new_certificate(domain, staging=False, no_checks=False):
            raise YunohostError('certmanager_hit_rate_limit', domain=domain)
        else:
            logger.error(str(e))
            _display_debug_information(domain)
            raise YunohostError('certmanager_cert_signing_failed')

    except Exception as e:

@@ -596,10 +601,10 @@ def _prepare_certificate_signing_request(domain, key_file, output_folder):
    # For "parent" domains, include xmpp-upload subdomain in subject alternate names
    if domain in domain_list(exclude_subdomains=True)["domains"]:
        subdomain = "xmpp-upload." + domain
        try:
            _dns_ip_match_public_ip(get_public_ip(), subdomain)
        xmpp_records = Diagnoser.get_cached_report("dnsrecords", item={"domain": domain, "category": "xmpp"}).get("data") or {}
        if xmpp_records.get("CNAME:xmpp-upload") == "OK":
            csr.add_extensions([crypto.X509Extension("subjectAltName", False, "DNS:" + subdomain)])
        except YunohostError:
        else:
            logger.warning(m18n.n('certmanager_warning_subdomain_dns_record', subdomain=subdomain, domain=domain))

    # Set the key

@@ -700,12 +705,6 @@ def _get_status(domain):
            "verbose": "Unknown?",
        }

    try:
        _check_domain_is_ready_for_ACME(domain)
        ACME_eligible = True
    except:
        ACME_eligible = False

    return {
        "domain": domain,
        "subject": cert_subject,

@@ -713,7 +712,6 @@ def _get_status(domain):
        "CA_type": CA_type,
        "validity": days_remaining,
        "summary": status_summary,
        "ACME_eligible": ACME_eligible
    }

#
@@ -790,70 +788,22 @@ def _backup_current_cert(domain):


def _check_domain_is_ready_for_ACME(domain):
    public_ip = get_public_ip()

    dnsrecords = Diagnoser.get_cached_report("dnsrecords", item={"domain": domain, "category": "basic"}, warn_if_no_cache=False) or {}
    httpreachable = Diagnoser.get_cached_report("web", item={"domain": domain}, warn_if_no_cache=False) or {}

    if not dnsrecords or not httpreachable:
        raise YunohostError('certmanager_domain_not_diagnosed_yet', domain=domain)

    # Check if IP from DNS matches public IP
    if not _dns_ip_match_public_ip(public_ip, domain):
    if not dnsrecords.get("status") in ["SUCCESS", "WARNING"]:  # Warning is for missing IPv6 record which ain't critical for ACME
        raise YunohostError('certmanager_domain_dns_ip_differs_from_public_ip', domain=domain)

    # Check if domain seems to be accessible through HTTP?
    if not _domain_is_accessible_through_HTTP(public_ip, domain):
    if not httpreachable.get("status") == "SUCCESS":
        raise YunohostError('certmanager_domain_http_not_working', domain=domain)


def _get_dns_ip(domain):
    try:
        resolver = dns.resolver.Resolver()
        resolver.nameservers = DNS_RESOLVERS
        answers = resolver.query(domain, "A")
    except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN):
        raise YunohostError('certmanager_error_no_A_record', domain=domain)

    return str(answers[0])


def _dns_ip_match_public_ip(public_ip, domain):
    return _get_dns_ip(domain) == public_ip


def _domain_is_accessible_through_HTTP(ip, domain):
    import requests  # lazy loading this module for performance reasons
    try:
        requests.head("http://" + ip, headers={"Host": domain}, timeout=10)
    except requests.exceptions.Timeout as e:
        logger.warning(m18n.n('certmanager_http_check_timeout', domain=domain, ip=ip))
        return False
    except Exception as e:
        logger.debug("Couldn't reach domain '%s' by requesting this ip '%s' because: %s" % (domain, ip, e))
        return False

    return True


def _get_local_dns_ip(domain):
    try:
        resolver = dns.resolver.Resolver()
        answers = resolver.query(domain, "A")
    except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN):
        logger.warning("Failed to resolved domain '%s' locally", domain)
        return None

    return str(answers[0])


def _display_debug_information(domain):
    dns_ip = _get_dns_ip(domain)
    public_ip = get_public_ip()
    local_dns_ip = _get_local_dns_ip(domain)

    logger.warning("""\
Debug information:
 - domain ip from DNS        %s
 - domain ip from local DNS  %s
 - public ip of the server   %s
""", dns_ip, local_dns_ip, public_ip)


# FIXME / TODO : ideally this should not be needed. There should be a proper
# mechanism to regularly check the value of the public IP and trigger
# corresponding hooks (e.g. dyndns update and dnsmasq regen-conf)
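For readers not familiar with the diagnosis cache used by the new `_check_domain_is_ready_for_ACME`: the code above only relies on a top-level `status` field and, elsewhere, a `data` mapping inside each cached report. The sketch below mocks a report of that assumed shape (the full `Diagnoser.get_cached_report` schema is not shown in this diff) and mirrors only the cached-status fallback, leaving out the live DNS/HTTP probes the real function does first.

# --- illustrative sketch, not part of the diff ---
# The report shape below is inferred from how the new code consumes it;
# it is an assumption for illustration, not the authoritative Diagnoser schema.

fake_dnsrecords_report = {
    "status": "WARNING",   # e.g. missing IPv6 record, tolerated for ACME
    "data": {"A:@": "OK", "AAAA:@": "MISSING"},
}
fake_web_report = {
    "status": "SUCCESS",
}

def domain_seems_ready_for_acme(dnsrecords, httpreachable):
    """Simplified mirror of the cached-report checks above."""
    if not dnsrecords or not httpreachable:
        return False, "not diagnosed yet"
    if dnsrecords.get("status") not in ["SUCCESS", "WARNING"]:
        return False, "dns records issue"
    if httpreachable.get("status") != "SUCCESS":
        return False, "http not reachable"
    return True, "ok"

print(domain_seems_ready_for_acme(fake_dnsrecords_report, fake_web_report))
# --- end of sketch ---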
@@ -1,15 +0,0 @@
import subprocess
import glob
from yunohost.tools import Migration
from moulinette.utils.filesystem import chown


class MyMigration(Migration):

    "Change certificates group permissions from 'metronome' to 'ssl-cert'"

    all_certificate_files = glob.glob("/etc/yunohost/certs/*/*.pem")

    def run(self):
        for filename in self.all_certificate_files:
            chown(filename, uid="root", gid="ssl-cert")
@ -1,86 +0,0 @@
|
|||
import glob
|
||||
import os
|
||||
import requests
|
||||
import base64
|
||||
import time
|
||||
import json
|
||||
|
||||
from moulinette import m18n
|
||||
from yunohost.utils.error import YunohostError
|
||||
from moulinette.utils.log import getActionLogger
|
||||
|
||||
from yunohost.tools import Migration
|
||||
from yunohost.dyndns import _guess_current_dyndns_domain
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"Migrate Dyndns stuff from MD5 TSIG to SHA512 TSIG"
|
||||
|
||||
def run(self, dyn_host="dyndns.yunohost.org", domain=None, private_key_path=None):
|
||||
|
||||
if domain is None or private_key_path is None:
|
||||
try:
|
||||
(domain, private_key_path) = _guess_current_dyndns_domain(dyn_host)
|
||||
assert "+157" in private_key_path
|
||||
except (YunohostError, AssertionError):
|
||||
logger.info(m18n.n("migrate_tsig_not_needed"))
|
||||
return
|
||||
|
||||
logger.info(m18n.n('migrate_tsig_start', domain=domain))
|
||||
public_key_path = private_key_path.rsplit(".private", 1)[0] + ".key"
|
||||
public_key_md5 = open(public_key_path).read().strip().split(' ')[-1]
|
||||
|
||||
os.system('cd /etc/yunohost/dyndns && '
|
||||
'dnssec-keygen -a hmac-sha512 -b 512 -r /dev/urandom -n USER %s' % domain)
|
||||
os.system('chmod 600 /etc/yunohost/dyndns/*.key /etc/yunohost/dyndns/*.private')
|
||||
|
||||
# +165 means that this file store a hmac-sha512 key
|
||||
new_key_path = glob.glob('/etc/yunohost/dyndns/*+165*.key')[0]
|
||||
public_key_sha512 = open(new_key_path).read().strip().split(' ', 6)[-1]
|
||||
|
||||
try:
|
||||
r = requests.put('https://%s/migrate_key_to_sha512/' % (dyn_host),
|
||||
data={
|
||||
'public_key_md5': base64.b64encode(public_key_md5),
|
||||
'public_key_sha512': base64.b64encode(public_key_sha512),
|
||||
}, timeout=30)
|
||||
except requests.ConnectionError:
|
||||
raise YunohostError('no_internet_connection')
|
||||
|
||||
if r.status_code != 201:
|
||||
try:
|
||||
error = json.loads(r.text)['error']
|
||||
except Exception:
|
||||
# failed to decode json
|
||||
error = r.text
|
||||
|
||||
import traceback
|
||||
from StringIO import StringIO
|
||||
stack = StringIO()
|
||||
traceback.print_stack(file=stack)
|
||||
logger.error(stack.getvalue())
|
||||
|
||||
# Migration didn't succeed, so we rollback and raise an exception
|
||||
os.system("mv /etc/yunohost/dyndns/*+165* /tmp")
|
||||
|
||||
raise YunohostError('migrate_tsig_failed', domain=domain,
|
||||
error_code=str(r.status_code), error=error)
|
||||
|
||||
# remove old certificates
|
||||
os.system("mv /etc/yunohost/dyndns/*+157* /tmp")
|
||||
|
||||
# sleep to wait for dyndns cache invalidation
|
||||
logger.info(m18n.n('migrate_tsig_wait'))
|
||||
time.sleep(60)
|
||||
logger.info(m18n.n('migrate_tsig_wait_2'))
|
||||
time.sleep(60)
|
||||
logger.info(m18n.n('migrate_tsig_wait_3'))
|
||||
time.sleep(30)
|
||||
logger.info(m18n.n('migrate_tsig_wait_4'))
|
||||
time.sleep(30)
|
||||
|
||||
logger.info(m18n.n('migrate_tsig_end'))
|
||||
return
|
|
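The TSIG migration above identifies key algorithms by the numeric suffix that dnssec-keygen puts in the file name: +157 for HMAC-MD5 (the old key being replaced) and +165 for HMAC-SHA512 (the new one), as its comments note. A small stand-alone sketch of that naming convention check follows; the directory path is simply the one used above, not something this sketch verifies.

# --- illustrative sketch, not part of the diff ---
import glob
import os

# dnssec-keygen encodes the algorithm number in the file name,
# e.g. Kdomain.tld.+157+12345.key ; 157 = HMAC-MD5, 165 = HMAC-SHA512.
TSIG_ALGO_BY_SUFFIX = {"+157": "hmac-md5", "+165": "hmac-sha512"}

def detect_tsig_algorithms(key_dir="/etc/yunohost/dyndns"):
    """Return {key_file: algorithm} for the TSIG key files found in key_dir."""
    result = {}
    for key_file in glob.glob(os.path.join(key_dir, "*.key")):
        for suffix, algo in TSIG_ALGO_BY_SUFFIX.items():
            if suffix in os.path.basename(key_file):
                result[key_file] = algo
    return result

print(detect_tsig_algorithms())
# --- end of sketch ---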
@ -1,379 +0,0 @@
|
|||
import glob
|
||||
import os
|
||||
from shutil import copy2
|
||||
|
||||
from moulinette import m18n, msettings
|
||||
from yunohost.utils.error import YunohostError
|
||||
from moulinette.utils.log import getActionLogger
|
||||
from moulinette.utils.process import check_output, call_async_output
|
||||
from moulinette.utils.filesystem import read_file
|
||||
|
||||
from yunohost.tools import Migration
|
||||
from yunohost.app import unstable_apps
|
||||
from yunohost.service import _run_service_command
|
||||
from yunohost.regenconf import (manually_modified_files,
|
||||
manually_modified_files_compared_to_debian_default)
|
||||
from yunohost.utils.filesystem import free_space_in_directory
|
||||
from yunohost.utils.packages import get_installed_version
|
||||
from yunohost.utils.network import get_network_interfaces
|
||||
from yunohost.firewall import firewall_allow, firewall_disallow
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
YUNOHOST_PACKAGES = ["yunohost", "yunohost-admin", "moulinette", "ssowat"]
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"Upgrade the system to Debian Stretch and Yunohost 3.0"
|
||||
|
||||
mode = "manual"
|
||||
|
||||
def run(self):
|
||||
|
||||
self.logfile = "/var/log/yunohost/{}.log".format(self.name)
|
||||
|
||||
self.check_assertions()
|
||||
|
||||
logger.info(m18n.n("migration_0003_start", logfile=self.logfile))
|
||||
|
||||
# Preparing the upgrade
|
||||
self.restore_original_nginx_conf_if_needed()
|
||||
|
||||
logger.info(m18n.n("migration_0003_patching_sources_list"))
|
||||
self.patch_apt_sources_list()
|
||||
self.backup_files_to_keep()
|
||||
self.apt_update()
|
||||
apps_packages = self.get_apps_equivs_packages()
|
||||
self.unhold(["metronome"])
|
||||
self.hold(YUNOHOST_PACKAGES + apps_packages + ["fail2ban"])
|
||||
|
||||
# Main dist-upgrade
|
||||
logger.info(m18n.n("migration_0003_main_upgrade"))
|
||||
_run_service_command("stop", "mysql")
|
||||
self.apt_dist_upgrade(conf_flags=["old", "miss", "def"])
|
||||
_run_service_command("start", "mysql")
|
||||
if self.debian_major_version() == 8:
|
||||
raise YunohostError("migration_0003_still_on_jessie_after_main_upgrade", log=self.logfile)
|
||||
|
||||
# Specific upgrade for fail2ban...
|
||||
logger.info(m18n.n("migration_0003_fail2ban_upgrade"))
|
||||
self.unhold(["fail2ban"])
|
||||
# Don't move this if folder already exists. If it does, we probably are
|
||||
# running this script a 2nd, 3rd, ... time but /etc/fail2ban will
|
||||
# be re-created only for the first dist-upgrade of fail2ban
|
||||
if not os.path.exists("/etc/fail2ban.old"):
|
||||
os.system("mv /etc/fail2ban /etc/fail2ban.old")
|
||||
self.apt_dist_upgrade(conf_flags=["new", "miss", "def"])
|
||||
_run_service_command("restart", "fail2ban")
|
||||
|
||||
self.disable_predicable_interface_names()
|
||||
|
||||
# Clean the mess
|
||||
os.system("apt autoremove --assume-yes")
|
||||
os.system("apt clean --assume-yes")
|
||||
|
||||
# We moved to port 587 for SMTP
|
||||
# https://busylog.net/smtp-tls-ssl-25-465-587/
|
||||
firewall_allow("Both", 587)
|
||||
firewall_disallow("Both", 465)
|
||||
|
||||
# Upgrade yunohost packages
|
||||
logger.info(m18n.n("migration_0003_yunohost_upgrade"))
|
||||
self.restore_files_to_keep()
|
||||
self.unhold(YUNOHOST_PACKAGES + apps_packages)
|
||||
self.upgrade_yunohost_packages()
|
||||
|
||||
def debian_major_version(self):
|
||||
# The python module "platform" and lsb_release are not reliable because
|
||||
# on some setup, they still return Release=8 even after upgrading to
|
||||
# stretch ... (Apparently this is related to OVH overriding some stuff
|
||||
# with /etc/lsb-release for instance -_-)
|
||||
# Instead, we rely on /etc/os-release which should be the raw info from
|
||||
# the distribution...
|
||||
return int(check_output("grep VERSION_ID /etc/os-release | head -n 1 | tr '\"' ' ' | cut -d ' ' -f2"))
|
||||
|
||||
def yunohost_major_version(self):
|
||||
return int(get_installed_version("yunohost").split('.')[0])
|
||||
|
||||
def check_assertions(self):
|
||||
|
||||
# Be on jessie (8.x) and yunohost 2.x
|
||||
# NB : we do both check to cover situations where the upgrade crashed
|
||||
# in the middle and debian version could be >= 9.x but yunohost package
|
||||
# would still be in 2.x...
|
||||
if not self.debian_major_version() == 8 \
|
||||
and not self.yunohost_major_version() == 2:
|
||||
raise YunohostError("migration_0003_not_jessie")
|
||||
|
||||
# Have > 1 Go free space on /var/ ?
|
||||
if free_space_in_directory("/var/") / (1024**3) < 1.0:
|
||||
raise YunohostError("There is not enough free space in /var/ to run the migration. You need at least 1GB free space")
|
||||
|
||||
# Check system is up to date
|
||||
# (but we don't if 'stretch' is already in the sources.list ...
|
||||
# which means maybe a previous upgrade crashed and we're re-running it)
|
||||
if " stretch " not in read_file("/etc/apt/sources.list"):
|
||||
self.apt_update()
|
||||
apt_list_upgradable = check_output("apt list --upgradable -a")
|
||||
if "upgradable" in apt_list_upgradable:
|
||||
raise YunohostError("migration_0003_system_not_fully_up_to_date")
|
||||
|
||||
@property
|
||||
def disclaimer(self):
|
||||
|
||||
# Avoid having a super long disclaimer + uncessary check if we ain't
|
||||
# on jessie / yunohost 2.x anymore
|
||||
# NB : we do both check to cover situations where the upgrade crashed
|
||||
# in the middle and debian version could be >= 9.x but yunohost package
|
||||
# would still be in 2.x...
|
||||
if not self.debian_major_version() == 8 \
|
||||
and not self.yunohost_major_version() == 2:
|
||||
return None
|
||||
|
||||
# Get list of problematic apps ? I.e. not official or community+working
|
||||
problematic_apps = unstable_apps()
|
||||
problematic_apps = "".join(["\n - " + app for app in problematic_apps])
|
||||
|
||||
# Manually modified files ? (c.f. yunohost service regen-conf)
|
||||
modified_files = manually_modified_files()
|
||||
# We also have a specific check for nginx.conf which some people
|
||||
# modified and needs to be upgraded...
|
||||
if "/etc/nginx/nginx.conf" in manually_modified_files_compared_to_debian_default():
|
||||
modified_files.append("/etc/nginx/nginx.conf")
|
||||
modified_files = "".join(["\n - " + f for f in modified_files])
|
||||
|
||||
message = m18n.n("migration_0003_general_warning")
|
||||
|
||||
if problematic_apps:
|
||||
message += "\n\n" + m18n.n("migration_0003_problematic_apps_warning", problematic_apps=problematic_apps)
|
||||
|
||||
if modified_files:
|
||||
message += "\n\n" + m18n.n("migration_0003_modified_files", manually_modified_files=modified_files)
|
||||
|
||||
return message
|
||||
|
||||
def patch_apt_sources_list(self):
|
||||
|
||||
sources_list = glob.glob("/etc/apt/sources.list.d/*.list")
|
||||
sources_list.append("/etc/apt/sources.list")
|
||||
|
||||
# This :
|
||||
# - replace single 'jessie' occurence by 'stretch'
|
||||
# - comments lines containing "backports"
|
||||
# - replace 'jessie/updates' by 'strech/updates' (or same with a -)
|
||||
# - switch yunohost's repo to forge
|
||||
for f in sources_list:
|
||||
command = "sed -i -e 's@ jessie @ stretch @g' " \
|
||||
"-e '/backports/ s@^#*@#@' " \
|
||||
"-e 's@ jessie/updates @ stretch/updates @g' " \
|
||||
"-e 's@ jessie-updates @ stretch-updates @g' " \
|
||||
"-e 's@repo.yunohost@forge.yunohost@g' " \
|
||||
"{}".format(f)
|
||||
os.system(command)
|
||||
|
||||
def get_apps_equivs_packages(self):
|
||||
|
||||
command = "dpkg --get-selections" \
|
||||
" | grep -v deinstall" \
|
||||
" | awk '{print $1}'" \
|
||||
" | { grep 'ynh-deps$' || true; }"
|
||||
|
||||
output = check_output(command).strip()
|
||||
|
||||
return output.split('\n') if output else []
|
||||
|
||||
def hold(self, packages):
|
||||
for package in packages:
|
||||
os.system("apt-mark hold {}".format(package))
|
||||
|
||||
def unhold(self, packages):
|
||||
for package in packages:
|
||||
os.system("apt-mark unhold {}".format(package))
|
||||
|
||||
def apt_update(self):
|
||||
|
||||
command = "apt-get update"
|
||||
logger.debug("Running apt command :\n{}".format(command))
|
||||
command += " 2>&1 | tee -a {}".format(self.logfile)
|
||||
|
||||
os.system(command)
|
||||
|
||||
def upgrade_yunohost_packages(self):
|
||||
|
||||
#
|
||||
# Here we use a dirty hack to run a command after the current
|
||||
# "yunohost tools migrations migrate", because the upgrade of
|
||||
# yunohost will also trigger another "yunohost tools migrations migrate"
|
||||
# (also the upgrade of the package, if executed from the webadmin, is
|
||||
# likely to kill/restart the api which is in turn likely to kill this
|
||||
# command before it ends...)
|
||||
#
|
||||
|
||||
MOULINETTE_LOCK = "/var/run/moulinette_yunohost.lock"
|
||||
|
||||
upgrade_command = ""
|
||||
upgrade_command += " DEBIAN_FRONTEND=noninteractive"
|
||||
upgrade_command += " APT_LISTCHANGES_FRONTEND=none"
|
||||
upgrade_command += " apt-get install"
|
||||
upgrade_command += " --assume-yes "
|
||||
upgrade_command += " ".join(YUNOHOST_PACKAGES)
|
||||
# We also install php-zip and php7.0-acpu to fix an issue with
|
||||
# nextcloud and kanboard that need it when on stretch.
|
||||
upgrade_command += " php-zip php7.0-apcu"
|
||||
upgrade_command += " 2>&1 | tee -a {}".format(self.logfile)
|
||||
|
||||
wait_until_end_of_yunohost_command = "(while [ -f {} ]; do sleep 2; done)".format(MOULINETTE_LOCK)
|
||||
|
||||
command = "({} && {}; echo 'Migration complete!') &".format(wait_until_end_of_yunohost_command,
|
||||
upgrade_command)
|
||||
|
||||
logger.debug("Running command :\n{}".format(command))
|
||||
|
||||
os.system(command)
|
||||
|
||||
def apt_dist_upgrade(self, conf_flags):
|
||||
|
||||
# Make apt-get happy
|
||||
os.system("echo 'libc6 libraries/restart-without-asking boolean true' | debconf-set-selections")
|
||||
# Don't send an email to root about the postgresql migration. It should be handled automatically after.
|
||||
os.system("echo 'postgresql-common postgresql-common/obsolete-major seen true' | debconf-set-selections")
|
||||
|
||||
command = ""
|
||||
command += " DEBIAN_FRONTEND=noninteractive"
|
||||
command += " APT_LISTCHANGES_FRONTEND=none"
|
||||
command += " apt-get"
|
||||
command += " --fix-broken --show-upgraded --assume-yes"
|
||||
for conf_flag in conf_flags:
|
||||
command += ' -o Dpkg::Options::="--force-conf{}"'.format(conf_flag)
|
||||
command += " dist-upgrade"
|
||||
|
||||
logger.debug("Running apt command :\n{}".format(command))
|
||||
|
||||
command += " 2>&1 | tee -a {}".format(self.logfile)
|
||||
|
||||
is_api = msettings.get('interface') == 'api'
|
||||
if is_api:
|
||||
callbacks = (
|
||||
lambda l: logger.info(l.rstrip()),
|
||||
lambda l: logger.warning(l.rstrip()),
|
||||
)
|
||||
call_async_output(command, callbacks, shell=True)
|
||||
else:
|
||||
# We do this when running from the cli to have the output of the
|
||||
# command showing in the terminal, since 'info' channel is only
|
||||
# enabled if the user explicitly add --verbose ...
|
||||
os.system(command)
|
||||
|
||||
# Those are files that should be kept and restored before the final switch
|
||||
# to yunohost 3.x... They end up being modified by the various dist-upgrades
|
||||
# (or need to be taken out momentarily), which then blocks the regen-conf
|
||||
# as they are flagged as "manually modified"...
|
||||
files_to_keep = [
|
||||
"/etc/mysql/my.cnf",
|
||||
"/etc/nslcd.conf",
|
||||
"/etc/postfix/master.cf",
|
||||
"/etc/fail2ban/filter.d/yunohost.conf"
|
||||
]
|
||||
|
||||
def backup_files_to_keep(self):
|
||||
|
||||
logger.debug("Backuping specific files to keep ...")
|
||||
|
||||
# Create tmp directory if it does not exists
|
||||
tmp_dir = os.path.join("/tmp/", self.name)
|
||||
if not os.path.exists(tmp_dir):
|
||||
os.mkdir(tmp_dir, 0o700)
|
||||
|
||||
for f in self.files_to_keep:
|
||||
dest_file = f.strip('/').replace("/", "_")
|
||||
|
||||
# If the file is already there, we might be re-running the migration
|
||||
# because it previously crashed. Hence we keep the existing file.
|
||||
if os.path.exists(os.path.join(tmp_dir, dest_file)):
|
||||
continue
|
||||
|
||||
copy2(f, os.path.join(tmp_dir, dest_file))
|
||||
|
||||
def restore_files_to_keep(self):
|
||||
|
||||
logger.debug("Restoring specific files to keep ...")
|
||||
|
||||
tmp_dir = os.path.join("/tmp/", self.name)
|
||||
|
||||
for f in self.files_to_keep:
|
||||
dest_file = f.strip('/').replace("/", "_")
|
||||
copy2(os.path.join(tmp_dir, dest_file), f)
|
||||
|
||||
# On some setups, /etc/nginx/nginx.conf got edited. But this file needs
|
||||
# to be upgraded because of the way the new module system works for nginx.
|
||||
# (in particular, having the line that include the modules at the top)
|
||||
#
|
||||
# So here, if it got edited, we force the restore of the original conf
|
||||
# *before* starting the actual upgrade...
|
||||
#
|
||||
# An alternative strategy that was attempted was to hold the nginx-common
|
||||
# package and have a specific upgrade for it like for fail2ban, but that
|
||||
# leads to apt complaining about not being able to upgrade for shitty
|
||||
# reasons >.>
|
||||
def restore_original_nginx_conf_if_needed(self):
|
||||
if "/etc/nginx/nginx.conf" not in manually_modified_files_compared_to_debian_default():
|
||||
return
|
||||
|
||||
if not os.path.exists("/etc/nginx/nginx.conf"):
|
||||
return
|
||||
|
||||
# If stretch is in the sources.list, we already started migrating on
|
||||
# stretch so we don't re-do this
|
||||
if " stretch " in read_file("/etc/apt/sources.list"):
|
||||
return
|
||||
|
||||
backup_dest = "/home/yunohost.conf/backup/nginx.conf.bkp_before_stretch"
|
||||
|
||||
logger.warning(m18n.n("migration_0003_restoring_origin_nginx_conf",
|
||||
backup_dest=backup_dest))
|
||||
|
||||
os.system("mv /etc/nginx/nginx.conf %s" % backup_dest)
|
||||
|
||||
command = ""
|
||||
command += " DEBIAN_FRONTEND=noninteractive"
|
||||
command += " APT_LISTCHANGES_FRONTEND=none"
|
||||
command += " apt-get"
|
||||
command += " --fix-broken --show-upgraded --assume-yes"
|
||||
command += ' -o Dpkg::Options::="--force-confmiss"'
|
||||
command += " install --reinstall"
|
||||
command += " nginx-common"
|
||||
|
||||
logger.debug("Running apt command :\n{}".format(command))
|
||||
|
||||
command += " 2>&1 | tee -a {}".format(self.logfile)
|
||||
|
||||
is_api = msettings.get('interface') == 'api'
|
||||
if is_api:
|
||||
callbacks = (
|
||||
lambda l: logger.info(l.rstrip()),
|
||||
lambda l: logger.warning(l.rstrip()),
|
||||
)
|
||||
call_async_output(command, callbacks, shell=True)
|
||||
else:
|
||||
# We do this when running from the cli to have the output of the
|
||||
# command showing in the terminal, since 'info' channel is only
|
||||
# enabled if the user explicitly add --verbose ...
|
||||
os.system(command)
|
||||
|
||||
def disable_predicable_interface_names(self):
|
||||
|
||||
# Try to see if currently used interface names are predictable ones or not...
|
||||
# If we ain't using "eth0" or "wlan0", assume we are using predictable interface
|
||||
# names and therefore they shouldnt be disabled
|
||||
network_interfaces = get_network_interfaces().keys()
|
||||
if "eth0" not in network_interfaces and "wlan0" not in network_interfaces:
|
||||
return
|
||||
|
||||
interfaces_config = read_file("/etc/network/interfaces")
|
||||
if "eth0" not in interfaces_config and "wlan0" not in interfaces_config:
|
||||
return
|
||||
|
||||
# Disable predictive interface names
|
||||
# c.f. https://unix.stackexchange.com/a/338730
|
||||
os.system("ln -s /dev/null /etc/systemd/network/99-default.link")
|
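Both this stretch migration and the buster migration added later in this diff read the Debian version by piping /etc/os-release through grep, tr, and cut. For reference, the same VERSION_ID lookup can be done in pure Python; this is an illustrative alternative, not what the migrations actually run.

# --- illustrative sketch, not part of the diff ---
def debian_major_version_from_os_release(path="/etc/os-release"):
    """Parse VERSION_ID out of /etc/os-release without shelling out.

    /etc/os-release is a KEY=VALUE file; VERSION_ID is usually quoted,
    e.g. VERSION_ID="9" on stretch or VERSION_ID="10" on buster.
    """
    with open(path) as f:
        for line in f:
            if line.startswith("VERSION_ID="):
                return int(line.split("=", 1)[1].strip().strip('"'))
    raise ValueError("VERSION_ID not found in %s" % path)

# e.g. print(debian_major_version_from_os_release())
# --- end of sketch ---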
|
@ -1,99 +0,0 @@
|
|||
import os
|
||||
import glob
|
||||
from shutil import copy2
|
||||
|
||||
from moulinette.utils.log import getActionLogger
|
||||
|
||||
from yunohost.tools import Migration
|
||||
from yunohost.service import _run_service_command
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
PHP5_POOLS = "/etc/php5/fpm/pool.d"
|
||||
PHP7_POOLS = "/etc/php/7.0/fpm/pool.d"
|
||||
|
||||
PHP5_SOCKETS_PREFIX = "/var/run/php5-fpm"
|
||||
PHP7_SOCKETS_PREFIX = "/run/php/php7.0-fpm"
|
||||
|
||||
MIGRATION_COMMENT = "; YunoHost note : this file was automatically moved from {}".format(PHP5_POOLS)
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"Migrate php5-fpm 'pool' conf files to php7 stuff"
|
||||
|
||||
dependencies = ["migrate_to_stretch"]
|
||||
|
||||
def run(self):
|
||||
# Get list of php5 pool files
|
||||
php5_pool_files = glob.glob("{}/*.conf".format(PHP5_POOLS))
|
||||
|
||||
# Keep only basenames
|
||||
php5_pool_files = [os.path.basename(f) for f in php5_pool_files]
|
||||
|
||||
# Ignore the "www.conf" (default stuff, probably don't want to touch it ?)
|
||||
php5_pool_files = [f for f in php5_pool_files if f != "www.conf"]
|
||||
|
||||
for f in php5_pool_files:
|
||||
|
||||
# Copy the files to the php7 pool
|
||||
src = "{}/{}".format(PHP5_POOLS, f)
|
||||
dest = "{}/{}".format(PHP7_POOLS, f)
|
||||
copy2(src, dest)
|
||||
|
||||
# Replace the socket prefix if it's found
|
||||
c = "sed -i -e 's@{}@{}@g' {}".format(PHP5_SOCKETS_PREFIX, PHP7_SOCKETS_PREFIX, dest)
|
||||
os.system(c)
|
||||
|
||||
# Also add a comment that it was automatically moved from php5
|
||||
# (for human traceability and backward migration)
|
||||
c = "sed -i '1i {}' {}".format(MIGRATION_COMMENT, dest)
|
||||
os.system(c)
|
||||
|
||||
# Some old comments starting with '#' instead of ';' are not
|
||||
# compatible in php7
|
||||
c = "sed -i 's/^#/;#/g' {}".format(dest)
|
||||
os.system(c)
|
||||
|
||||
# Reload/restart the php pools
|
||||
_run_service_command("restart", "php7.0-fpm")
|
||||
_run_service_command("enable", "php7.0-fpm")
|
||||
os.system("systemctl stop php5-fpm")
|
||||
os.system("systemctl disable php5-fpm")
|
||||
os.system("rm /etc/logrotate.d/php5-fpm") # We remove this otherwise the logrotate cron will be unhappy
|
||||
|
||||
# Get list of nginx conf file
|
||||
nginx_conf_files = glob.glob("/etc/nginx/conf.d/*.d/*.conf")
|
||||
for f in nginx_conf_files:
|
||||
# Replace the socket prefix if it's found
|
||||
c = "sed -i -e 's@{}@{}@g' {}".format(PHP5_SOCKETS_PREFIX, PHP7_SOCKETS_PREFIX, f)
|
||||
os.system(c)
|
||||
|
||||
# Reload nginx
|
||||
_run_service_command("reload", "nginx")
|
||||
|
||||
def backward(self):
|
||||
|
||||
# Get list of php7 pool files
|
||||
php7_pool_files = glob.glob("{}/*.conf".format(PHP7_POOLS))
|
||||
|
||||
# Keep only files which have the migration comment
|
||||
php7_pool_files = [f for f in php7_pool_files if open(f).readline().strip() == MIGRATION_COMMENT]
|
||||
|
||||
# Delete those files
|
||||
for f in php7_pool_files:
|
||||
os.remove(f)
|
||||
|
||||
# Reload/restart the php pools
|
||||
_run_service_command("stop", "php7.0-fpm")
|
||||
os.system("systemctl start php5-fpm")
|
||||
|
||||
# Get list of nginx conf file
|
||||
nginx_conf_files = glob.glob("/etc/nginx/conf.d/*.d/*.conf")
|
||||
for f in nginx_conf_files:
|
||||
# Replace the socket prefix if it's found
|
||||
c = "sed -i -e 's@{}@{}@g' {}".format(PHP7_SOCKETS_PREFIX, PHP5_SOCKETS_PREFIX, f)
|
||||
os.system(c)
|
||||
|
||||
# Reload nginx
|
||||
_run_service_command("reload", "nginx")
|
|
@ -1,41 +0,0 @@
|
|||
import subprocess
|
||||
|
||||
from moulinette import m18n
|
||||
from yunohost.utils.error import YunohostError
|
||||
from moulinette.utils.log import getActionLogger
|
||||
|
||||
from yunohost.tools import Migration
|
||||
from yunohost.utils.filesystem import free_space_in_directory, space_used_by_directory
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"Migrate DBs from Postgresql 9.4 to 9.6 after migrating to Stretch"
|
||||
|
||||
dependencies = ["migrate_to_stretch"]
|
||||
|
||||
def run(self):
|
||||
|
||||
if not self.package_is_installed("postgresql-9.4"):
|
||||
logger.warning(m18n.n("migration_0005_postgresql_94_not_installed"))
|
||||
return
|
||||
|
||||
if not self.package_is_installed("postgresql-9.6"):
|
||||
raise YunohostError("migration_0005_postgresql_96_not_installed")
|
||||
|
||||
if not space_used_by_directory("/var/lib/postgresql/9.4") > free_space_in_directory("/var/lib/postgresql"):
|
||||
raise YunohostError("migration_0005_not_enough_space", path="/var/lib/postgresql/")
|
||||
|
||||
subprocess.check_call("service postgresql stop", shell=True)
|
||||
subprocess.check_call("pg_dropcluster --stop 9.6 main", shell=True)
|
||||
subprocess.check_call("pg_upgradecluster -m upgrade 9.4 main", shell=True)
|
||||
subprocess.check_call("pg_dropcluster --stop 9.4 main", shell=True)
|
||||
subprocess.check_call("service postgresql start", shell=True)
|
||||
|
||||
def package_is_installed(self, package_name):
|
||||
|
||||
p = subprocess.Popen("dpkg --list | grep '^ii ' | grep -q -w {}".format(package_name), shell=True)
|
||||
p.communicate()
|
||||
return p.returncode == 0
|
|
@ -1,78 +0,0 @@
|
|||
import spwd
|
||||
import crypt
|
||||
import random
|
||||
import string
|
||||
import subprocess
|
||||
|
||||
from moulinette import m18n
|
||||
from yunohost.utils.error import YunohostError
|
||||
from moulinette.utils.log import getActionLogger
|
||||
from moulinette.utils.process import run_commands, check_output
|
||||
from moulinette.utils.filesystem import append_to_file
|
||||
from moulinette.authenticators.ldap import Authenticator
|
||||
from yunohost.tools import Migration
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
SMALL_PWD_LIST = ["yunohost", "olinuxino", "olinux", "raspberry", "admin", "root", "test", "rpi"]
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"Synchronize admin and root passwords"
|
||||
|
||||
def run(self):
|
||||
|
||||
new_hash = self._get_admin_hash()
|
||||
self._replace_root_hash(new_hash)
|
||||
|
||||
logger.info(m18n.n("root_password_replaced_by_admin_password"))
|
||||
|
||||
@property
|
||||
def mode(self):
|
||||
|
||||
# If the root password is still a "default" value,
|
||||
# then this is an emergency and migration shall
|
||||
# be applied automatically
|
||||
#
|
||||
# Otherwise, as playing with root password is touchy,
|
||||
# we set this as a manual migration.
|
||||
return "auto" if self._is_root_pwd_listed(SMALL_PWD_LIST) else "manual"
|
||||
|
||||
@property
|
||||
def disclaimer(self):
|
||||
if self._is_root_pwd_listed(SMALL_PWD_LIST):
|
||||
return None
|
||||
|
||||
return m18n.n("migration_0006_disclaimer")
|
||||
|
||||
def _get_admin_hash(self):
|
||||
"""
|
||||
Fetch the admin hash from the LDAP db using slapcat
|
||||
"""
|
||||
admin_hash = check_output("slapcat \
|
||||
| grep 'dn: cn=admin,dc=yunohost,dc=org' -A20 \
|
||||
| grep userPassword -A2 \
|
||||
| tr -d '\n ' \
|
||||
| tr ':' ' ' \
|
||||
| awk '{print $2}' \
|
||||
| base64 -d \
|
||||
| sed 's/{CRYPT}//g'")
|
||||
return admin_hash
|
||||
|
||||
def _replace_root_hash(self, new_hash):
|
||||
hash_root = spwd.getspnam("root").sp_pwd
|
||||
|
||||
with open('/etc/shadow', 'r') as before_file:
|
||||
before = before_file.read()
|
||||
|
||||
with open('/etc/shadow', 'w') as after_file:
|
||||
after_file.write(before.replace("root:" + hash_root,
|
||||
"root:" + new_hash))
|
||||
|
||||
def _is_root_pwd_listed(self, pwd_list):
|
||||
hash_root = spwd.getspnam("root").sp_pwd
|
||||
|
||||
for password in pwd_list:
|
||||
if hash_root == crypt.crypt(password, hash_root):
|
||||
return True
|
||||
return False
|
|
@ -1,70 +0,0 @@
|
|||
import os
|
||||
import re
|
||||
|
||||
from shutil import copyfile
|
||||
|
||||
from moulinette.utils.log import getActionLogger
|
||||
from moulinette.utils.filesystem import mkdir, rm
|
||||
|
||||
from yunohost.tools import Migration
|
||||
from yunohost.service import _run_service_command
|
||||
from yunohost.regenconf import regen_conf
|
||||
from yunohost.settings import settings_set
|
||||
from yunohost.utils.error import YunohostError
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
SSHD_CONF = '/etc/ssh/sshd_config'
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"""
|
||||
This is the first step of a couple of migrations that ensure SSH conf is
|
||||
managed by YunoHost (even if the "from_script" flag is present, which was
|
||||
previously preventing it from being managed by YunoHost)
|
||||
|
||||
The goal of this first (automatic) migration is to make sure that the
|
||||
sshd_config is managed by the regen-conf mechanism.
|
||||
|
||||
If the from_script flag exists, then we keep the current SSH conf such that it
|
||||
will appear as "manually modified" to the regenconf.
|
||||
|
||||
In step 2 (manual), the admin will be able to choose wether or not to actually
|
||||
use the recommended configuration, with an appropriate disclaimer.
|
||||
"""
|
||||
|
||||
def run(self):
|
||||
|
||||
# Check if deprecated DSA Host Key is in config
|
||||
dsa_rgx = r'^[ \t]*HostKey[ \t]+/etc/ssh/ssh_host_dsa_key[ \t]*(?:#.*)?$'
|
||||
dsa = False
|
||||
for line in open(SSHD_CONF):
|
||||
if re.match(dsa_rgx, line) is not None:
|
||||
dsa = True
|
||||
break
|
||||
if dsa:
|
||||
settings_set("service.ssh.allow_deprecated_dsa_hostkey", True)
|
||||
|
||||
# Here, we make it so that /etc/ssh/sshd_config is managed
|
||||
# by the regen conf (in particular in the case where the
|
||||
# from_script flag is present - in which case it was *not*
|
||||
# managed by the regenconf)
|
||||
# But because we can't be sure the user wants to use the
|
||||
# recommended conf, we backup then restore the /etc/ssh/sshd_config
|
||||
# right after the regenconf, such that it will appear as
|
||||
# "manually modified".
|
||||
if os.path.exists('/etc/yunohost/from_script'):
|
||||
rm('/etc/yunohost/from_script')
|
||||
copyfile(SSHD_CONF, '/etc/ssh/sshd_config.bkp')
|
||||
regen_conf(names=['ssh'], force=True)
|
||||
copyfile('/etc/ssh/sshd_config.bkp', SSHD_CONF)
|
||||
|
||||
# Restart ssh and rollback if it failed
|
||||
if not _run_service_command('restart', 'ssh'):
|
||||
# We don't rollback completely but it should be enough
|
||||
copyfile('/etc/ssh/sshd_config.bkp', SSHD_CONF)
|
||||
if not _run_service_command('restart', 'ssh'):
|
||||
raise YunohostError("migration_0007_cannot_restart")
|
||||
else:
|
||||
raise YunohostError("migration_0007_cancelled")
|
|
@ -1,105 +0,0 @@
|
|||
import os
|
||||
import re
|
||||
|
||||
from moulinette import m18n
|
||||
from moulinette.utils.log import getActionLogger
|
||||
from moulinette.utils.filesystem import chown
|
||||
|
||||
from yunohost.tools import Migration
|
||||
from yunohost.regenconf import _get_conf_hashes, _calculate_hash
|
||||
from yunohost.regenconf import regen_conf
|
||||
from yunohost.settings import settings_set, settings_get
|
||||
from yunohost.utils.error import YunohostError
|
||||
from yunohost.backup import ARCHIVES_PATH
|
||||
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
SSHD_CONF = '/etc/ssh/sshd_config'
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"""
|
||||
In this second step, the admin is asked if it's okay to use
|
||||
the recommended SSH configuration - which also implies
|
||||
disabling deprecated DSA key.
|
||||
|
||||
This has important implications in the way the user may connect
|
||||
to its server (key change, and a spooky warning might be given
|
||||
by SSH later)
|
||||
|
||||
A disclaimer explaining the various things to be aware of is
|
||||
shown - and the user may also choose to skip this migration.
|
||||
"""
|
||||
|
||||
dependencies = ["ssh_conf_managed_by_yunohost_step1"]
|
||||
|
||||
def run(self):
|
||||
settings_set("service.ssh.allow_deprecated_dsa_hostkey", False)
|
||||
regen_conf(names=['ssh'], force=True)
|
||||
|
||||
# Update local archives folder permissions, so that
|
||||
# admin can scp archives out of the server
|
||||
if os.path.isdir(ARCHIVES_PATH):
|
||||
chown(ARCHIVES_PATH, uid="admin", gid="root")
|
||||
|
||||
@property
|
||||
def mode(self):
|
||||
|
||||
# If the conf is already up to date
|
||||
# and no DSA key is used, then we're good to go
|
||||
# and the migration can be done automatically
|
||||
# (basically nothing shall change)
|
||||
ynh_hash = _get_conf_hashes('ssh').get(SSHD_CONF, None)
|
||||
current_hash = _calculate_hash(SSHD_CONF)
|
||||
dsa = settings_get("service.ssh.allow_deprecated_dsa_hostkey")
|
||||
if ynh_hash == current_hash and not dsa:
|
||||
return "auto"
|
||||
|
||||
return "manual"
|
||||
|
||||
@property
|
||||
def disclaimer(self):
|
||||
|
||||
if self.mode == "auto":
|
||||
return None
|
||||
|
||||
# Detect key things to be aware of before enabling the
|
||||
# recommended configuration
|
||||
dsa_key_enabled = False
|
||||
ports = []
|
||||
root_login = []
|
||||
port_rgx = r'^[ \t]*Port[ \t]+(\d+)[ \t]*(?:#.*)?$'
|
||||
root_rgx = r'^[ \t]*PermitRootLogin[ \t]([^# \t]*)[ \t]*(?:#.*)?$'
|
||||
dsa_rgx = r'^[ \t]*HostKey[ \t]+/etc/ssh/ssh_host_dsa_key[ \t]*(?:#.*)?$'
|
||||
for line in open(SSHD_CONF):
|
||||
|
||||
ports = ports + re.findall(port_rgx, line)
|
||||
|
||||
root_login = root_login + re.findall(root_rgx, line)
|
||||
|
||||
if not dsa_key_enabled and re.match(dsa_rgx, line) is not None:
|
||||
dsa_key_enabled = True
|
||||
|
||||
custom_port = ports != ['22'] and ports != []
|
||||
root_login_enabled = root_login and root_login[-1] != 'no'
|
||||
|
||||
# Build message
|
||||
message = m18n.n("migration_0008_general_disclaimer")
|
||||
|
||||
if custom_port:
|
||||
message += "\n\n" + m18n.n("migration_0008_port")
|
||||
|
||||
if root_login_enabled:
|
||||
message += "\n\n" + m18n.n("migration_0008_root")
|
||||
|
||||
if dsa_key_enabled:
|
||||
message += "\n\n" + m18n.n("migration_0008_dsa")
|
||||
|
||||
if custom_port or root_login_enabled or dsa_key_enabled:
|
||||
message += "\n\n" + m18n.n("migration_0008_warning")
|
||||
else:
|
||||
message += "\n\n" + m18n.n("migration_0008_no_warning")
|
||||
|
||||
return message
|
|
@ -1,39 +0,0 @@
|
|||
import os
|
||||
|
||||
from moulinette import m18n
|
||||
from moulinette.utils.log import getActionLogger
|
||||
|
||||
from moulinette.utils.filesystem import read_file
|
||||
from yunohost.service import _get_services, _save_services
|
||||
from yunohost.regenconf import _update_conf_hashes, REGEN_CONF_FILE
|
||||
|
||||
from yunohost.tools import Migration
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
"""
|
||||
Decouple the regen conf mechanism from the concept of services
|
||||
"""
|
||||
|
||||
def run(self):
|
||||
|
||||
if "conffiles" not in read_file("/etc/yunohost/services.yml") \
|
||||
or os.path.exists(REGEN_CONF_FILE):
|
||||
logger.warning(m18n.n("migration_0009_not_needed"))
|
||||
return
|
||||
|
||||
# For all services
|
||||
services = _get_services()
|
||||
for service, infos in services.items():
|
||||
# If there are some conffiles (file hashes)
|
||||
if "conffiles" in infos.keys():
|
||||
# Save them using the new regen conf thingy
|
||||
_update_conf_hashes(service, infos["conffiles"])
|
||||
# And delete the old conffile key from the service infos
|
||||
del services[service]["conffiles"]
|
||||
|
||||
# (Actually save the modification of services)
|
||||
_save_services(services)
|
||||
|
|
@@ -1,13 +0,0 @@
from moulinette.utils.log import getActionLogger
from yunohost.tools import Migration

logger = getActionLogger('yunohost.migration')


class MyMigration(Migration):

    "Migrate from official.json to apps.json (outdated, replaced by migration 13)"

    def run(self):
        logger.info("This migration is oudated and doesn't do anything anymore. The migration 13 will handle this instead.")
        pass
@@ -1,16 +0,0 @@
import glob
import re
from yunohost.tools import Migration
from moulinette.utils.filesystem import read_file, write_to_file


class MyMigration(Migration):

    "Force authentication in md5 for local connexions"

    all_hba_files = glob.glob("/etc/postgresql/*/*/pg_hba.conf")

    def run(self):
        for filename in self.all_hba_files:
            pg_hba_in = read_file(filename)
            write_to_file(filename, re.sub(r"local(\s*)all(\s*)all(\s*)password", "local\\1all\\2all\\3md5", pg_hba_in))
@ -1,51 +0,0 @@
|
|||
|
||||
import os
|
||||
import shutil
|
||||
|
||||
from moulinette.utils.log import getActionLogger
|
||||
from moulinette.utils.filesystem import read_json
|
||||
|
||||
from yunohost.tools import Migration
|
||||
from yunohost.app import (_initialize_apps_catalog_system,
|
||||
_update_apps_catalog,
|
||||
APPS_CATALOG_CACHE,
|
||||
APPS_CATALOG_CONF)
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
LEGACY_APPS_CATALOG_CONF = '/etc/yunohost/appslists.json'
|
||||
LEGACY_APPS_CATALOG_CONF_BACKUP = LEGACY_APPS_CATALOG_CONF + ".old"
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"Migrate to the new future-proof apps catalog system"
|
||||
|
||||
def run(self):
|
||||
|
||||
if not os.path.exists(LEGACY_APPS_CATALOG_CONF):
|
||||
logger.info("No need to do anything")
|
||||
|
||||
# Destroy old lecacy cache
|
||||
if os.path.exists(APPS_CATALOG_CACHE):
|
||||
shutil.rmtree(APPS_CATALOG_CACHE)
|
||||
|
||||
# and legacy cron
|
||||
if os.path.exists("/etc/cron.daily/yunohost-fetch-appslists"):
|
||||
os.remove("/etc/cron.daily/yunohost-fetch-appslists")
|
||||
|
||||
# Backup the legacy file
|
||||
try:
|
||||
legacy_catalogs = read_json(LEGACY_APPS_CATALOG_CONF)
|
||||
# If there's only one catalog, we assume it's just the old official catalog
|
||||
# Otherwise, warn the (power-?)users that they should migrate their old catalogs manually
|
||||
if len(legacy_catalogs) > 1:
|
||||
logger.warning("It looks like you had additional apps_catalog in the configuration file %s! YunoHost now uses %s instead, but it won't migrate your custom apps_catalog. You should do this manually. The old file has been backuped in %s." % (LEGACY_APPS_CATALOG_CONF, APPS_CATALOG_CONF, LEGACY_APPS_CATALOG_CONF_BACKUP))
|
||||
except Exception as e:
|
||||
logger.warning("Unable to parse the legacy conf %s (error : %s) ... migrating anyway" % (LEGACY_APPS_CATALOG_CONF, str(e)))
|
||||
|
||||
if os.path.exists(LEGACY_APPS_CATALOG_CONF):
|
||||
os.rename(LEGACY_APPS_CATALOG_CONF, LEGACY_APPS_CATALOG_CONF_BACKUP)
|
||||
|
||||
_initialize_apps_catalog_system()
|
||||
_update_apps_catalog()
|
|
@ -1,31 +0,0 @@
|
|||
import os
|
||||
|
||||
from moulinette.utils.log import getActionLogger
|
||||
from moulinette.utils.filesystem import read_json
|
||||
|
||||
from yunohost.tools import Migration
|
||||
from yunohost.app import app_setting, APPS_SETTING_PATH
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"""Remove legacy app status.json files"""
|
||||
|
||||
def run(self):
|
||||
|
||||
apps = os.listdir(APPS_SETTING_PATH)
|
||||
|
||||
for app in apps:
|
||||
status_file = os.path.join(APPS_SETTING_PATH, app, "status.json")
|
||||
if not os.path.exists(status_file):
|
||||
continue
|
||||
|
||||
try:
|
||||
status = read_json(status_file)
|
||||
current_revision = status.get("remote", {}).get("revision", "?")
|
||||
app_setting(app, 'current_revision', current_revision)
|
||||
except Exception as e:
|
||||
logger.warning("Could not migrate status.json from app %s: %s", (app, str(e)))
|
||||
else:
|
||||
os.system("rm %s" % status_file)
|
244 src/yunohost/data_migrations/0015_migrate_to_buster.py (Normal file)

@@ -0,0 +1,244 @@
|
|||
|
||||
import glob
|
||||
import os
|
||||
|
||||
from moulinette import m18n
|
||||
from yunohost.utils.error import YunohostError
|
||||
from moulinette.utils.log import getActionLogger
|
||||
from moulinette.utils.process import check_output, call_async_output
|
||||
from moulinette.utils.filesystem import read_file
|
||||
|
||||
from yunohost.tools import Migration, tools_update, tools_upgrade
|
||||
from yunohost.app import unstable_apps
|
||||
from yunohost.regenconf import manually_modified_files
|
||||
from yunohost.utils.filesystem import free_space_in_directory
|
||||
from yunohost.utils.packages import get_ynh_package_version, _list_upgradable_apt_packages
|
||||
|
||||
logger = getActionLogger('yunohost.migration')
|
||||
|
||||
|
||||
class MyMigration(Migration):
|
||||
|
||||
"Upgrade the system to Debian Buster and Yunohost 4.x"
|
||||
|
||||
mode = "manual"
|
||||
|
||||
def run(self):
|
||||
|
||||
self.check_assertions()
|
||||
|
||||
logger.info(m18n.n("migration_0015_start"))
|
||||
|
||||
#
|
||||
# Make sure certificates do not use weak signature hash algorithms (md5, sha1)
|
||||
# otherwise nginx will later refuse to start which result in
|
||||
# catastrophic situation
|
||||
#
|
||||
self.validate_and_upgrade_cert_if_necessary()
|
||||
|
||||
#
|
||||
# Patch sources.list
|
||||
#
|
||||
logger.info(m18n.n("migration_0015_patching_sources_list"))
|
||||
self.patch_apt_sources_list()
|
||||
tools_update(system=True)
|
||||
|
||||
# Tell libc6 it's okay to restart system stuff during the upgrade
|
||||
os.system("echo 'libc6 libraries/restart-without-asking boolean true' | debconf-set-selections")
|
||||
|
||||
# Don't send an email to root about the postgresql migration. It should be handled automatically after.
|
||||
os.system("echo 'postgresql-common postgresql-common/obsolete-major seen true' | debconf-set-selections")
|
||||
|
||||
#
|
||||
# Specific packages upgrades
|
||||
#
|
||||
logger.info(m18n.n("migration_0015_specific_upgrade"))
|
||||
|
||||
# Update unscd independently, was 0.53-1+yunohost on stretch (custom build of ours) but now it's 0.53-1+b1 on vanilla buster,
|
||||
# which for apt appears as a lower version (hence the --allow-downgrades and the hardcoded version number)
|
||||
unscd_version = check_output('dpkg -s unscd | grep "^Version: " | cut -d " " -f 2')
|
||||
if "yunohost" in unscd_version:
|
||||
new_version = check_output("LC_ALL=C apt policy unscd 2>/dev/null | grep -v '\\*\\*\\*' | grep http -B1 | head -n 1 | awk '{print $1}'").strip()
|
||||
if new_version:
|
||||
self.apt_install('unscd=%s --allow-downgrades' % new_version)
|
||||
else:
|
||||
logger.warning("Could not identify which version of unscd to install")
|
||||
|
||||
# Upgrade libpam-modules independently, small issue related to willing to overwrite a file previously provided by Yunohost
|
||||
libpammodules_version = check_output('dpkg -s libpam-modules | grep "^Version: " | cut -d " " -f 2')
|
||||
if not libpammodules_version.startswith("1.3"):
|
||||
self.apt_install('libpam-modules -o Dpkg::Options::="--force-overwrite"')
|
||||
|
||||
#
|
||||
# Main upgrade
|
||||
#
|
||||
logger.info(m18n.n("migration_0015_main_upgrade"))
|
||||
|
||||
apps_packages = self.get_apps_equivs_packages()
|
||||
self.hold(apps_packages)
|
||||
tools_upgrade(system=True, allow_yunohost_upgrade=False)
|
||||
|
||||
if self.debian_major_version() == 9:
|
||||
raise YunohostError("migration_0015_still_on_stretch_after_main_upgrade")
|
||||
|
||||
# Clean the mess
|
||||
logger.info(m18n.n("migration_0015_cleaning_up"))
|
||||
os.system("apt autoremove --assume-yes")
|
||||
os.system("apt clean --assume-yes")
|
||||
|
||||
#
|
||||
# Yunohost upgrade
|
||||
#
|
||||
logger.info(m18n.n("migration_0015_yunohost_upgrade"))
|
||||
self.unhold(apps_packages)
|
||||
tools_upgrade(system=True)
|
||||
|
||||
def debian_major_version(self):
|
||||
# The python module "platform" and lsb_release are not reliable because
|
||||
# on some setup, they may still return Release=9 even after upgrading to
|
||||
# buster ... (Apparently this is related to OVH overriding some stuff
|
||||
# with /etc/lsb-release for instance -_-)
|
||||
# Instead, we rely on /etc/os-release which should be the raw info from
|
||||
# the distribution...
|
||||
return int(check_output("grep VERSION_ID /etc/os-release | head -n 1 | tr '\"' ' ' | cut -d ' ' -f2"))
|
||||
|
||||
def yunohost_major_version(self):
|
||||
return int(get_ynh_package_version("yunohost")["version"].split('.')[0])
|
||||
|
||||
def check_assertions(self):
|
||||
|
||||
# Be on stretch (9.x) and yunohost 3.x
|
||||
# NB : we do both check to cover situations where the upgrade crashed
|
||||
# in the middle and debian version could be > 9.x but yunohost package
|
||||
# would still be in 3.x...
|
||||
if not self.debian_major_version() == 9 \
|
||||
and not self.yunohost_major_version() == 3:
|
||||
raise YunohostError("migration_0015_not_stretch")
|
||||
|
||||
# Have > 1 Go free space on /var/ ?
|
||||
if free_space_in_directory("/var/") / (1024**3) < 1.0:
|
||||
raise YunohostError("migration_0015_not_enough_free_space")
|
||||
|
||||
# Check system is up to date
|
||||
# (but we don't if 'stretch' is already in the sources.list ...
|
||||
# which means maybe a previous upgrade crashed and we're re-running it)
|
||||
if " buster " not in read_file("/etc/apt/sources.list"):
|
||||
tools_update(system=True)
|
||||
upgradable_system_packages = list(_list_upgradable_apt_packages())
|
||||
if upgradable_system_packages:
|
||||
raise YunohostError("migration_0015_system_not_fully_up_to_date")
|
||||
|
||||
@property
|
||||
def disclaimer(self):
|
||||
|
||||
# Avoid having a super long disclaimer + uncessary check if we ain't
|
||||
# on stretch / yunohost 3.x anymore
|
||||
# NB : we do both check to cover situations where the upgrade crashed
|
||||
# in the middle and debian version could be >= 10.x but yunohost package
|
||||
# would still be in 3.x...
|
||||
if not self.debian_major_version() == 9 \
|
||||
and not self.yunohost_major_version() == 3:
|
||||
return None
|
||||
|
||||
# Get list of problematic apps ? I.e. not official or community+working
|
||||
problematic_apps = unstable_apps()
|
||||
problematic_apps = "".join(["\n - " + app for app in problematic_apps])
|
||||
|
||||
# Manually modified files ? (c.f. yunohost service regen-conf)
|
||||
modified_files = manually_modified_files()
|
||||
modified_files = "".join(["\n - " + f for f in modified_files])
|
||||
|
||||
message = m18n.n("migration_0015_general_warning")
|
||||
|
||||
message = "N.B.: This migration has been tested by the community over the last few months but has only been declared stable recently. If your server hosts critical services and if you are not too confident with debugging possible issues, we recommend you to wait a little bit more while we gather more feedback and polish things up. If on the other hand you are relatively confident with debugging small issues that may arise, you are encouraged to run this migration ;)! You can read about remaining known issues and feedback from the community here: https://forum.yunohost.org/t/12195\n\n" + message
|
||||
|
||||
if problematic_apps:
|
||||
message += "\n\n" + m18n.n("migration_0015_problematic_apps_warning", problematic_apps=problematic_apps)
|
||||
|
||||
if modified_files:
|
||||
message += "\n\n" + m18n.n("migration_0015_modified_files", manually_modified_files=modified_files)
|
||||
|
||||
return message

    def patch_apt_sources_list(self):

        sources_list = glob.glob("/etc/apt/sources.list.d/*.list")
        sources_list.append("/etc/apt/sources.list")

        # This :
        # - replaces 'stretch' occurrences by 'buster'
        # - comments out lines containing "backports"
        # - replaces 'stretch/updates' by 'buster/updates' (and 'stretch-' by 'buster-')
        for f in sources_list:
            command = "sed -i -e 's@ stretch @ buster @g' " \
                      "-e '/backports/ s@^#*@#@' " \
                      "-e 's@ stretch/updates @ buster/updates @g' " \
                      "-e 's@ stretch-@ buster-@g' " \
                      "{}".format(f)
            os.system(command)
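            # e.g. (assuming a stock Debian mirror entry) this turns a line such as
            #   deb http://deb.debian.org/debian/ stretch main
            # into
            #   deb http://deb.debian.org/debian/ buster main
            # and comments out any line mentioning "backports"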

    def get_apps_equivs_packages(self):
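        # NB: `dpkg --get-selections` prints "<package>\t<selection state>" lines, so
        # after dropping 'deinstall' entries and keeping the first column, only the
        # "*-ynh-deps" equivs packages installed by apps remain (if any)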

        command = "dpkg --get-selections" \
                  " | grep -v deinstall" \
                  " | awk '{print $1}'" \
                  " | { grep 'ynh-deps$' || true; }"

        output = check_output(command).strip()

        return output.split('\n') if output else []

    def hold(self, packages):
        for package in packages:
            os.system("apt-mark hold {}".format(package))

    def unhold(self, packages):
        for package in packages:
            os.system("apt-mark unhold {}".format(package))

    def apt_install(self, cmd):

        def is_relevant(l):
            return "Reading database ..." not in l.rstrip()

        callbacks = (
            lambda l: logger.info("+ " + l.rstrip() + "\r") if is_relevant(l) else logger.debug(l.rstrip() + "\r"),
            lambda l: logger.warning(l.rstrip()),
        )
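        # (the first callback handles stdout lines, the second one stderr lines,
        # following call_async_output's callback convention)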

        cmd = "LC_ALL=C DEBIAN_FRONTEND=noninteractive APT_LISTCHANGES_FRONTEND=none apt install --quiet -o=Dpkg::Use-Pty=0 --fix-broken --assume-yes " + cmd

        logger.debug("Running: %s" % cmd)

        call_async_output(cmd, callbacks, shell=True)

    def validate_and_upgrade_cert_if_necessary(self):

        active_certs = set(check_output("grep -roh '/.*crt.pem' /etc/nginx/").strip().split("\n"))
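        # i.e. every '*.crt.pem' path referenced somewhere in the nginx confs,
        # which we consider to be the certificates actually in use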

        cmd = "LC_ALL=C openssl x509 -in %s -text -noout | grep -i 'Signature Algorithm:' | awk '{print $3}' | uniq"

        default_crt = '/etc/yunohost/certs/yunohost.org/crt.pem'
        default_key = '/etc/yunohost/certs/yunohost.org/key.pem'
        default_signature = check_output(cmd % default_crt).strip() if default_crt in active_certs else None
        if default_signature is not None and (default_signature.startswith("md5") or default_signature.startswith("sha1")):
            logger.warning("%s is using a pretty old certificate incompatible with newer versions of nginx ... attempting to regenerate a fresh one" % default_crt)

            os.system("mv %s %s.old" % (default_crt, default_crt))
            os.system("mv %s %s.old" % (default_key, default_key))
            ret = os.system("/usr/share/yunohost/hooks/conf_regen/02-ssl init")

            if ret != 0 or not os.path.exists(default_crt):
                logger.error("Upgrading the certificate failed ... reverting")
                os.system("mv %s.old %s" % (default_crt, default_crt))
                os.system("mv %s.old %s" % (default_key, default_key))

        signatures = {cert: check_output(cmd % cert).strip() for cert in active_certs}

        def cert_is_weak(cert):
            sig = signatures[cert]
            return sig.startswith("md5") or sig.startswith("sha1")

        weak_certs = [cert for cert in signatures.keys() if cert_is_weak(cert)]
        if weak_certs:
            raise YunohostError("migration_0015_weak_certs", certs=", ".join(weak_certs))
73	src/yunohost/data_migrations/0016_php70_to_php73_pools.py	Normal file

@@ -0,0 +1,73 @@
import os
import glob
from shutil import copy2

from moulinette.utils.log import getActionLogger

from yunohost.app import _is_installed, _get_app_settings, _set_app_settings, _patch_legacy_php_versions_in_settings
from yunohost.tools import Migration
from yunohost.service import _run_service_command

logger = getActionLogger('yunohost.migration')

PHP70_POOLS = "/etc/php/7.0/fpm/pool.d"
PHP73_POOLS = "/etc/php/7.3/fpm/pool.d"

PHP70_SOCKETS_PREFIX = "/run/php/php7.0-fpm"
PHP73_SOCKETS_PREFIX = "/run/php/php7.3-fpm"

MIGRATION_COMMENT = "; YunoHost note : this file was automatically moved from {}".format(PHP70_POOLS)


class MyMigration(Migration):

    "Migrate php7.0-fpm 'pool' conf files to php7.3"

    dependencies = ["migrate_to_buster"]

    def run(self):
        # Get list of php7.0 pool files
        php70_pool_files = glob.glob("{}/*.conf".format(PHP70_POOLS))

        # Keep only basenames
        php70_pool_files = [os.path.basename(f) for f in php70_pool_files]

        # Ignore the "www.conf" (default stuff, probably don't want to touch it ?)
        php70_pool_files = [f for f in php70_pool_files if f != "www.conf"]

        for f in php70_pool_files:

            # Copy the files to the php7.3 pool
            src = "{}/{}".format(PHP70_POOLS, f)
            dest = "{}/{}".format(PHP73_POOLS, f)
            copy2(src, dest)

            # Replace the socket prefix if it's found
            c = "sed -i -e 's@{}@{}@g' {}".format(PHP70_SOCKETS_PREFIX, PHP73_SOCKETS_PREFIX, dest)
            os.system(c)
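            # e.g. a "listen = /run/php/php7.0-fpm-<app>.sock" directive in the pool
            # conf becomes "listen = /run/php/php7.3-fpm-<app>.sock"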

            # Also add a comment that it was automatically moved from php7.0
            # (for human traceability and backward migration)
            c = "sed -i '1i {}' {}".format(MIGRATION_COMMENT, dest)
            os.system(c)

            app_id = os.path.basename(f)[:-len(".conf")]
            if _is_installed(app_id):
                _patch_legacy_php_versions_in_settings("/etc/yunohost/apps/%s/" % app_id)

            nginx_conf_files = glob.glob("/etc/nginx/conf.d/*.d/%s.conf" % app_id)
            for f in nginx_conf_files:
                # Replace the socket prefix if it's found
                c = "sed -i -e 's@{}@{}@g' {}".format(PHP70_SOCKETS_PREFIX, PHP73_SOCKETS_PREFIX, f)
                os.system(c)

        os.system("rm /etc/logrotate.d/php7.0-fpm")  # We remove this otherwise the logrotate cron will be unhappy

        # Reload/restart the php pools
        _run_service_command("restart", "php7.3-fpm")
        _run_service_command("enable", "php7.3-fpm")
        os.system("systemctl stop php7.0-fpm")
        os.system("systemctl disable php7.0-fpm")

        # Reload nginx
        _run_service_command("reload", "nginx")
66	src/yunohost/data_migrations/0017_postgresql_9p6_to_11.py	Normal file

@@ -0,0 +1,66 @@
import subprocess

from moulinette import m18n
from yunohost.utils.error import YunohostError
from moulinette.utils.log import getActionLogger

from yunohost.tools import Migration
from yunohost.utils.filesystem import free_space_in_directory, space_used_by_directory

logger = getActionLogger('yunohost.migration')


class MyMigration(Migration):

    "Migrate DBs from Postgresql 9.6 to 11 after migrating to Buster"

    dependencies = ["migrate_to_buster"]

    def run(self):

        if not self.package_is_installed("postgresql-9.6"):
            logger.warning(m18n.n("migration_0017_postgresql_96_not_installed"))
            return

        if not self.package_is_installed("postgresql-11"):
            raise YunohostError("migration_0017_postgresql_11_not_installed")

        # Make sure there's a 9.6 cluster
        try:
            self.runcmd("pg_lsclusters | grep -q '^9.6 '")
        except Exception:
            logger.warning("It looks like there's no active 9.6 cluster, so we probably don't need to run this migration")
            return

        if not free_space_in_directory("/var/lib/postgresql") > space_used_by_directory("/var/lib/postgresql/9.6"):
            raise YunohostError("migration_0017_not_enough_space", path="/var/lib/postgresql/")

        self.runcmd("systemctl stop postgresql")
        self.runcmd("pg_dropcluster --stop 11 main || true")  # We don't trigger an exception if this fails, because it probably just means cluster 11 doesn't exist yet (it gets created by pg_upgradecluster)
        self.runcmd("pg_upgradecluster -m upgrade 9.6 main")
        self.runcmd("pg_dropcluster --stop 9.6 main")
        self.runcmd("systemctl start postgresql")
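        # NB: `pg_upgradecluster -m upgrade` relies on pg_upgrade under the hood: it
        # creates a new 11/main cluster and migrates the 9.6/main data into it, which
        # is why the (empty) default 11 cluster had to be dropped beforehand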

    def package_is_installed(self, package_name):
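        # `dpkg --list` flags packages that are installed and configured with the
        # 'ii' status, hence the grep below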
        (returncode, out, err) = self.runcmd("dpkg --list | grep '^ii ' | grep -q -w {}".format(package_name), raise_on_errors=False)
        return returncode == 0

    def runcmd(self, cmd, raise_on_errors=True):

        logger.debug("Running command: " + cmd)

        p = subprocess.Popen(cmd,
                             shell=True,
                             executable='/bin/bash',
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)

        out, err = p.communicate()
        returncode = p.returncode
        if raise_on_errors and returncode != 0:
            raise YunohostError("Failed to run command '{}'.\nreturncode: {}\nstdout:\n{}\nstderr:\n{}\n".format(cmd, returncode, out, err))

        out = out.strip().split("\n")
        return (returncode, out, err)
109	src/yunohost/data_migrations/0018_xtable_to_nftable.py	Normal file

@@ -0,0 +1,109 @@
import os
import subprocess

from moulinette import m18n
from yunohost.utils.error import YunohostError
from moulinette.utils.log import getActionLogger

from yunohost.firewall import firewall_reload
from yunohost.service import service_restart
from yunohost.tools import Migration

logger = getActionLogger('yunohost.migration')


class MyMigration(Migration):

    "Migrate legacy iptables rules from stretch that relied on xtable and should now rely on nftable"

    dependencies = ["migrate_to_buster"]

    def run(self):

        self.do_ipv4 = os.system("iptables -w -L >/dev/null") == 0
        self.do_ipv6 = os.system("ip6tables -w -L >/dev/null") == 0
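        # (those commands exit non-zero when the corresponding iptables family is
        # unusable on this kernel, e.g. in some containers or VPS setups, in which
        # case we simply skip that family)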

        if not self.do_ipv4:
            logger.warning(m18n.n('iptables_unavailable'))
        if not self.do_ipv6:
            logger.warning(m18n.n('ip6tables_unavailable'))

        backup_folder = "/home/yunohost.backup/premigration/xtable_to_nftable/"
        if not os.path.exists(backup_folder):
            os.makedirs(backup_folder, 0o750)
        self.backup_rules_ipv4 = os.path.join(backup_folder, "legacy_rules_ipv4")
        self.backup_rules_ipv6 = os.path.join(backup_folder, "legacy_rules_ipv6")

        # Backup existing legacy rules to be able to rollback
        if self.do_ipv4 and not os.path.exists(self.backup_rules_ipv4):
            self.runcmd("iptables-legacy -L >/dev/null")  # For some reason if we don't do this, iptables-legacy-save is empty ?
            self.runcmd("iptables-legacy-save > %s" % self.backup_rules_ipv4)
            assert open(self.backup_rules_ipv4).read().strip(), "Uhoh backup of legacy ipv4 rules is empty !?"
        if self.do_ipv6 and not os.path.exists(self.backup_rules_ipv6):
            self.runcmd("ip6tables-legacy -L >/dev/null")  # For some reason if we don't do this, ip6tables-legacy-save is empty ?
            self.runcmd("ip6tables-legacy-save > %s" % self.backup_rules_ipv6)
            assert open(self.backup_rules_ipv6).read().strip(), "Uhoh backup of legacy ipv6 rules is empty !?"

        # We inject the legacy rules (iptables-legacy) into the new iptables (just "iptables")
        try:
            if self.do_ipv4:
                self.runcmd("iptables-legacy-save | iptables-restore")
            if self.do_ipv6:
                self.runcmd("ip6tables-legacy-save | ip6tables-restore")
        except Exception as e:
            self.rollback()
            raise YunohostError("migration_0018_failed_to_migrate_iptables_rules", error=e)

        # Reset everything in iptables-legacy
        # Stolen from https://serverfault.com/a/200642
        try:
            if self.do_ipv4:
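                # The awk filter below reduces the saved dump to something like:
                #   *filter
                #   :INPUT ACCEPT
                #   :FORWARD ACCEPT
                #   :OUTPUT ACCEPT
                #   COMMIT
                # i.e. each table is kept but emptied, with every policy set to ACCEPT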
                self.runcmd(
                    "iptables-legacy-save | awk '/^[*]/ { print $1 }"  # Keep lines like *raw, *filter and *nat
                    " /^:[A-Z]+ [^-]/ { print $1 \" ACCEPT\" ; }"  # Turn all policies to accept
                    " /COMMIT/ { print $0; }'"  # Keep the line COMMIT
                    " | iptables-legacy-restore")
            if self.do_ipv6:
                self.runcmd(
                    "ip6tables-legacy-save | awk '/^[*]/ { print $1 }"  # Keep lines like *raw, *filter and *nat
                    " /^:[A-Z]+ [^-]/ { print $1 \" ACCEPT\" ; }"  # Turn all policies to accept
                    " /COMMIT/ { print $0; }'"  # Keep the line COMMIT
                    " | ip6tables-legacy-restore")
        except Exception as e:
            self.rollback()
            raise YunohostError("migration_0018_failed_to_reset_legacy_rules", error=e)

        # You might be wondering "uh but is it really useful to
        # iptables-legacy-save | iptables-restore considering firewall_reload()
        # flushes/resets everything anyway ?"
        # But the answer is : firewall_reload() only resets the *filter table.
        # On more complex setups (e.g. internet cube or docker) you will also
        # have rules in the *nat (or maybe *raw?) sections of iptables.
        firewall_reload()
        service_restart("fail2ban")

    def rollback(self):

        if self.do_ipv4:
            self.runcmd("iptables-legacy-restore < %s" % self.backup_rules_ipv4)
        if self.do_ipv6:
            self.runcmd("ip6tables-legacy-restore < %s" % self.backup_rules_ipv6)

    def runcmd(self, cmd, raise_on_errors=True):

        logger.debug("Running command: " + cmd)

        p = subprocess.Popen(cmd,
                             shell=True,
                             executable='/bin/bash',
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)

        out, err = p.communicate()
        returncode = p.returncode
        if raise_on_errors and returncode != 0:
            raise YunohostError("Failed to run command '{}'.\nreturncode: {}\nstdout:\n{}\nstderr:\n{}\n".format(cmd, returncode, out, err))

        out = out.strip().split("\n")
        return (returncode, out, err)
src/yunohost/diagnosis.py

@@ -27,6 +27,7 @@
import re
import os
import time
import smtplib

from moulinette import m18n, msettings
from moulinette.utils import log

@@ -41,6 +42,7 @@ DIAGNOSIS_CACHE = "/var/cache/yunohost/diagnosis/"
DIAGNOSIS_CONFIG_FILE = '/etc/yunohost/diagnosis.yml'
DIAGNOSIS_SERVER = "diagnosis.yunohost.org"


def diagnosis_list():
    all_categories_names = [h for h, _ in _list_diagnosis_categories()]
    return {"categories": all_categories_names}
@@ -65,7 +67,7 @@ def diagnosis_get(category, item):
    return Diagnoser.get_cached_report(category, item=item)


def diagnosis_show(categories=[], issues=False, full=False, share=False):
def diagnosis_show(categories=[], issues=False, full=False, share=False, human_readable=False):

    if not os.path.exists(DIAGNOSIS_CACHE):
        logger.warning(m18n.n("diagnosis_never_ran_yet"))

@@ -93,7 +95,7 @@ def diagnosis_show(categories=[], issues=False, full=False, share=False):
            logger.error(m18n.n("diagnosis_failed", category=category, error=str(e)))
            continue

        Diagnoser.i18n(report)
        Diagnoser.i18n(report, force_remove_html_tags=share or human_readable)

        add_ignore_flag_to_issues(report)
        if not full:

@@ -123,9 +125,12 @@ def diagnosis_show(categories=[], issues=False, full=False, share=False):
            return {"url": url}
        else:
            return
    elif human_readable:
        print(_dump_human_readable_reports(all_reports))
    else:
        return {"reports": all_reports}


def _dump_human_readable_reports(reports):

    output = ""
@@ -137,16 +142,16 @@ def _dump_human_readable_reports(reports):
        for item in report["items"]:
            output += "[{status}] {summary}\n".format(**item)
            for detail in item.get("details", []):
                output += " - " + detail + "\n"
                output += " - " + detail.replace("\n", "\n ") + "\n"
            output += "\n"
        output += "\n\n"

    return(output)


def diagnosis_run(categories=[], force=False, except_if_never_ran_yet=False):
def diagnosis_run(categories=[], force=False, except_if_never_ran_yet=False, email=False):

    if except_if_never_ran_yet and not os.path.exists(DIAGNOSIS_CACHE):
    if (email or except_if_never_ran_yet) and not os.path.exists(DIAGNOSIS_CACHE):
        return

    # Get all the categories

@@ -170,7 +175,7 @@ def diagnosis_run(categories=[], force=False, except_if_never_ran_yet=False):

        try:
            code, report = hook_exec(path, args={"force": force}, env=None)
        except Exception as e:
        except Exception:
            import traceback
            logger.error(m18n.n("diagnosis_failed_for_category", category=category, error='\n'+traceback.format_exc()))
        else:

@@ -178,11 +183,11 @@ def diagnosis_run(categories=[], force=False, except_if_never_ran_yet=False):
            if report != {}:
                issues.extend([item for item in report["items"] if item["status"] in ["WARNING", "ERROR"]])

    if email:
        _email_diagnosis_issues()
    if issues and msettings.get("interface") == "cli":
        logger.warning(m18n.n("diagnosis_display_tip"))

    return


def diagnosis_ignore(add_filter=None, remove_filter=None, list=False):
    """
@@ -318,6 +323,7 @@ def issue_matches_criterias(issue, criterias):
            return False
    return True


def add_ignore_flag_to_issues(report):
    """
    Iterate over issues in a report, and flag them as ignored if they match an

@@ -420,10 +426,11 @@ class Diagnoser():
        return os.path.join(DIAGNOSIS_CACHE, "%s.json" % id_)

    @staticmethod
    def get_cached_report(id_, item=None):
    def get_cached_report(id_, item=None, warn_if_no_cache=True):
        cache_file = Diagnoser.cache_file(id_)
        if not os.path.exists(cache_file):
            logger.warning(m18n.n("diagnosis_no_cache", category=id_))
            if warn_if_no_cache:
                logger.warning(m18n.n("diagnosis_no_cache", category=id_))
            report = {"id": id_,
                      "cached_for": -1,
                      "timestamp": -1,

@@ -445,10 +452,10 @@ class Diagnoser():
        key = "diagnosis_description_" + id_
        descr = m18n.n(key)
        # If no description available, fallback to id
        return descr if descr != key else id_
        return descr if descr.decode('utf-8') != key else id_

    @staticmethod
    def i18n(report):
    def i18n(report, force_remove_html_tags=False):

        # "Render" the strings with m18n.n
        # N.B. : we do those m18n.n right now instead of saving the already-translated report

@@ -477,7 +484,7 @@ class Diagnoser():
                info[1].update(meta_data)
                s = m18n.n(info[0], **(info[1]))
                # In cli, we remove the html tags
                if msettings.get("interface") != "api":
                if msettings.get("interface") != "api" or force_remove_html_tags:
                    s = s.replace("<cmd>", "'").replace("</cmd>", "'")
                    s = html_tags.sub('', s.replace("<br>","\n"))
                else:
@@ -547,3 +554,35 @@ def _list_diagnosis_categories():
        hooks.append((name, info["path"]))

    return hooks


def _email_diagnosis_issues():
    from yunohost.domain import _get_maindomain
    maindomain = _get_maindomain()
    from_ = "diagnosis@%s (Automatic diagnosis on %s)" % (maindomain, maindomain)
    to_ = "root"
    subject_ = "Issues found by automatic diagnosis on %s" % maindomain

    disclaimer = "The automatic diagnosis on your YunoHost server identified some issues on your server. You will find a description of the issues below. You can manage those issues in the 'Diagnosis' section in your webadmin."

    issues = diagnosis_show(issues=True)["reports"]
    if not issues:
        return

    content = _dump_human_readable_reports(issues)
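    # NB: in the message template below, the blank line right after the "Subject:"
    # header is what separates the mail headers from the body handed to sendmail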

    message = """\
From: %s
To: %s
Subject: %s

%s

---

%s
""" % (from_, to_, subject_, disclaimer, content)

    smtp = smtplib.SMTP("localhost")
    smtp.sendmail(from_, [to_], message)
    smtp.quit()
Some files were not shown because too many files have changed in this diff.