mirror of
https://github.com/YunoHost/yunohost.git
synced 2024-09-03 20:06:10 +02:00
Merge branch 'stretch-unstable' into clean_helpers
This commit is contained in:
commit
8501943f9c
12 changed files with 198 additions and 29 deletions
|
@ -92,3 +92,10 @@ test-user-group:
|
|||
script:
|
||||
- cd src/yunohost
|
||||
- py.test tests/test_user-group.py
|
||||
|
||||
test-regenconf:
|
||||
extends: .tests
|
||||
stage: tests
|
||||
script:
|
||||
- cd src/yunohost
|
||||
- py.test tests/test_regenconf.py
|
||||
|
|
|
@ -132,7 +132,7 @@ EOF
|
|||
ynh_store_file_checksum "$finalfail2banjailconf"
|
||||
ynh_store_file_checksum "$finalfail2banfilterconf"
|
||||
|
||||
ynh_systemd_action --service_name=fail2ban --action=reload
|
||||
ynh_systemd_action --service_name=fail2ban --action=reload --line_match="(Started|Reloaded) Fail2Ban Service" --log_path=systemd
|
||||
|
||||
local fail2ban_error="$(journalctl --unit=fail2ban | tail --lines=50 | grep "WARNING.*$app.*")"
|
||||
if [[ -n "$fail2ban_error" ]]
|
||||
|
|
|
@ -146,7 +146,7 @@ ynh_systemd_action() {
|
|||
for i in $(seq 1 $timeout)
|
||||
do
|
||||
# Read the log until the sentence is found, that means the app finished to start. Or run until the timeout
|
||||
if grep --quiet "$line_match" "$templog"
|
||||
if grep --extended-regexp --quiet "$line_match" "$templog"
|
||||
then
|
||||
ynh_print_info --message="The service $service_name has correctly executed the action ${action}."
|
||||
break
|
||||
|
|
|
@ -27,7 +27,8 @@ do_init_regen() {
|
|||
ynh_render_template "yunohost_admin.conf" "${nginx_conf_dir}/yunohost_admin.conf"
|
||||
|
||||
# Restart nginx if conf looks good, otherwise display error and exit unhappy
|
||||
nginx -t 2>/dev/null && service nginx restart || (nginx -t && exit 1)
|
||||
nginx -t 2>/dev/null || { nginx -t; exit 1; }
|
||||
systemctl restart nginx || { journalctl --no-pager --lines=10 -u nginx >&2; exit 1; }
|
||||
|
||||
exit 0
|
||||
}
|
||||
|
@ -125,9 +126,9 @@ do_post_regen() {
|
|||
fi
|
||||
done
|
||||
|
||||
|
||||
# Reload nginx configuration
|
||||
pgrep nginx && service nginx reload
|
||||
# Reload nginx if conf looks good, otherwise display error and exit unhappy
|
||||
nginx -t 2>/dev/null || { nginx -t; exit 1; }
|
||||
pgrep nginx && systemctl reload nginx || { journalctl --no-pager --lines=10 -u nginx >&2; exit 1; }
|
||||
}
|
||||
|
||||
FORCE=${2:-0}
|
||||
|
|
|
@ -74,6 +74,8 @@
|
|||
"backup_archive_name_exists": "A backup archive with this name already exists.",
|
||||
"backup_archive_name_unknown": "Unknown local backup archive named '{name:s}'",
|
||||
"backup_archive_open_failed": "Could not open the backup archive",
|
||||
"backup_archive_cant_retrieve_info_json": "Could not load infos for archive '{archive}' ... The info.json cannot be retrieved (or is not a valid json).",
|
||||
"backup_archive_corrupted": "It looks like the backup archive '{archive}' is corrupted : {error}",
|
||||
"backup_archive_system_part_not_available": "System part '{part:s}' unavailable in this backup",
|
||||
"backup_archive_writing_error": "Could not add the files '{source:s}' (named in the archive '{dest:s}') to be backed up into the compressed archive '{archive:s}'",
|
||||
"backup_ask_for_copying_if_needed": "Do you want to perform the backup using {size:s} MB temporarily? (This way is used since some files could not be prepared using a more efficient method.)",
|
||||
|
@ -91,7 +93,6 @@
|
|||
"backup_delete_error": "Could not delete '{path:s}'",
|
||||
"backup_deleted": "Backup deleted",
|
||||
"backup_hook_unknown": "The backup hook '{hook:s}' is unknown",
|
||||
"backup_invalid_archive": "This is not a backup archive",
|
||||
"backup_method_borg_finished": "Backup into Borg finished",
|
||||
"backup_method_copy_finished": "Backup copy finalized",
|
||||
"backup_method_custom_finished": "Custom backup method '{method:s}' finished",
|
||||
|
|
|
@ -261,6 +261,8 @@ def app_map(app=None, raw=False, user=None):
|
|||
perm_domain, perm_path = perm_url.split("/", 1)
|
||||
perm_path = "/" + perm_path.rstrip("/")
|
||||
|
||||
perm_path = perm_path if perm_path.strip() != "" else "/"
|
||||
|
||||
return perm_domain, perm_path
|
||||
|
||||
this_app_perms = {p: i for p, i in permissions.items() if p.startswith(app_id + ".") and i["url"]}
|
||||
|
@ -296,7 +298,6 @@ def app_map(app=None, raw=False, user=None):
|
|||
continue
|
||||
|
||||
perm_domain, perm_path = _sanitized_absolute_url(perm_info["url"])
|
||||
|
||||
if perm_name.endswith(".main"):
|
||||
perm_label = label
|
||||
else:
|
||||
|
@ -1127,11 +1128,12 @@ def app_makedefault(operation_logger, app, domain=None):
|
|||
elif domain not in domain_list()['domains']:
|
||||
raise YunohostError('domain_unknown')
|
||||
|
||||
operation_logger.start()
|
||||
if '/' in app_map(raw=True)[domain]:
|
||||
raise YunohostError('app_make_default_location_already_used', app=app, domain=app_domain,
|
||||
other_app=app_map(raw=True)[domain]["/"]["id"])
|
||||
|
||||
operation_logger.start()
|
||||
|
||||
# TODO / FIXME : current trick is to add this to conf.json.persisten
|
||||
# This is really not robust and should be improved
|
||||
# e.g. have a flag in /etc/yunohost/apps/$app/ to say that this is the
|
||||
|
@ -1289,6 +1291,8 @@ def app_ssowatconf():
|
|||
perm_domain, perm_path = perm_url.split("/", 1)
|
||||
perm_path = "/" + perm_path.rstrip("/")
|
||||
|
||||
perm_path = perm_path if perm_path.strip() != "" else "/"
|
||||
|
||||
return perm_domain + perm_path
|
||||
|
||||
# Skipped
|
||||
|
|
|
@ -872,7 +872,7 @@ class RestoreManager():
|
|||
Read the info file from inside an archive
|
||||
|
||||
Exceptions:
|
||||
backup_invalid_archive -- Raised if we can't read the info
|
||||
backup_archive_cant_retrieve_info_json -- Raised if we can't read the info
|
||||
"""
|
||||
# Retrieve backup info
|
||||
info_file = os.path.join(self.work_dir, "info.json")
|
||||
|
@ -885,7 +885,7 @@ class RestoreManager():
|
|||
self.info["system"] = self.info["hooks"]
|
||||
except IOError:
|
||||
logger.debug("unable to load '%s'", info_file, exc_info=1)
|
||||
raise YunohostError('backup_invalid_archive')
|
||||
raise YunohostError('backup_archive_cant_retrieve_info_json', archive=self.archive_path)
|
||||
else:
|
||||
logger.debug("restoring from backup '%s' created on %s", self.name,
|
||||
datetime.utcfromtimestamp(self.info['created_at']))
|
||||
|
@ -893,10 +893,6 @@ class RestoreManager():
|
|||
def _postinstall_if_needed(self):
|
||||
"""
|
||||
Post install yunohost if needed
|
||||
|
||||
Exceptions:
|
||||
backup_invalid_archive -- Raised if the current_host isn't in the
|
||||
archive
|
||||
"""
|
||||
# Check if YunoHost is installed
|
||||
if not os.path.isfile('/etc/yunohost/installed'):
|
||||
|
@ -908,7 +904,7 @@ class RestoreManager():
|
|||
logger.debug("unable to retrieve current_host from the backup",
|
||||
exc_info=1)
|
||||
# FIXME include the current_host by default ?
|
||||
raise YunohostError('backup_invalid_archive')
|
||||
raise YunohostError("The main domain name cannot be retrieved from inside the archive, and is needed to perform the postinstall", raw_msg=True)
|
||||
|
||||
logger.debug("executing the post-install...")
|
||||
tools_postinstall(domain, 'Yunohost', True)
|
||||
|
@ -1915,6 +1911,8 @@ class TarBackupMethod(BackupMethod):
|
|||
|
||||
Exceptions:
|
||||
backup_archive_open_failed -- Raised if the archive can't be open
|
||||
backup_archive_corrupted -- Raised if the archive appears corrupted
|
||||
backup_archive_cant_retrieve_info_json -- If the info.json file can't be retrieved
|
||||
"""
|
||||
super(TarBackupMethod, self).mount(restore_manager)
|
||||
|
||||
|
@ -1926,6 +1924,11 @@ class TarBackupMethod(BackupMethod):
|
|||
self._archive_file, exc_info=1)
|
||||
raise YunohostError('backup_archive_open_failed')
|
||||
|
||||
try:
|
||||
files_in_archive = tar.getnames()
|
||||
except IOError as e:
|
||||
raise YunohostError("backup_archive_corrupted", archive=self._archive_file, error=str(e))
|
||||
|
||||
# FIXME : Is this really useful to close the archive just to
|
||||
# reopen it right after this with the same options ...?
|
||||
tar.close()
|
||||
|
@ -1934,21 +1937,21 @@ class TarBackupMethod(BackupMethod):
|
|||
logger.debug(m18n.n("restore_extracting"))
|
||||
tar = tarfile.open(self._archive_file, "r:gz")
|
||||
|
||||
if "info.json" in tar.getnames():
|
||||
if "info.json" in files_in_archive:
|
||||
leading_dot = ""
|
||||
tar.extract('info.json', path=self.work_dir)
|
||||
elif "./info.json" in tar.getnames():
|
||||
elif "./info.json" in files_in_archive:
|
||||
leading_dot = "./"
|
||||
tar.extract('./info.json', path=self.work_dir)
|
||||
else:
|
||||
logger.debug("unable to retrieve 'info.json' inside the archive",
|
||||
exc_info=1)
|
||||
tar.close()
|
||||
raise YunohostError('backup_invalid_archive')
|
||||
raise YunohostError('backup_archive_cant_retrieve_info_json', archive=self._archive_file)
|
||||
|
||||
if "backup.csv" in tar.getnames():
|
||||
if "backup.csv" in files_in_archive:
|
||||
tar.extract('backup.csv', path=self.work_dir)
|
||||
elif "./backup.csv" in tar.getnames():
|
||||
elif "./backup.csv" in files_in_archive:
|
||||
tar.extract('./backup.csv', path=self.work_dir)
|
||||
else:
|
||||
# Old backup archive have no backup.csv file
|
||||
|
@ -2290,7 +2293,7 @@ def backup_list(with_info=False, human_readable=False):
|
|||
try:
|
||||
d[a] = backup_info(a, human_readable=human_readable)
|
||||
except YunohostError as e:
|
||||
logger.warning('%s: %s' % (a, e.strerror))
|
||||
logger.warning(str(e))
|
||||
|
||||
result = d
|
||||
|
||||
|
@ -2327,17 +2330,23 @@ def backup_info(name, with_details=False, human_readable=False):
|
|||
if not os.path.exists(info_file):
|
||||
tar = tarfile.open(archive_file, "r:gz")
|
||||
info_dir = info_file + '.d'
|
||||
|
||||
try:
|
||||
if "info.json" in tar.getnames():
|
||||
files_in_archive = tar.getnames()
|
||||
except IOError as e:
|
||||
raise YunohostError("backup_archive_corrupted", archive=archive_file, error=str(e))
|
||||
|
||||
try:
|
||||
if "info.json" in files_in_archive:
|
||||
tar.extract('info.json', path=info_dir)
|
||||
elif "./info.json" in tar.getnames():
|
||||
elif "./info.json" in files_in_archive:
|
||||
tar.extract('./info.json', path=info_dir)
|
||||
else:
|
||||
raise KeyError
|
||||
except KeyError:
|
||||
logger.debug("unable to retrieve '%s' inside the archive",
|
||||
info_file, exc_info=1)
|
||||
raise YunohostError('backup_invalid_archive')
|
||||
raise YunohostError('backup_archive_cant_retrieve_info_json', archive=archive_file)
|
||||
else:
|
||||
shutil.move(os.path.join(info_dir, 'info.json'), info_file)
|
||||
finally:
|
||||
|
@ -2350,7 +2359,7 @@ def backup_info(name, with_details=False, human_readable=False):
|
|||
info = json.load(f)
|
||||
except:
|
||||
logger.debug("unable to load '%s'", info_file, exc_info=1)
|
||||
raise YunohostError('backup_invalid_archive')
|
||||
raise YunohostError('backup_archive_cant_retrieve_info_json', archive=archive_file)
|
||||
|
||||
# Retrieve backup size
|
||||
size = info.get('size', 0)
|
||||
|
|
|
@ -33,7 +33,7 @@ from yunohost.utils.error import YunohostError
|
|||
from moulinette.utils.log import getActionLogger
|
||||
|
||||
from yunohost.app import app_ssowatconf
|
||||
from yunohost.regenconf import regen_conf
|
||||
from yunohost.regenconf import regen_conf, _force_clear_hashes, _process_regen_conf
|
||||
from yunohost.utils.network import get_public_ip
|
||||
from yunohost.log import is_unit_operation
|
||||
from yunohost.hook import hook_callback
|
||||
|
@ -124,6 +124,17 @@ def domain_add(operation_logger, domain, dyndns=False):
|
|||
|
||||
# Don't regen these conf if we're still in postinstall
|
||||
if os.path.exists('/etc/yunohost/installed'):
|
||||
# Sometime we have weird issues with the regenconf where some files
|
||||
# appears as manually modified even though they weren't touched ...
|
||||
# There are a few ideas why this happens (like backup/restore nginx
|
||||
# conf ... which we shouldnt do ...). This in turns creates funky
|
||||
# situation where the regenconf may refuse to re-create the conf
|
||||
# (when re-creating a domain..)
|
||||
# So here we force-clear the has out of the regenconf if it exists.
|
||||
# This is a pretty ad hoc solution and only applied to nginx
|
||||
# because it's one of the major service, but in the long term we
|
||||
# should identify the root of this bug...
|
||||
_force_clear_hashes(["/etc/nginx/conf.d/%s.conf" % domain])
|
||||
regen_conf(names=['nginx', 'metronome', 'dnsmasq', 'postfix', 'rspamd'])
|
||||
app_ssowatconf()
|
||||
|
||||
|
@ -188,6 +199,25 @@ def domain_remove(operation_logger, domain, force=False):
|
|||
|
||||
os.system('rm -rf /etc/yunohost/certs/%s' % domain)
|
||||
|
||||
# Sometime we have weird issues with the regenconf where some files
|
||||
# appears as manually modified even though they weren't touched ...
|
||||
# There are a few ideas why this happens (like backup/restore nginx
|
||||
# conf ... which we shouldnt do ...). This in turns creates funky
|
||||
# situation where the regenconf may refuse to re-create the conf
|
||||
# (when re-creating a domain..)
|
||||
#
|
||||
# So here we force-clear the has out of the regenconf if it exists.
|
||||
# This is a pretty ad hoc solution and only applied to nginx
|
||||
# because it's one of the major service, but in the long term we
|
||||
# should identify the root of this bug...
|
||||
_force_clear_hashes(["/etc/nginx/conf.d/%s.conf" % domain])
|
||||
# And in addition we even force-delete the file Otherwise, if the file was
|
||||
# manually modified, it may not get removed by the regenconf which leads to
|
||||
# catastrophic consequences of nginx breaking because it can't load the
|
||||
# cert file which disappeared etc..
|
||||
if os.path.exists("/etc/nginx/conf.d/%s.conf" % domain):
|
||||
_process_regen_conf("/etc/nginx/conf.d/%s.conf" % domain, new_conf=None, save=True)
|
||||
|
||||
regen_conf(names=['nginx', 'metronome', 'dnsmasq', 'postfix'])
|
||||
app_ssowatconf()
|
||||
|
||||
|
|
|
@ -473,6 +473,18 @@ def _update_conf_hashes(category, hashes):
|
|||
_save_regenconf_infos(categories)
|
||||
|
||||
|
||||
def _force_clear_hashes(paths):
    """Forget any stored regen-conf checksum for the given conf files.

    For each path, scan every regen-conf category and drop its recorded
    hash entry if present, then persist the updated infos back to disk.
    """
    categories = _get_regenconf_infos()
    for conf_path in paths:
        for name, infos in categories.items():
            if conf_path in infos['conffiles']:
                logger.debug("force-clearing old conf hash for %s in category %s" % (conf_path, name))
                del infos['conffiles'][conf_path]

    _save_regenconf_infos(categories)
|
||||
|
||||
|
||||
def _process_regen_conf(system_conf, new_conf=None, save=True):
|
||||
"""Regenerate a given system configuration file
|
||||
|
||||
|
|
|
@ -9,7 +9,7 @@ from conftest import message, raiseYunohostError
|
|||
from moulinette import m18n
|
||||
from moulinette.utils.filesystem import mkdir
|
||||
|
||||
from yunohost.app import app_install, app_remove, app_ssowatconf, _is_installed, app_upgrade
|
||||
from yunohost.app import app_install, app_remove, app_ssowatconf, _is_installed, app_upgrade, app_map
|
||||
from yunohost.domain import _get_maindomain, domain_add, domain_remove, domain_list
|
||||
from yunohost.utils.error import YunohostError
|
||||
from yunohost.tests.test_permission import check_LDAP_db_integrity, check_permission_for_apps
|
||||
|
@ -142,6 +142,12 @@ def test_legacy_app_install_main_domain():
|
|||
|
||||
install_legacy_app(main_domain, "/legacy")
|
||||
|
||||
app_map_ = app_map(raw=True)
|
||||
assert main_domain in app_map_
|
||||
assert '/legacy' in app_map_[main_domain]
|
||||
assert 'id' in app_map_[main_domain]['/legacy']
|
||||
assert app_map_[main_domain]['/legacy']['id'] == 'legacy_app'
|
||||
|
||||
assert app_is_installed(main_domain, "legacy_app")
|
||||
assert app_is_exposed_on_http(main_domain, "/legacy", "This is a dummy app")
|
||||
|
||||
|
@ -166,6 +172,12 @@ def test_legacy_app_install_secondary_domain_on_root(secondary_domain):
|
|||
|
||||
install_legacy_app(secondary_domain, "/")
|
||||
|
||||
app_map_ = app_map(raw=True)
|
||||
assert secondary_domain in app_map_
|
||||
assert '/' in app_map_[secondary_domain]
|
||||
assert 'id' in app_map_[secondary_domain]['/']
|
||||
assert app_map_[secondary_domain]['/']['id'] == 'legacy_app'
|
||||
|
||||
assert app_is_installed(secondary_domain, "legacy_app")
|
||||
assert app_is_exposed_on_http(secondary_domain, "/", "This is a dummy app")
|
||||
|
||||
|
|
|
@ -574,9 +574,22 @@ def test_restore_archive_with_no_json(mocker):
|
|||
|
||||
assert "badbackup" in backup_list()["archives"]
|
||||
|
||||
with raiseYunohostError(mocker, 'backup_invalid_archive'):
|
||||
with raiseYunohostError(mocker, 'backup_archive_cant_retrieve_info_json'):
|
||||
backup_restore(name="badbackup", force=True)
|
||||
|
||||
@pytest.mark.with_wordpress_archive_from_2p4
|
||||
def test_restore_archive_with_bad_archive(mocker):
|
||||
|
||||
# Break the archive
|
||||
os.system("head -n 1000 /home/yunohost.backup/archives/backup_wordpress_from_2p4.tar.gz > /home/yunohost.backup/archives/backup_wordpress_from_2p4.tar.gz")
|
||||
|
||||
assert "backup_wordpress_from_2p4" in backup_list()["archives"]
|
||||
|
||||
with raiseYunohostError(mocker, 'backup_archive_open_failed'):
|
||||
backup_restore(name="backup_wordpress_from_2p4", force=True)
|
||||
|
||||
clean_tmp_backup_directory()
|
||||
|
||||
|
||||
def test_backup_binds_are_readonly(mocker, monkeypatch):
|
||||
|
||||
|
|
80
src/yunohost/tests/test_regenconf.py
Normal file
80
src/yunohost/tests/test_regenconf.py
Normal file
|
@ -0,0 +1,80 @@
|
|||
import glob
|
||||
import os
|
||||
import pytest
|
||||
import shutil
|
||||
import requests
|
||||
|
||||
from conftest import message, raiseYunohostError
|
||||
|
||||
from moulinette import m18n
|
||||
from moulinette.utils.filesystem import mkdir
|
||||
|
||||
from yunohost.domain import _get_maindomain, domain_add, domain_remove, domain_list
|
||||
from yunohost.utils.error import YunohostError
|
||||
from yunohost.regenconf import manually_modified_files, _get_conf_hashes, _force_clear_hashes
|
||||
|
||||
TEST_DOMAIN = "secondarydomain.test"
|
||||
TEST_DOMAIN_NGINX_CONFIG = "/etc/nginx/conf.d/secondarydomain.test.conf"
|
||||
|
||||
def setup_function(function):
    """Start each test from a clean slate.

    Drop any checksum left over from a previous (possibly failed) run
    before asserting/cleaning the system state.
    """
    _force_clear_hashes([TEST_DOMAIN_NGINX_CONFIG])
    clean()
|
||||
|
||||
def teardown_function(function):
    """Restore a clean system state and forget the test domain's conf hash."""
    clean()
    _force_clear_hashes([TEST_DOMAIN_NGINX_CONFIG])
|
||||
|
||||
def clean():
    """Remove the test domain (if any) and assert the system is sane again."""
    # These tests are meaningless if the core services aren't up
    assert os.system("pgrep slapd >/dev/null") == 0
    assert os.system("pgrep nginx >/dev/null") == 0

    nginx_conf = TEST_DOMAIN_NGINX_CONFIG

    if TEST_DOMAIN in domain_list()["domains"]:
        domain_remove(TEST_DOMAIN)
        # Removing the domain is expected to delete its nginx conf
        assert not os.path.exists(nginx_conf)

    # Belt and braces: force-delete the conf file in case it's still around
    os.system("rm -f %s" % nginx_conf)

    # nginx conf as a whole must still be valid
    assert os.system("nginx -t 2>/dev/null") == 0

    assert not os.path.exists(nginx_conf)
    assert nginx_conf not in _get_conf_hashes("nginx")
    assert nginx_conf not in manually_modified_files()
|
||||
|
||||
|
||||
def test_add_domain():
    """Adding a domain creates its nginx conf and registers its hash."""
    domain_add(TEST_DOMAIN)

    assert TEST_DOMAIN in domain_list()["domains"]

    assert os.path.exists(TEST_DOMAIN_NGINX_CONFIG)

    # The freshly generated conf is tracked and not flagged as hand-edited
    assert TEST_DOMAIN_NGINX_CONFIG in _get_conf_hashes("nginx")
    assert TEST_DOMAIN_NGINX_CONFIG not in manually_modified_files()
|
||||
|
||||
|
||||
def test_add_and_edit_domain_conf():
    """A hand-edited domain conf gets reported as manually modified."""
    domain_add(TEST_DOMAIN)

    assert os.path.exists(TEST_DOMAIN_NGINX_CONFIG)
    assert TEST_DOMAIN_NGINX_CONFIG in _get_conf_hashes("nginx")
    assert TEST_DOMAIN_NGINX_CONFIG not in manually_modified_files()

    # Simulate a manual tweak of the conf file
    os.system("echo ' ' >> %s" % TEST_DOMAIN_NGINX_CONFIG)

    assert TEST_DOMAIN_NGINX_CONFIG in manually_modified_files()
|
||||
|
||||
|
||||
def test_add_domain_conf_already_exists():
    """domain_add overwrites a stray pre-existing conf and tracks it normally."""
    # Create a leftover conf file before the domain even exists
    os.system("echo ' ' >> %s" % TEST_DOMAIN_NGINX_CONFIG)

    domain_add(TEST_DOMAIN)

    assert os.path.exists(TEST_DOMAIN_NGINX_CONFIG)
    assert TEST_DOMAIN_NGINX_CONFIG in _get_conf_hashes("nginx")
    assert TEST_DOMAIN_NGINX_CONFIG not in manually_modified_files()
|
Loading…
Add table
Reference in a new issue