Merge branch 'unstable' into services_dbus

This commit is contained in:
Bram 2018-05-14 04:06:29 +02:00 committed by GitHub
commit f114e378e5
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
12 changed files with 2735 additions and 575 deletions

View file

@ -37,16 +37,23 @@ ynh_get_plain_key() {
ynh_restore_upgradebackup () { ynh_restore_upgradebackup () {
echo "Upgrade failed." >&2 echo "Upgrade failed." >&2
local app_bck=${app//_/-} # Replace all '_' by '-' local app_bck=${app//_/-} # Replace all '_' by '-'
# Check if an existing backup can be found before removing and restoring the application. NO_BACKUP_UPGRADE=${NO_BACKUP_UPGRADE:-0}
if sudo yunohost backup list | grep -q $app_bck-pre-upgrade$backup_number
then if [ "$NO_BACKUP_UPGRADE" -eq 0 ]
# Remove the application then restore it then
sudo yunohost app remove $app # Check if an existing backup can be found before removing and restoring the application.
# Restore the backup if sudo yunohost backup list | grep -q $app_bck-pre-upgrade$backup_number
sudo yunohost backup restore --ignore-system $app_bck-pre-upgrade$backup_number --apps $app --force --verbose then
ynh_die "The app was restored to the way it was before the failed upgrade." # Remove the application then restore it
fi sudo yunohost app remove $app
# Restore the backup
sudo yunohost backup restore --ignore-system $app_bck-pre-upgrade$backup_number --apps $app --force --verbose
ynh_die "The app was restored to the way it was before the failed upgrade."
fi
else
echo "\$NO_BACKUP_UPGRADE is set, that means there's no backup to restore. You have to fix this upgrade by yourself !" >&2
fi
} }
# Make a backup in case of failed upgrade # Make a backup in case of failed upgrade
@ -67,28 +74,34 @@ ynh_backup_before_upgrade () {
backup_number=1 backup_number=1
local old_backup_number=2 local old_backup_number=2
local app_bck=${app//_/-} # Replace all '_' by '-' local app_bck=${app//_/-} # Replace all '_' by '-'
NO_BACKUP_UPGRADE=${NO_BACKUP_UPGRADE:-0}
# Check if a backup already exists with the prefix 1 if [ "$NO_BACKUP_UPGRADE" -eq 0 ]
if sudo yunohost backup list | grep -q $app_bck-pre-upgrade1 then
then # Check if a backup already exists with the prefix 1
# Prefix becomes 2 to preserve the previous backup if sudo yunohost backup list | grep -q $app_bck-pre-upgrade1
backup_number=2
old_backup_number=1
fi
# Create backup
sudo BACKUP_CORE_ONLY=1 yunohost backup create --ignore-system --apps $app --name $app_bck-pre-upgrade$backup_number --verbose
if [ "$?" -eq 0 ]
then
# If the backup succeeded, remove the previous backup
if sudo yunohost backup list | grep -q $app_bck-pre-upgrade$old_backup_number
then then
# Remove the previous backup only if it exists # Prefix becomes 2 to preserve the previous backup
sudo yunohost backup delete $app_bck-pre-upgrade$old_backup_number > /dev/null backup_number=2
old_backup_number=1
fi fi
else
ynh_die "Backup failed, the upgrade process was aborted." # Create backup
fi sudo BACKUP_CORE_ONLY=1 yunohost backup create --ignore-system --apps $app --name $app_bck-pre-upgrade$backup_number --verbose
if [ "$?" -eq 0 ]
then
# If the backup succeeded, remove the previous backup
if sudo yunohost backup list | grep -q $app_bck-pre-upgrade$old_backup_number
then
# Remove the previous backup only if it exists
sudo yunohost backup delete $app_bck-pre-upgrade$old_backup_number > /dev/null
fi
else
ynh_die "Backup failed, the upgrade process was aborted."
fi
else
echo "\$NO_BACKUP_UPGRADE is set, backup will be avoided. Be careful, this upgrade is going to be operated without a security backup"
fi
} }
# Download, check integrity, uncompress and patch the source from app.src # Download, check integrity, uncompress and patch the source from app.src
@ -109,6 +122,10 @@ ynh_backup_before_upgrade () {
# # (Optionnal) Name of the local archive (offline setup support) # # (Optionnal) Name of the local archive (offline setup support)
# # default: ${src_id}.${src_format} # # default: ${src_id}.${src_format}
# SOURCE_FILENAME=example.tar.gz # SOURCE_FILENAME=example.tar.gz
# # (Optional) If it set as false don't extract the source.
# # (Useful to get a debian package or a python wheel.)
# # default: true
# SOURCE_EXTRACT=(true|false)
# #
# Details: # Details:
# This helper downloads sources from SOURCE_URL if there is no local source # This helper downloads sources from SOURCE_URL if there is no local source
@ -137,6 +154,7 @@ ynh_setup_source () {
local src_sum=$(grep 'SOURCE_SUM=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) local src_sum=$(grep 'SOURCE_SUM=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-)
local src_sumprg=$(grep 'SOURCE_SUM_PRG=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) local src_sumprg=$(grep 'SOURCE_SUM_PRG=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-)
local src_format=$(grep 'SOURCE_FORMAT=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) local src_format=$(grep 'SOURCE_FORMAT=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-)
local src_extract=$(grep 'SOURCE_EXTRACT=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-)
local src_in_subdir=$(grep 'SOURCE_IN_SUBDIR=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) local src_in_subdir=$(grep 'SOURCE_IN_SUBDIR=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-)
local src_filename=$(grep 'SOURCE_FILENAME=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) local src_filename=$(grep 'SOURCE_FILENAME=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-)
@ -145,6 +163,7 @@ ynh_setup_source () {
src_in_subdir=${src_in_subdir:-true} src_in_subdir=${src_in_subdir:-true}
src_format=${src_format:-tar.gz} src_format=${src_format:-tar.gz}
src_format=$(echo "$src_format" | tr '[:upper:]' '[:lower:]') src_format=$(echo "$src_format" | tr '[:upper:]' '[:lower:]')
src_extract=${src_extract:-true}
if [ "$src_filename" = "" ] ; then if [ "$src_filename" = "" ] ; then
src_filename="${src_id}.${src_format}" src_filename="${src_id}.${src_format}"
fi fi
@ -163,7 +182,11 @@ ynh_setup_source () {
# Extract source into the app dir # Extract source into the app dir
mkdir -p "$dest_dir" mkdir -p "$dest_dir"
if [ "$src_format" = "zip" ]
if ! "$src_extract"
then
mv $src_filename $dest_dir
elif [ "$src_format" = "zip" ]
then then
# Zip format # Zip format
# Using of a temp directory, because unzip doesn't manage --strip-components # Using of a temp directory, because unzip doesn't manage --strip-components

7
debian/changelog vendored
View file

@ -1,3 +1,10 @@
yunohost (2.7.13.2) testing; urgency=low
* [fix] Fix an error with services marked as None (#466)
* [fix] Issue with nginx not upgrading correctly /etc/nginx/nginx.conf if it was manually modified
-- Alexandre Aubin <alex.aubin@mailoo.org> Fri, 11 May 2018 02:06:42 +0000
yunohost (2.7.13.1) testing; urgency=low yunohost (2.7.13.1) testing; urgency=low
* [fix] Misc fixes on stretch migration following feedback * [fix] Misc fixes on stretch migration following feedback

View file

@ -5,7 +5,7 @@
"admin_password_changed": "تم تعديل الكلمة السرية الإدارية", "admin_password_changed": "تم تعديل الكلمة السرية الإدارية",
"app_already_installed": "{app:s} تم تنصيبه مِن قبل", "app_already_installed": "{app:s} تم تنصيبه مِن قبل",
"app_already_installed_cant_change_url": "", "app_already_installed_cant_change_url": "",
"app_already_up_to_date": "", "app_already_up_to_date": "{app:s} تم تحديثه مِن قَبل",
"app_argument_choice_invalid": "", "app_argument_choice_invalid": "",
"app_argument_invalid": "", "app_argument_invalid": "",
"app_argument_required": "", "app_argument_required": "",
@ -222,7 +222,7 @@
"migrate_tsig_wait_4": "30 ثانية …", "migrate_tsig_wait_4": "30 ثانية …",
"migrate_tsig_not_needed": "You do not appear to use a dyndns domain, so no migration is needed !", "migrate_tsig_not_needed": "You do not appear to use a dyndns domain, so no migration is needed !",
"migrations_backward": "Migrating backward.", "migrations_backward": "Migrating backward.",
"migrations_bad_value_for_target": "Invalide number for target argument, available migrations numbers are 0 or {}", "migrations_bad_value_for_target": "Invalid number for target argument, available migrations numbers are 0 or {}",
"migrations_cant_reach_migration_file": "Can't access migrations files at path %s", "migrations_cant_reach_migration_file": "Can't access migrations files at path %s",
"migrations_current_target": "Migration target is {}", "migrations_current_target": "Migration target is {}",
"migrations_error_failed_to_load_migration": "ERROR: failed to load migration {number} {name}", "migrations_error_failed_to_load_migration": "ERROR: failed to load migration {number} {name}",

View file

@ -230,7 +230,7 @@
"migration_0003_patching_sources_list": "Patching the sources.lists ...", "migration_0003_patching_sources_list": "Patching the sources.lists ...",
"migration_0003_main_upgrade": "Starting main upgrade ...", "migration_0003_main_upgrade": "Starting main upgrade ...",
"migration_0003_fail2ban_upgrade": "Starting the fail2ban upgrade ...", "migration_0003_fail2ban_upgrade": "Starting the fail2ban upgrade ...",
"migration_0003_nginx_upgrade": "Starting the nginx-common upgrade ...", "migration_0003_restoring_origin_nginx_conf": "Your file /etc/nginx/nginx.conf was edited somehow. The migration is going to reset back to its original state first... The previous file will be available as {backup_dest}.",
"migration_0003_yunohost_upgrade": "Starting the yunohost package upgrade ... The migration will end, but the actual upgrade will happen right after. After the operation is complete, you might have to re-log on the webadmin.", "migration_0003_yunohost_upgrade": "Starting the yunohost package upgrade ... The migration will end, but the actual upgrade will happen right after. After the operation is complete, you might have to re-log on the webadmin.",
"migration_0003_not_jessie": "The current debian distribution is not Jessie !", "migration_0003_not_jessie": "The current debian distribution is not Jessie !",
"migration_0003_system_not_fully_up_to_date": "Your system is not fully up to date. Please perform a regular upgrade before running the migration to stretch.", "migration_0003_system_not_fully_up_to_date": "Your system is not fully up to date. Please perform a regular upgrade before running the migration to stretch.",
@ -335,9 +335,9 @@
"service_conf_up_to_date": "The configuration is already up-to-date for service '{service}'", "service_conf_up_to_date": "The configuration is already up-to-date for service '{service}'",
"service_conf_updated": "The configuration has been updated for service '{service}'", "service_conf_updated": "The configuration has been updated for service '{service}'",
"service_conf_would_be_updated": "The configuration would have been updated for service '{service}'", "service_conf_would_be_updated": "The configuration would have been updated for service '{service}'",
"service_disable_failed": "Unable to disable service '{service:s}'", "service_disable_failed": "Unable to disable service '{service:s}'\n\nRecent service logs:{logs:s}",
"service_disabled": "The service '{service:s}' has been disabled", "service_disabled": "The service '{service:s}' has been disabled",
"service_enable_failed": "Unable to enable service '{service:s}'", "service_enable_failed": "Unable to enable service '{service:s}'\n\nRecent service logs:{logs:s}",
"service_enabled": "The service '{service:s}' has been enabled", "service_enabled": "The service '{service:s}' has been enabled",
"service_no_log": "No log to display for service '{service:s}'", "service_no_log": "No log to display for service '{service:s}'",
"service_regenconf_dry_pending_applying": "Checking pending configuration which would have been applied for service '{service}'...", "service_regenconf_dry_pending_applying": "Checking pending configuration which would have been applied for service '{service}'...",
@ -345,10 +345,10 @@
"service_regenconf_pending_applying": "Applying pending configuration for service '{service}'...", "service_regenconf_pending_applying": "Applying pending configuration for service '{service}'...",
"service_remove_failed": "Unable to remove service '{service:s}'", "service_remove_failed": "Unable to remove service '{service:s}'",
"service_removed": "The service '{service:s}' has been removed", "service_removed": "The service '{service:s}' has been removed",
"service_start_failed": "Unable to start service '{service:s}'", "service_start_failed": "Unable to start service '{service:s}'\n\nRecent service logs:{logs:s}",
"service_started": "The service '{service:s}' has been started", "service_started": "The service '{service:s}' has been started",
"service_status_failed": "Unable to determine status of service '{service:s}'", "service_status_failed": "Unable to determine status of service '{service:s}'",
"service_stop_failed": "Unable to stop service '{service:s}'", "service_stop_failed": "Unable to stop service '{service:s}'\n\nRecent service logs:{logs:s}",
"service_stopped": "The service '{service:s}' has been stopped", "service_stopped": "The service '{service:s}' has been stopped",
"service_unknown": "Unknown service '{service:s}'", "service_unknown": "Unknown service '{service:s}'",
"ssowat_conf_generated": "The SSOwat configuration has been generated", "ssowat_conf_generated": "The SSOwat configuration has been generated",

View file

@ -200,9 +200,9 @@
"service_configuration_conflict": "Le fichier {file:s} a été modifié depuis sa dernière génération. Veuillez y appliquer les modifications manuellement ou utiliser loption --force (ce qui écrasera toutes les modifications effectuées sur le fichier).", "service_configuration_conflict": "Le fichier {file:s} a été modifié depuis sa dernière génération. Veuillez y appliquer les modifications manuellement ou utiliser loption --force (ce qui écrasera toutes les modifications effectuées sur le fichier).",
"service_configured": "La configuration du service « {service:s} » a été générée avec succès", "service_configured": "La configuration du service « {service:s} » a été générée avec succès",
"service_configured_all": "La configuration de tous les services a été générée avec succès", "service_configured_all": "La configuration de tous les services a été générée avec succès",
"service_disable_failed": "Impossible de désactiver le service « {service:s} »", "service_disable_failed": "Impossible de désactiver le service « {service:s} »\n\nJournaux récents : {logs:s}",
"service_disabled": "Le service « {service:s} » a été désactivé", "service_disabled": "Le service « {service:s} » a été désactivé",
"service_enable_failed": "Impossible d'activer le service « {service:s} »", "service_enable_failed": "Impossible dactiver le service « {service:s} »\n\nJournaux récents : {logs:s}",
"service_enabled": "Le service « {service:s} » a été activé", "service_enabled": "Le service « {service:s} » a été activé",
"service_no_log": "Aucun journal à afficher pour le service « {service:s} »", "service_no_log": "Aucun journal à afficher pour le service « {service:s} »",
"service_regenconf_dry_pending_applying": "Vérification des configurations en attentes qui pourraient être appliquées pour le service « {service} »…", "service_regenconf_dry_pending_applying": "Vérification des configurations en attentes qui pourraient être appliquées pour le service « {service} »…",
@ -210,10 +210,10 @@
"service_regenconf_pending_applying": "Application des configurations en attentes pour le service « {service} »…", "service_regenconf_pending_applying": "Application des configurations en attentes pour le service « {service} »…",
"service_remove_failed": "Impossible d'enlever le service « {service:s} »", "service_remove_failed": "Impossible d'enlever le service « {service:s} »",
"service_removed": "Le service « {service:s} » a été enlevé", "service_removed": "Le service « {service:s} » a été enlevé",
"service_start_failed": "Impossible de démarrer le service « {service:s} »", "service_start_failed": "Impossible de démarrer le service « {service:s} »\n\nJournaux récents : {logs:s}",
"service_started": "Le service « {service:s} » a été démarré", "service_started": "Le service « {service:s} » a été démarré",
"service_status_failed": "Impossible de déterminer le statut du service « {service:s} »", "service_status_failed": "Impossible de déterminer le statut du service « {service:s} »",
"service_stop_failed": "Impossible d'arrêter le service « {service:s} »", "service_stop_failed": "Impossible darrêter le service « {service:s} »\n\nJournaux récents : {logs:s}",
"service_stopped": "Le service « {service:s} » a été arrêté", "service_stopped": "Le service « {service:s} » a été arrêté",
"service_unknown": "Service « {service:s} » inconnu", "service_unknown": "Service « {service:s} » inconnu",
"services_configured": "La configuration a été générée avec succès", "services_configured": "La configuration a été générée avec succès",
@ -379,5 +379,24 @@
"migrate_tsig_wait_3": "1 minute…", "migrate_tsig_wait_3": "1 minute…",
"migrate_tsig_wait_4": "30 secondes…", "migrate_tsig_wait_4": "30 secondes…",
"migrate_tsig_not_needed": "Il ne semble pas que vous utilisez un domaine dyndns, donc aucune migration nest nécessaire !", "migrate_tsig_not_needed": "Il ne semble pas que vous utilisez un domaine dyndns, donc aucune migration nest nécessaire !",
"app_checkurl_is_deprecated": "Packagers /!\\ 'app checkurl' est obsolète ! Utilisez 'app register-url' en remplacement !" "app_checkurl_is_deprecated": "Packagers /!\\ 'app checkurl' est obsolète ! Utilisez 'app register-url' en remplacement !",
"migration_description_0001_change_cert_group_to_sslcert": "Change les permissions de groupe des certificats de « metronome » à « ssl-cert »",
"migration_description_0002_migrate_to_tsig_sha256": "Améliore la sécurité de DynDNDS TSIG en utilisant SHA512 au lieu de MD5",
"migration_description_0003_migrate_to_stretch": "Mise à niveau du système vers Debian Stretch et YunoHost 3.0",
"migration_0003_backward_impossible": "La migration Stretch nest pas réversible.",
"migration_0003_start": "Démarrage de la migration vers Stretch. Les journaux seront disponibles dans {logfile}.",
"migration_0003_patching_sources_list": "Modification de sources.lists…",
"migration_0003_main_upgrade": "Démarrage de la mise à niveau principale…",
"migration_0003_fail2ban_upgrade": "Démarrage de la mise à niveau de fail2ban…",
"migration_0003_restoring_origin_nginx_conf": "Votre fichier /etc/nginx/nginx.conf a été modifié dune manière ou dune autre. La migration va dabords le réinitialiser à son état initial… Le fichier précédent sera disponible en tant que {backup_dest}.",
"migration_0003_yunohost_upgrade": "Démarrage de la mise à niveau du paquet YunoHost… La migration terminera, mais la mise à jour réelle aura lieu immédiatement après. Après cette opération terminée, vous pourriez avoir à vous reconnecter à ladministration web.",
"migration_0003_not_jessie": "La distribution Debian actuelle nest pas Jessie !",
"migration_0003_system_not_fully_up_to_date": "Votre système nest pas complètement à jour. Veuillez mener une mise à jour classique avant de lancer à migration à Stretch.",
"migration_0003_still_on_jessie_after_main_upgrade": "Quelque chose sest ma passé pendant la mise à niveau principale : le système est toujours sur Jessie ?!? Pour investiguer le problème, veuillez regarder {log} 🙁…",
"migration_0003_general_warning": "Veuillez noter que cette migration est une opération délicate. Si léquipe YunoHost a fait de son mieux pour la relire et la tester, la migration pourrait tout de même casser des parties de votre système ou de vos applications.\n\nEn conséquence, nous vous recommandons :\n - de lancer une sauvegarde de vos données ou applications critiques ;\n - dêtre patient après avoir lancé la migration : selon votre connexion internet et matériel, cela pourrait prendre jusqu'à quelques heures pour que tout soit à niveau.",
"migration_0003_problematic_apps_warning": "Veuillez noter que les applications suivantes, éventuellement problématiques, ont été détectées. Il semble quelles naient pas été installées depuis une liste dapplication ou quelles ne soit pas marquées «working ». En conséquence, nous ne pouvons pas garantir quelles fonctionneront après la mise à niveau : {problematic_apps}",
"migration_0003_modified_files": "Veuillez noter que les fichiers suivants ont été détectés comme modifiés manuellement et pourraient être écrasés à la fin de la mise à niveau : {manually_modified_files}",
"migrations_list_conflict_pending_done": "Vous ne pouvez pas utiliser --previous et --done simultanément.",
"migrations_to_be_ran_manually": "La migration {number} {name} doit être lancée manuellement. Veuillez aller dans Outils > Migration dans linterface admin, ou lancer `yunohost tools migrations migrate`.",
"migrations_need_to_accept_disclaimer": "Pour lancer la migration {number} {name}, vous devez accepter cette clause de non-responsabilité :\n---\n{disclaimer}\n---\nSi vous acceptez de lancer la migration, veuillez relancer la commande avec loption --accept-disclaimer."
} }

View file

@ -3,5 +3,67 @@
"admin_password_change_failed": "Impossible de cambiar lo senhal", "admin_password_change_failed": "Impossible de cambiar lo senhal",
"admin_password_changed": "Lo senhal d'administracion es ben estat cambiat", "admin_password_changed": "Lo senhal d'administracion es ben estat cambiat",
"app_already_installed": "{app:s} es ja installat", "app_already_installed": "{app:s} es ja installat",
"app_already_up_to_date": "{app:s} es ja a jorn" "app_already_up_to_date": "{app:s} es ja a jorn",
"installation_complete": "Installacion acabada",
"app_id_invalid": "Id daplicacion incorrècte",
"app_install_files_invalid": "Fichièrs dinstallacion incorrèctes",
"app_no_upgrade": "Pas cap daplicacion de metre a jorn",
"app_not_correctly_installed": "{app:s} sembla pas ben installat",
"app_not_installed": "{app:s} es pas installat",
"app_not_properly_removed": "{app:s} es pas estat corrèctament suprimit",
"app_removed": "{app:s} es estat suprimit",
"app_unknown": "Aplicacion desconeguda",
"app_upgrade_app_name": "Mesa a jorn de laplicacion {app}...",
"app_upgrade_failed": "Impossible de metre a jorn {app:s}",
"app_upgrade_some_app_failed": "Daplicacions se pòdon pas metre a jorn",
"app_upgraded": "{app:s} es estat mes a jorn",
"appslist_fetched": "Recuperacion de la lista daplicacions {appslist:s} corrèctament realizada",
"appslist_migrating": "Migracion de la lista daplicacion{appslist:s}…",
"appslist_name_already_tracked": "I a ja una lista daplicacion enregistrada amb lo nom {name:s}.",
"appslist_removed": "Supression de la lista daplicacions {appslist:s} corrèctament realizada",
"appslist_retrieve_bad_format": "Lo fichièr recuperat per la lista daplicacions {appslist:s} es pas valid",
"appslist_unknown": "La lista daplicacions {appslist:s} es desconeguda.",
"appslist_url_already_tracked": "I a ja una lista daplicacions enregistrada amb lURL {url:s}.",
"ask_current_admin_password": "Senhal administrator actual",
"ask_email": "Adreça de corrièl",
"ask_firstname": "Prenom",
"ask_lastname": "Nom",
"ask_list_to_remove": "Lista de suprimir",
"ask_main_domain": "Domeni màger",
"ask_new_admin_password": "Nòu senhal administrator",
"ask_password": "Senhal",
"ask_path": "Camin",
"backup_action_required": "Devètz precisar çò que cal salvagardar",
"backup_app_failed": "Impossible de salvagardar laplicacion « {app:s} »",
"backup_applying_method_copy": "Còpia de totes los fichièrs dins la salvagarda…",
"backup_applying_method_tar": "Creacion de larchiu tar de la salvagarda…",
"backup_archive_name_exists": "Un archiu de salvagarda amb aquesta nom existís ja",
"backup_archive_name_unknown": "Larchiu local de salvagarda apelat « {name:s} » es desconegut",
"action_invalid": "Accion « {action:s} » incorrècte",
"app_argument_choice_invalid": "Causida invalida pel paramètre « {name:s} », cal que siá un de {choices:s}",
"app_argument_invalid": "Valor invalida pel paramètre « {name:s} » : {error:s}",
"app_argument_required": "Lo paramètre « {name:s} » es requesit",
"app_change_url_failed_nginx_reload": "La reaviada de nginx a fracassat. Vaquí la sortida de «nginx -t»:\n{nginx_errors:s}",
"app_change_url_identical_domains": "Lancian e lo novèl coble domeni/camin son identics per {domain:s}{path:s}, pas res a far.",
"app_change_url_success": "LURL de laplicacion {app:s} a cambiat per {domain:s}{path:s}",
"app_checkurl_is_deprecated": "Packagers /!\\ 'app checkurl' es obsolèt! Utilizatz 'app register-url' a la plaça!",
"app_extraction_failed": "Extraccion dels fichièrs dinstallacion impossibla",
"app_incompatible": "Laplicacion {app} es pas compatibla amb vòstra version de YunoHost",
"app_location_already_used": "Laplicacion « {app} » es ja installada a aqueste emplaçament ({path})",
"app_manifest_invalid": "Manifest daplicacion incorrècte: {error}",
"app_package_need_update": "Lo paquet de laplicacion {app} deu èsser mes a jorn per seguir los cambiaments de YunoHost",
"app_requirements_checking": "Verificacion dels paquets requesida per {app}...",
"app_sources_fetch_failed": "Recuperacion dels fichièrs fonts impossibla",
"app_unsupported_remote_type": "Lo tipe alonhat utilizat per laplicacion es pas suportat",
"appslist_retrieve_error": "Impossible de recuperar la lista daplicacions alonhadas {appslist:s}: {error:s}",
"backup_archive_app_not_found": "Laplicacion « {app:s} » es pas estada trobada dins larchiu de la salvagarda",
"backup_archive_broken_link": "Impossible daccedir a larchiu de salvagarda (ligam invalid cap a {path:s})",
"backup_archive_mount_failed": "Lo montatge de larchiu de salvagarda a fracassat",
"backup_archive_open_failed": "Impossible de dobrir larchiu de salvagarda",
"backup_archive_system_part_not_available": "La part « {part:s} » del sistèma es pas disponibla dins aquesta salvagarda",
"backup_cleaning_failed": "Impossible de netejar lo repertòri temporari de salvagarda",
"backup_copying_to_organize_the_archive": "Còpia de {size:s} Mio per organizar larchiu",
"backup_created": "Salvagarda acabada",
"backup_creating_archive": "Creacion de larchiu de salvagarda...",
"backup_creation_failed": "Impossible de crear la salvagarda"
} }

View file

@ -167,5 +167,6 @@
"app_already_up_to_date": "{app:s} já está atualizado", "app_already_up_to_date": "{app:s} já está atualizado",
"app_argument_choice_invalid": "Escolha inválida para o argumento '{name:s}', deve ser um dos {choices:s}", "app_argument_choice_invalid": "Escolha inválida para o argumento '{name:s}', deve ser um dos {choices:s}",
"app_argument_invalid": "Valor inválido de argumento '{name:s}': {error:s}", "app_argument_invalid": "Valor inválido de argumento '{name:s}': {error:s}",
"app_argument_required": "O argumento '{name:s}' é obrigatório" "app_argument_required": "O argumento '{name:s}' é obrigatório",
"app_change_url_failed_nginx_reload": "Falha ao reiniciar o nginx. Aqui está o retorno de 'nginx -t':\n{nginx_errors:s}"
} }

View file

@ -39,18 +39,20 @@ class MyMigration(Migration):
logger.warning(m18n.n("migration_0003_start", logfile=self.logfile)) logger.warning(m18n.n("migration_0003_start", logfile=self.logfile))
# Preparing the upgrade # Preparing the upgrade
self.restore_original_nginx_conf_if_needed()
logger.warning(m18n.n("migration_0003_patching_sources_list")) logger.warning(m18n.n("migration_0003_patching_sources_list"))
self.patch_apt_sources_list() self.patch_apt_sources_list()
self.backup_files_to_keep() self.backup_files_to_keep()
self.apt_update() self.apt_update()
apps_packages = self.get_apps_equivs_packages() apps_packages = self.get_apps_equivs_packages()
self.unhold(["metronome"]) self.unhold(["metronome"])
self.hold(YUNOHOST_PACKAGES + apps_packages + ["fail2ban", "nginx-common"]) self.hold(YUNOHOST_PACKAGES + apps_packages + ["fail2ban"])
# Main dist-upgrade # Main dist-upgrade
logger.warning(m18n.n("migration_0003_main_upgrade")) logger.warning(m18n.n("migration_0003_main_upgrade"))
_run_service_command("stop", "mysql") _run_service_command("stop", "mysql")
self.apt_dist_upgrade(conf_flags=["old", "def"]) self.apt_dist_upgrade(conf_flags=["old", "miss", "def"])
_run_service_command("start", "mysql") _run_service_command("start", "mysql")
if self.debian_major_version() == 8: if self.debian_major_version() == 8:
raise MoulinetteError(m18n.n("migration_0003_still_on_jessie_after_main_upgrade", log=self.logfile)) raise MoulinetteError(m18n.n("migration_0003_still_on_jessie_after_main_upgrade", log=self.logfile))
@ -66,11 +68,6 @@ class MyMigration(Migration):
self.apt_dist_upgrade(conf_flags=["new", "miss", "def"]) self.apt_dist_upgrade(conf_flags=["new", "miss", "def"])
_run_service_command("restart", "fail2ban") _run_service_command("restart", "fail2ban")
# Specific upgrade for nginx-common...
logger.warning(m18n.n("migration_0003_nginx_upgrade"))
self.unhold(["nginx-common"])
self.apt_dist_upgrade(conf_flags=["new", "def"])
# Clean the mess # Clean the mess
os.system("apt autoremove --assume-yes") os.system("apt autoremove --assume-yes")
os.system("apt clean --assume-yes") os.system("apt clean --assume-yes")
@ -297,3 +294,59 @@ class MyMigration(Migration):
for f in self.files_to_keep: for f in self.files_to_keep:
dest_file = f.strip('/').replace("/", "_") dest_file = f.strip('/').replace("/", "_")
copy2(os.path.join(tmp_dir, dest_file), f) copy2(os.path.join(tmp_dir, dest_file), f)
# On some setups, /etc/nginx/nginx.conf got edited. But this file needs
# to be upgraded because of the way the new module system works for nginx.
# (in particular, having the line that include the modules at the top)
#
# So here, if it got edited, we force the restore of the original conf
# *before* starting the actual upgrade...
#
# An alternative strategy that was attempted was to hold the nginx-common
# package and have a specific upgrade for it like for fail2ban, but that
# leads to apt complaining about not being able to upgrade for shitty
# reasons >.>
def restore_original_nginx_conf_if_needed(self):
if "/etc/nginx/nginx.conf" not in manually_modified_files_compared_to_debian_default():
return
if not os.path.exists("/etc/nginx/nginx.conf"):
return
# If stretch is in the sources.list, we already started migrating on
# stretch so we don't re-do this
if " stretch " in read_file("/etc/apt/sources.list"):
return
backup_dest = "/home/yunohost.conf/backup/nginx.conf.bkp_before_stretch"
logger.warning(m18n.n("migration_0003_restoring_origin_nginx_conf",
backup_dest=backup_dest))
os.system("mv /etc/nginx/nginx.conf %s" % backup_dest)
command = ""
command += " DEBIAN_FRONTEND=noninteractive"
command += " APT_LISTCHANGES_FRONTEND=none"
command += " apt-get"
command += " --fix-broken --show-upgraded --assume-yes"
command += ' -o Dpkg::Options::="--force-confmiss"'
command += " install --reinstall"
command += " nginx-common"
logger.debug("Running apt command :\n{}".format(command))
command += " 2>&1 | tee -a {}".format(self.logfile)
is_api = msettings.get('interface') == 'api'
if is_api:
callbacks = (
lambda l: logger.info(l.rstrip()),
lambda l: logger.warning(l.rstrip()),
)
call_async_output(command, callbacks, shell=True)
else:
# We do this when running from the cli to have the output of the
# command showing in the terminal, since 'info' channel is only
# enabled if the user explicitly add --verbose ...
os.system(command)

View file

@ -76,6 +76,7 @@ def service_add(name, status=None, log=None, runlevel=None):
try: try:
_save_services(services) _save_services(services)
except: except:
# we'll get a logger.warning with more details in _save_services
raise MoulinetteError(errno.EIO, m18n.n('service_add_failed', service=name)) raise MoulinetteError(errno.EIO, m18n.n('service_add_failed', service=name))
logger.success(m18n.n('service_added', service=name)) logger.success(m18n.n('service_added', service=name))
@ -99,6 +100,7 @@ def service_remove(name):
try: try:
_save_services(services) _save_services(services)
except: except:
# we'll get a logger.warning with more details in _save_services
raise MoulinetteError(errno.EIO, m18n.n('service_remove_failed', service=name)) raise MoulinetteError(errno.EIO, m18n.n('service_remove_failed', service=name))
logger.success(m18n.n('service_removed', service=name)) logger.success(m18n.n('service_removed', service=name))
@ -114,13 +116,16 @@ def service_start(names):
""" """
if isinstance(names, str): if isinstance(names, str):
names = [names] names = [names]
for name in names: for name in names:
if _run_service_command('start', name): if _run_service_command('start', name):
logger.success(m18n.n('service_started', service=name)) logger.success(m18n.n('service_started', service=name))
else: else:
if service_status(name)['status'] != 'running': if service_status(name)['status'] != 'running':
raise MoulinetteError(errno.EPERM, raise MoulinetteError(errno.EPERM,
m18n.n('service_start_failed', service=name)) m18n.n('service_start_failed',
service=name,
logs=_get_journalctl_logs(name)))
logger.info(m18n.n('service_already_started', service=name)) logger.info(m18n.n('service_already_started', service=name))
@ -140,7 +145,9 @@ def service_stop(names):
else: else:
if service_status(name)['status'] != 'inactive': if service_status(name)['status'] != 'inactive':
raise MoulinetteError(errno.EPERM, raise MoulinetteError(errno.EPERM,
m18n.n('service_stop_failed', service=name)) m18n.n('service_stop_failed',
service=name,
logs=_get_journalctl_logs(name)))
logger.info(m18n.n('service_already_stopped', service=name)) logger.info(m18n.n('service_already_stopped', service=name))
@ -159,7 +166,9 @@ def service_enable(names):
logger.success(m18n.n('service_enabled', service=name)) logger.success(m18n.n('service_enabled', service=name))
else: else:
raise MoulinetteError(errno.EPERM, raise MoulinetteError(errno.EPERM,
m18n.n('service_enable_failed', service=name)) m18n.n('service_enable_failed',
service=name,
logs=_get_journalctl_logs(name)))
def service_disable(names): def service_disable(names):
@ -177,7 +186,9 @@ def service_disable(names):
logger.success(m18n.n('service_disabled', service=name)) logger.success(m18n.n('service_disabled', service=name))
else: else:
raise MoulinetteError(errno.EPERM, raise MoulinetteError(errno.EPERM,
m18n.n('service_disable_failed', service=name)) m18n.n('service_disable_failed',
service=name,
logs=_get_journalctl_logs(name)))
def service_status(names=[]): def service_status(names=[]):
@ -265,21 +276,33 @@ def service_log(name, number=50):
if name not in services.keys(): if name not in services.keys():
raise MoulinetteError(errno.EINVAL, m18n.n('service_unknown', service=name)) raise MoulinetteError(errno.EINVAL, m18n.n('service_unknown', service=name))
if 'log' in services[name]: if 'log' not in services[name]:
log_list = services[name]['log']
result = {}
if not isinstance(log_list, list):
log_list = [log_list]
for log_path in log_list:
if os.path.isdir(log_path):
for log in [f for f in os.listdir(log_path) if os.path.isfile(os.path.join(log_path, f)) and f[-4:] == '.log']:
result[os.path.join(log_path, log)] = _tail(os.path.join(log_path, log), int(number))
else:
result[log_path] = _tail(log_path, int(number))
else:
raise MoulinetteError(errno.EPERM, m18n.n('service_no_log', service=name)) raise MoulinetteError(errno.EPERM, m18n.n('service_no_log', service=name))
log_list = services[name]['log']
if not isinstance(log_list, list):
log_list = [log_list]
result = {}
for log_path in log_list:
# log is a file, read it
if not os.path.isdir(log_path):
result[log_path] = _tail(log_path, int(number))
continue
for log_file in os.listdir(log_path):
log_file_path = os.path.join(log_path, log_file)
# not a file : skip
if not os.path.isfile(log_file_path):
continue
if not log_file.endswith(".log"):
continue
result[log_file_path] = _tail(log_file_path, int(number))
return result return result
@ -301,14 +324,19 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False,
# Return the list of pending conf # Return the list of pending conf
if list_pending: if list_pending:
pending_conf = _get_pending_conf(names) pending_conf = _get_pending_conf(names)
if with_diff:
for service, conf_files in pending_conf.items(): if not with_diff:
for system_path, pending_path in conf_files.items(): return pending_conf
pending_conf[service][system_path] = {
'pending_conf': pending_path, for service, conf_files in pending_conf.items():
'diff': _get_files_diff( for system_path, pending_path in conf_files.items():
system_path, pending_path, True),
} pending_conf[service][system_path] = {
'pending_conf': pending_path,
'diff': _get_files_diff(
system_path, pending_path, True),
}
return pending_conf return pending_conf
# Clean pending conf directory # Clean pending conf directory
@ -332,12 +360,15 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False,
# create the pending conf directory for the service # create the pending conf directory for the service
service_pending_path = os.path.join(PENDING_CONF_DIR, name) service_pending_path = os.path.join(PENDING_CONF_DIR, name)
filesystem.mkdir(service_pending_path, 0755, True, uid='admin') filesystem.mkdir(service_pending_path, 0755, True, uid='admin')
# return the arguments to pass to the script # return the arguments to pass to the script
return pre_args + [service_pending_path, ] return pre_args + [service_pending_path, ]
pre_result = hook_callback('conf_regen', names, pre_callback=_pre_call) pre_result = hook_callback('conf_regen', names, pre_callback=_pre_call)
# Update the services name # Update the services name
names = pre_result['succeed'].keys() names = pre_result['succeed'].keys()
if not names: if not names:
raise MoulinetteError(errno.EIO, raise MoulinetteError(errno.EIO,
m18n.n('service_regenconf_failed', m18n.n('service_regenconf_failed',
@ -395,6 +426,7 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False,
'service_conf_file_manually_removed', 'service_conf_file_manually_removed',
conf=system_path)) conf=system_path))
conf_status = 'removed' conf_status = 'removed'
# -> system conf is not managed yet # -> system conf is not managed yet
elif not saved_hash: elif not saved_hash:
logger.debug("> system conf is not managed yet") logger.debug("> system conf is not managed yet")
@ -418,6 +450,7 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False,
logger.warning(m18n.n('service_conf_file_kept_back', logger.warning(m18n.n('service_conf_file_kept_back',
conf=system_path, service=service)) conf=system_path, service=service))
conf_status = 'unmanaged' conf_status = 'unmanaged'
# -> system conf has not been manually modified # -> system conf has not been manually modified
elif system_hash == saved_hash: elif system_hash == saved_hash:
if to_remove: if to_remove:
@ -430,6 +463,7 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False,
logger.debug("> system conf is already up-to-date") logger.debug("> system conf is already up-to-date")
os.remove(pending_path) os.remove(pending_path)
continue continue
else: else:
logger.debug("> system conf has been manually modified") logger.debug("> system conf has been manually modified")
if system_hash == new_hash: if system_hash == new_hash:
@ -466,6 +500,7 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False,
'service_conf_updated' if not dry_run else 'service_conf_updated' if not dry_run else
'service_conf_would_be_updated', 'service_conf_would_be_updated',
service=service)) service=service))
if succeed_regen and not dry_run: if succeed_regen and not dry_run:
_update_conf_hashes(service, conf_hashes) _update_conf_hashes(service, conf_hashes)
@ -489,6 +524,7 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False,
else: else:
regen_conf_files = '' regen_conf_files = ''
return post_args + [regen_conf_files, ] return post_args + [regen_conf_files, ]
hook_callback('conf_regen', names, pre_callback=_pre_call) hook_callback('conf_regen', names, pre_callback=_pre_call)
return result return result
@ -507,11 +543,11 @@ def _run_service_command(action, service):
if service not in services.keys(): if service not in services.keys():
raise MoulinetteError(errno.EINVAL, m18n.n('service_unknown', service=service)) raise MoulinetteError(errno.EINVAL, m18n.n('service_unknown', service=service))
cmd = None possible_actions = ['start', 'stop', 'restart', 'reload', 'enable', 'disable']
if action in ['start', 'stop', 'restart', 'reload', 'enable', 'disable']: if action not in possible_actions:
cmd = 'systemctl %s %s' % (action, service) raise ValueError("Unknown action '%s', available actions are: %s" % (action, ", ".join(possible_actions)))
else:
raise ValueError("Unknown action '%s'" % action) cmd = 'systemctl %s %s' % (action, service)
need_lock = services[service].get('need_lock', False) \ need_lock = services[service].get('need_lock', False) \
and action in ['start', 'stop', 'restart', 'reload'] and action in ['start', 'stop', 'restart', 'reload']
@ -526,14 +562,17 @@ def _run_service_command(action, service):
PID = _give_lock(action, service, p) PID = _give_lock(action, service, p)
# Wait for the command to complete # Wait for the command to complete
p.communicate() p.communicate()
# Remove the lock if one was given
if need_lock and PID != 0:
_remove_lock(PID)
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
# TODO: Log output? # TODO: Log output?
logger.warning(m18n.n('service_cmd_exec_failed', command=' '.join(e.cmd))) logger.warning(m18n.n('service_cmd_exec_failed', command=' '.join(e.cmd)))
return False return False
finally:
# Remove the lock if one was given
if need_lock and PID != 0:
_remove_lock(PID)
return True return True
@ -566,6 +605,7 @@ def _give_lock(action, service, p):
return son_PID return son_PID
def _remove_lock(PID_to_remove): def _remove_lock(PID_to_remove):
# FIXME ironically not concurrency safe because it's not atomic...
PIDs = filesystem.read_file(MOULINETTE_LOCK).split("\n") PIDs = filesystem.read_file(MOULINETTE_LOCK).split("\n")
PIDs_to_keep = [ PID for PID in PIDs if int(PID) != PID_to_remove ] PIDs_to_keep = [ PID for PID in PIDs if int(PID) != PID_to_remove ]
@ -600,12 +640,15 @@ def _save_services(services):
services -- A dict of managed services with their parameters services -- A dict of managed services with their parameters
""" """
# TODO: Save to custom services.yml try:
with open('/etc/yunohost/services.yml', 'w') as f: with open('/etc/yunohost/services.yml', 'w') as f:
yaml.safe_dump(services, f, default_flow_style=False) yaml.safe_dump(services, f, default_flow_style=False)
except Exception as e:
logger.warning('Error while saving services, exception: %s', e, exc_info=1)
raise
def _tail(file, n, offset=None): def _tail(file, n):
""" """
Reads a n lines from f with an offset of offset lines. The return Reads a n lines from f with an offset of offset lines. The return
value is a tuple in the form ``(lines, has_more)`` where `has_more` is value is a tuple in the form ``(lines, has_more)`` where `has_more` is
@ -613,7 +656,7 @@ def _tail(file, n, offset=None):
""" """
avg_line_length = 74 avg_line_length = 74
to_read = n + (offset or 0) to_read = n
try: try:
with open(file, 'r') as f: with open(file, 'r') as f:
@ -624,13 +667,17 @@ def _tail(file, n, offset=None):
# woops. apparently file is smaller than what we want # woops. apparently file is smaller than what we want
# to step back, go to the beginning instead # to step back, go to the beginning instead
f.seek(0) f.seek(0)
pos = f.tell() pos = f.tell()
lines = f.read().splitlines() lines = f.read().splitlines()
if len(lines) >= to_read or pos == 0: if len(lines) >= to_read or pos == 0:
return lines[-to_read:offset and -offset or None] return lines[-to_read]
avg_line_length *= 1.3 avg_line_length *= 1.3
except IOError: except IOError as e:
logger.warning("Error while tailing file '%s': %s", file, e, exc_info=1)
return [] return []
@ -642,36 +689,39 @@ def _get_files_diff(orig_file, new_file, as_string=False, skip_header=True):
header can also be removed if skip_header is True. header can also be removed if skip_header is True.
""" """
contents = [[], []] with open(orig_file, 'r') as orig_file:
for i, path in enumerate((orig_file, new_file)): orig_file = orig_file.readlines()
try:
with open(path, 'r') as f: with open(new_file, 'r') as new_file:
contents[i] = f.readlines() new_file.readlines()
except IOError:
pass
# Compare files and format output # Compare files and format output
diff = unified_diff(contents[0], contents[1]) diff = unified_diff(orig_file, new_file)
if skip_header: if skip_header:
for i in range(2): try:
try: next(diff)
next(diff) next(diff)
except: except:
break pass
if as_string: if as_string:
result = ''.join(line for line in diff) return ''.join(diff).rstrip()
return result.rstrip()
return diff return diff
def _calculate_hash(path): def _calculate_hash(path):
"""Calculate the MD5 hash of a file""" """Calculate the MD5 hash of a file"""
hasher = hashlib.md5() hasher = hashlib.md5()
try: try:
with open(path, 'rb') as f: with open(path, 'rb') as f:
hasher.update(f.read()) hasher.update(f.read())
return hasher.hexdigest() return hasher.hexdigest()
except IOError:
except IOError as e:
logger.warning("Error while calculating file '%s' hash: %s", path, e, exc_info=1)
return None return None
@ -687,25 +737,33 @@ def _get_pending_conf(services=[]):
""" """
result = {} result = {}
if not os.path.isdir(PENDING_CONF_DIR): if not os.path.isdir(PENDING_CONF_DIR):
return result return result
if not services: if not services:
services = os.listdir(PENDING_CONF_DIR) services = os.listdir(PENDING_CONF_DIR)
for name in services: for name in services:
service_pending_path = os.path.join(PENDING_CONF_DIR, name) service_pending_path = os.path.join(PENDING_CONF_DIR, name)
if not os.path.isdir(service_pending_path): if not os.path.isdir(service_pending_path):
continue continue
path_index = len(service_pending_path) path_index = len(service_pending_path)
service_conf = {} service_conf = {}
for root, dirs, files in os.walk(service_pending_path): for root, dirs, files in os.walk(service_pending_path):
for filename in files: for filename in files:
pending_path = os.path.join(root, filename) pending_path = os.path.join(root, filename)
service_conf[pending_path[path_index:]] = pending_path service_conf[pending_path[path_index:]] = pending_path
if service_conf: if service_conf:
result[name] = service_conf result[name] = service_conf
else: else:
# remove empty directory # remove empty directory
shutil.rmtree(service_pending_path, ignore_errors=True) shutil.rmtree(service_pending_path, ignore_errors=True)
return result return result
@ -717,9 +775,11 @@ def _get_conf_hashes(service):
if service not in services: if service not in services:
logger.debug("Service %s is not in services.yml yet.", service) logger.debug("Service %s is not in services.yml yet.", service)
return {} return {}
elif services[service] is None or 'conffiles' not in services[service]: elif services[service] is None or 'conffiles' not in services[service]:
logger.debug("No configuration files for service %s.", service) logger.debug("No configuration files for service %s.", service)
return {} return {}
else: else:
return services[service]['conffiles'] return services[service]['conffiles']
@ -752,11 +812,14 @@ def _process_regen_conf(system_conf, new_conf=None, save=True):
backup_path = os.path.join(BACKUP_CONF_DIR, '{0}-{1}'.format( backup_path = os.path.join(BACKUP_CONF_DIR, '{0}-{1}'.format(
system_conf.lstrip('/'), time.strftime("%Y%m%d.%H%M%S"))) system_conf.lstrip('/'), time.strftime("%Y%m%d.%H%M%S")))
backup_dir = os.path.dirname(backup_path) backup_dir = os.path.dirname(backup_path)
if not os.path.isdir(backup_dir): if not os.path.isdir(backup_dir):
filesystem.mkdir(backup_dir, 0755, True) filesystem.mkdir(backup_dir, 0755, True)
shutil.copy2(system_conf, backup_path) shutil.copy2(system_conf, backup_path)
logger.info(m18n.n('service_conf_file_backed_up', logger.info(m18n.n('service_conf_file_backed_up',
conf=system_conf, backup=backup_path)) conf=system_conf, backup=backup_path))
try: try:
if not new_conf: if not new_conf:
os.remove(system_conf) os.remove(system_conf)
@ -764,19 +827,26 @@ def _process_regen_conf(system_conf, new_conf=None, save=True):
conf=system_conf)) conf=system_conf))
else: else:
system_dir = os.path.dirname(system_conf) system_dir = os.path.dirname(system_conf)
if not os.path.isdir(system_dir): if not os.path.isdir(system_dir):
filesystem.mkdir(system_dir, 0755, True) filesystem.mkdir(system_dir, 0755, True)
shutil.copyfile(new_conf, system_conf) shutil.copyfile(new_conf, system_conf)
logger.info(m18n.n('service_conf_file_updated', logger.info(m18n.n('service_conf_file_updated',
conf=system_conf)) conf=system_conf))
except: except Exception as e:
logger.warning("Exception while trying to regenerate conf '%s': %s", system_conf, e, exc_info=1)
if not new_conf and os.path.exists(system_conf): if not new_conf and os.path.exists(system_conf):
logger.warning(m18n.n('service_conf_file_remove_failed', logger.warning(m18n.n('service_conf_file_remove_failed',
conf=system_conf), conf=system_conf),
exc_info=1) exc_info=1)
return False return False
elif new_conf: elif new_conf:
try: try:
# From documentation:
# Raise an exception if an os.stat() call on either pathname fails.
# (os.stat returns a series of information from a file like type, size...)
copy_succeed = os.path.samefile(system_conf, new_conf) copy_succeed = os.path.samefile(system_conf, new_conf)
except: except:
copy_succeed = False copy_succeed = False
@ -786,8 +856,10 @@ def _process_regen_conf(system_conf, new_conf=None, save=True):
conf=system_conf, new=new_conf), conf=system_conf, new=new_conf),
exc_info=1) exc_info=1)
return False return False
return True return True
def manually_modified_files(): def manually_modified_files():
# We do this to have --quiet, i.e. don't throw a whole bunch of logs # We do this to have --quiet, i.e. don't throw a whole bunch of logs
@ -810,6 +882,14 @@ def manually_modified_files():
return output return output
def _get_journalctl_logs(service):
try:
return subprocess.check_output("journalctl -xn -u %s" % service, shell=True)
except:
import traceback
return "error while get services logs from journalctl:\n%s" % traceback.format_exc()
def manually_modified_files_compared_to_debian_default(): def manually_modified_files_compared_to_debian_default():
# from https://serverfault.com/a/90401 # from https://serverfault.com/a/90401
@ -818,4 +898,3 @@ def manually_modified_files_compared_to_debian_default():
| md5sum -c 2>/dev/null \ | md5sum -c 2>/dev/null \
| awk -F': ' '$2 !~ /OK/{print $1}'", shell=True) | awk -F': ' '$2 !~ /OK/{print $1}'", shell=True)
return r.strip().split("\n") return r.strip().split("\n")

View file

@ -666,7 +666,7 @@ def _check_if_vulnerable_to_meltdown():
stderr=subprocess.STDOUT) stderr=subprocess.STDOUT)
output, _ = call.communicate() output, _ = call.communicate()
assert call.returncode == 0 assert call.returncode in (0, 2, 3), "Return code: %s" % call.returncode
CVEs = json.loads(output) CVEs = json.loads(output)
assert len(CVEs) == 1 assert len(CVEs) == 1

View file

@ -1,16 +1,57 @@
Spectre & Meltdown Checker Spectre & Meltdown Checker
========================== ==========================
A simple shell script to tell if your Linux installation is vulnerable against the 3 "speculative execution" CVEs that were made public early 2018. A shell script to tell if your system is vulnerable against the 3 "speculative execution" CVEs that were made public early 2018.
Without options, it'll inspect your currently running kernel. Supported operating systems:
You can also specify a kernel image on the command line, if you'd like to inspect a kernel you're not running. - Linux (all versions, flavors and distros)
- BSD (FreeBSD, NetBSD, DragonFlyBSD)
The script will do its best to detect mitigations, including backported non-vanilla patches, regardless of the advertised kernel version number. Supported architectures:
- x86 (32 bits)
- amd64/x86_64 (64 bits)
- ARM and ARM64
- other architectures will work, but mitigations (if they exist) might not always be detected
For Linux systems, the script will detect mitigations, including backported non-vanilla patches, regardless of the advertised kernel version number and the distribution (such as Debian, Ubuntu, CentOS, RHEL, Fedora, openSUSE, Arch, ...), it also works if you've compiled your own kernel.
For BSD systems, the detection will work as long as the BSD you're using supports `cpuctl` and `linprocfs` (this is not the case of OpenBSD for example).
## Easy way to run the script
- Get the latest version of the script using `curl` *or* `wget`
```bash
curl -L https://meltdown.ovh -o spectre-meltdown-checker.sh
wget https://meltdown.ovh -O spectre-meltdown-checker.sh
```
- Inspect the script. You never blindly run scripts you downloaded from the Internet, do you?
```bash
vim spectre-meltdown-checker.sh
```
- When you're ready, run the script as root
```bash
chmod +x spectre-meltdown-checker.sh
sudo ./spectre-meltdown-checker.sh
```
## Example of script output ## Example of script output
![checker](https://framapic.org/6O4v4AAwMenv/M6J4CFWwsB3z.png) - Intel Haswell CPU running under Ubuntu 16.04 LTS
![haswell](https://framapic.org/1kWmNwE6ll0p/ayTRX9JRlHJ7.png)
- AMD Ryzen running under OpenSUSE Tumbleweed
![ryzen](https://framapic.org/TkWbuh421YQR/6MAGUP3lL6Ne.png)
- Batch mode (JSON flavor)
![batch](https://framapic.org/HEcWFPrLewbs/om1LdufspWTJ.png)
## Quick summary of the CVEs ## Quick summary of the CVEs
@ -38,8 +79,10 @@ The script will do its best to detect mitigations, including backported non-vani
This tool does its best to determine whether your system is immune (or has proper mitigations in place) for the collectively named "speculative execution" vulnerabilities. It doesn't attempt to run any kind of exploit, and can't guarantee that your system is secure, but rather helps you verifying whether your system has the known correct mitigations in place. This tool does its best to determine whether your system is immune (or has proper mitigations in place) for the collectively named "speculative execution" vulnerabilities. It doesn't attempt to run any kind of exploit, and can't guarantee that your system is secure, but rather helps you verifying whether your system has the known correct mitigations in place.
However, some mitigations could also exist in your kernel that this script doesn't know (yet) how to detect, or it might falsely detect mitigations that in the end don't work as expected (for example, on backported or modified kernels). However, some mitigations could also exist in your kernel that this script doesn't know (yet) how to detect, or it might falsely detect mitigations that in the end don't work as expected (for example, on backported or modified kernels).
Your system exposure also depends on your CPU. As of now, AMD and ARM processors are marked as immune to some or all of these vulnerabilities (except some specific ARM models). All Intel processors manufactured since circa 1995 are thought to be vulnerable. Whatever processor one uses, one might seek more information from the manufacturer of that processor and/or of the device in which it runs. Your system exposure also depends on your CPU. As of now, AMD and ARM processors are marked as immune to some or all of these vulnerabilities (except some specific ARM models). All Intel processors manufactured since circa 1995 are thought to be vulnerable, except some specific/old models, such as some early Atoms. Whatever processor one uses, one might seek more information from the manufacturer of that processor and/or of the device in which it runs.
The nature of the discovered vulnerabilities being quite new, the landscape of vulnerable processors can be expected to change over time, which is why this script makes the assumption that all CPUs are vulnerable, except if the manufacturer explicitly stated otherwise in a verifiable public announcement. The nature of the discovered vulnerabilities being quite new, the landscape of vulnerable processors can be expected to change over time, which is why this script makes the assumption that all CPUs are vulnerable, except if the manufacturer explicitly stated otherwise in a verifiable public announcement.
Please also note that for Spectre vulnerabilities, all software can possibly be exploited, this tool only verifies that the kernel (which is the core of the system) you're using has the proper protections in place. Verifying all the other software is out of the scope of this tool. As a general measure, ensure you always have the most up to date stable versions of all the software you use, especially for those who are exposed to the world, such as network daemons and browsers.
This tool has been released in the hope that it'll be useful, but don't use it to jump to conclusions about your security. This tool has been released in the hope that it'll be useful, but don't use it to jump to conclusions about your security.

File diff suppressed because it is too large Load diff