From abe4e94f67a36020452e1623eaa8ea3720f53b8a Mon Sep 17 00:00:00 2001
From: Alexandre Aubin
Date: Thu, 17 Dec 2020 00:33:38 +0100
Subject: [PATCH] Zblerg^5457

---
 package_check.sh               | 237 +++++++++--------
 sub_scripts/launcher.sh        | 112 --------
 sub_scripts/lxc_check.sh       | 473 ---------------------------------
 sub_scripts/notifications.sh   | 196 --------------
 sub_scripts/testing_process.sh | 469 ++++++++++++--------------------
 5 files changed, 285 insertions(+), 1202 deletions(-)
 delete mode 100755 sub_scripts/lxc_check.sh
 delete mode 100755 sub_scripts/notifications.sh

diff --git a/package_check.sh b/package_check.sh
index fb7cdad..85fd966 100755
--- a/package_check.sh
+++ b/package_check.sh
@@ -6,15 +6,12 @@ source "./sub_scripts/launcher.sh"
 source "./sub_scripts/testing_process.sh"
 
 complete_log="./Complete.log"
-test_series=""
 
 # Purge some log files
 > "$complete_log"
 > "./lxc_boot.log"
 
-TEST_CONTEXT="./.tmp_test_context"
-rm -rf $TEST_CONTEXT
-mkdir -p $TEST_CONTEXT
+TEST_CONTEXT=$(mktemp -d)
 
 # Redirect fd 3 (=debug steam) to complete log
 exec 3>>$complete_log
@@ -202,6 +199,7 @@ fi
 # Stop and restore the LXC container. In case of previous incomplete execution.
 LXC_STOP
 LXC_TURNOFF
+LXC_PURGE_SNAPSHOTS
 #=================================================
 # Pick up the package
 #=================================================
@@ -292,8 +290,7 @@ COMPUTE_RESULTS_SUMMARY () {
     print_result "Install (root)" $RESULT_check_root
     print_result "Install (subpath)" $RESULT_check_subdir
     print_result "Install (no url)" $RESULT_check_nourl
-    print_result "Install (private mode)" $RESULT_check_private
-    print_result "Install (public mode)" $RESULT_check_public
+    print_result "Install (private)" $RESULT_check_private
     print_result "Install (multi-instance)" $RESULT_check_multi_instance
     print_result "Upgrade" $RESULT_check_upgrade
     print_result "Backup" $RESULT_check_backup
@@ -335,7 +332,6 @@ COMPUTE_RESULTS_SUMMARY () {
     [ $RESULT_check_subdir -ne -1 ] && \
     [ $RESULT_check_root -ne -1 ] && \
     [ $RESULT_check_private -ne -1 ] && \
-    [ $RESULT_check_public -ne -1 ] && \
     [ $RESULT_check_multi_instance -ne -1 ]
 }
 
@@ -381,7 +377,6 @@ COMPUTE_RESULTS_SUMMARY () {
     [ $RESULT_check_subdir -ne -1 ] && \
     [ $RESULT_check_upgrade -ne -1 ] && \
     [ $RESULT_check_private -ne -1 ] && \
-    [ $RESULT_check_public -ne -1 ] && \
     [ $RESULT_check_multi_instance -ne -1 ] && \
     [ $RESULT_check_port -ne -1 ] && \
     [ $RESULT_check_backup -ne -1 ] && \
@@ -488,37 +483,6 @@ COMPUTE_RESULTS_SUMMARY () {
     done
 }
 
-#=================================================
-# Parsing and performing tests
-#=================================================
-
-
-# Default values for check_process and TESTING_PROCESS
-init_results() {
-    local test_serie_id=$1
-    cat << EOF > $TEST_CONTEXT/$test_serie_id/results
-RESULT_witness=0
-RESULT_alias_traversal=0
-RESULT_linter=0
-RESULT_linter_level_6=0
-RESULT_linter_level_7=0
-RESULT_linter_level_8=0
-RESULT_linter_broken=0
-RESULT_check_subdir=0
-RESULT_check_root=0
-RESULT_check_nourl=0
-RESULT_check_upgrade=0
-RESULT_check_backup=0
-RESULT_check_restore=0
-RESULT_check_private=0
-RESULT_check_public=0
-RESULT_check_multi_instance=0
-RESULT_check_port=0
-RESULT_change_url=0
-RESULT_action_config_panel=0
-EOF
-}
-
 #=================================================
 # Parse the check_process
 #=================================================
@@ -563,9 +527,6 @@ parse_check_process() {
     # Remove all spaces at the beginning of the lines
     sed --in-place 's/^[ \t]*//g' "$check_process"
 
-    # Extract the Options section
-    extract_check_process_section "^;;; Options" ";; "
> $TEST_CONTEXT/check_process.options - # Extract the Upgrade infos extract_check_process_section "^;;; Upgrade options" ";; " > $TEST_CONTEXT/check_process.upgrade_options mkdir -p $TEST_CONTEXT/upgrades @@ -576,74 +537,121 @@ parse_check_process() { done rm $TEST_CONTEXT/check_process.upgrade_options + local test_serie_id="0" + # Parse each tests serie while read <&3 tests_serie do - local test_serie_id=$(tr -dc A-Za-z0-9 $test_serie_dir/test_serie_name extract_check_process_section "^$tests_serie" "^;;" > $test_serie_rawconf - extract_check_process_section "^; pre-install" "^; " $test_serie_rawconf > $test_serie_dir/preinstall.sh.template # This is the arg list to be later fed to "yunohost app install" # Looking like domain=foo.com&path=/bar&password=stuff # "Standard" arguments like domain/path will later be overwritten # during tests - extract_check_process_section "^; Manifest" "^; " $test_serie_rawconf | awk '{print $1}' | tr -d '"' | tr '\n' '&' > $test_serie_dir/install_args - extract_check_process_section "^; Actions" "^; " $test_serie_rawconf > $test_serie_dir/check_process.actions_infos - extract_check_process_section "^; Config_panel" "^; " $test_serie_rawconf > $test_serie_dir/check_process.configpanel_infos - extract_check_process_section "^; Checks" "^; " $test_serie_rawconf > $test_serie_dir/check_process.tests_infos + local install_args=$( extract_check_process_section "^; Manifest" "^; " $test_serie_rawconf | awk '{print $1}' | tr -d '"' | tr '\n' '&') + local preinstall_template=$(extract_check_process_section "^; pre-install" "^; " $test_serie_rawconf) + local action_infos=$( extract_check_process_section "^; Actions" "^; " $test_serie_rawconf) + local configpanel_infos=$( extract_check_process_section "^; Config_panel" "^; " $test_serie_rawconf) + + extract_check_process_section "^; Checks" "^; " $test_serie_rawconf > $TEST_CONTEXT/check_process.tests_infos is_test_enabled () { # Find the line for the given check option - local value=$(grep -m1 -o "^$1=." "$test_serie_dir/check_process.tests_infos" | awk -F= '{print $2}') + local value=$(grep -m1 -o "^$1=." 
"$TEST_CONTEXT/check_process.tests_infos" | awk -F= '{print $2}') # And return this value [ "${value:0:1}" = "1" ] } - is_test_enabled pkg_linter && echo "PACKAGE_LINTER" >> $test_serie_dir/tests_to_perform - is_test_enabled setup_sub_dir && echo "TEST_INSTALL subdir" >> $test_serie_dir/tests_to_perform - is_test_enabled setup_root && echo "TEST_INSTALL root" >> $test_serie_dir/tests_to_perform - is_test_enabled setup_nourl && echo "TEST_INSTALL nourl" >> $test_serie_dir/tests_to_perform - is_test_enabled setup_private && echo "TEST_PUBLIC_PRIVATE private" >> $test_serie_dir/tests_to_perform - is_test_enabled setup_public && echo "TEST_PUBLIC_PRIVATE public" >> $test_serie_dir/tests_to_perform - is_test_enabled multi_instance && echo "TEST_MULTI_INSTANCE" >> $test_serie_dir/tests_to_perform - is_test_enabled backup_restore && echo "TEST_BACKUP_RESTORE" >> $test_serie_dir/tests_to_perform - + add_test() { + local test_type="$1" + local test_arg="$2" + test_id="$((test_id+1))" + local extra="{}" + local _install_args="$install_args" + + # Upgrades with a specific commit + if [[ "$test_type" == "TEST_UPGRADE" ]] && [[ -n "$test_arg" ]] + then + local specific_upgrade_install_args="$(grep "^manifest_arg=" "$TEST_CONTEXT/upgrades/$commit" | cut -d'=' -f2-)" + [[ -n "$specific_upgrade_install_args" ]] && _install_args="$specific_upgrade_install_args" + + local upgrade_name="$(grep "^name=" "$TEST_CONTEXT/upgrades/$commit" | cut -d'=' -f2)" + extra="$(jq -n --arg upgrade_name "$upgrade_name" '{ $upgrade_name }')" + elif [[ "$test_type" == "ACTIONS_CONFIG_PANEL" ]] && [[ "$test_arg" == "actions" ]] + then + extra="$(jq -n --arg actions "$action_infos" '{ $actions }')" + elif [[ "$test_type" == "ACTIONS_CONFIG_PANEL" ]] && [[ "$test_arg" == "actions" ]] + then + extra="$(jq -n --arg configpanel "$configpanel_infos" '{ $configpanel }')" + fi + + jq -n -f "$TEST_CONTEXT/tests/$test_id.json" \ + --arg test_serie "$test_serie" \ + --arg test_type "$test_type" \ + --arg test_arg "$test_arg" \ + --arg preinstall_template "$preinstall_template" \ + --arg install_args "$_install_args" \ + --argjson "$extra" \ + '{ $test_serie, $test_type, $test_arg, $preinstall_template, $install_args, $extra }' + } + + # For not-the-main-test-serie, we only consider testing the install and + # upgrade from previous commits + if [[ "$test_serie_id" != "1" ]] + then + is_test_enabled setup_sub_dir && add_test "TEST_INSTALL" "subdir" + is_test_enabled setup_root && add_test "TEST_INSTALL" "root" + is_test_enabled setup_nourl && add_test "TEST_INSTALL" "nourl" + grep "^upgrade=1" "$TEST_CONTEXT/check_process.tests_infos" | + while IFS= read -r LINE; + do + commit=$(echo $LINE | grep -o "from_commit=.*" | awk -F= '{print $2}') + [ -n "$commit" ] || continue + add_test "TEST_UPGRADE" "$commit" + done + + continue + else + test_serie="default" + fi + + is_test_enabled pkg_linter && add_test "PACKAGE_LINTER" + is_test_enabled setup_sub_dir && add_test "TEST_INSTALL" "subdir" + is_test_enabled setup_root && add_test "TEST_INSTALL" "root" + is_test_enabled setup_nourl && add_test "TEST_INSTALL" "nourl" + is_test_enabled setup_private && add_test "TEST_INSTALL" "private" + is_test_enabled multi_instance && add_test "TEST_MULTI_INSTANCE" + is_test_enabled backup_restore && add_test "TEST_BACKUP_RESTORE" + # Upgrades - grep "^upgrade=1" "$test_serie_dir/check_process.tests_infos" | - while IFS= read -r LINE; + grep "^upgrade=1" "$TEST_CONTEXT/check_process.tests_infos" | + while IFS= read -r LINE; do commit=$(echo $LINE | grep -o 
"from_commit=.*" | awk -F= '{print $2}') [ -n "$commit" ] || commit="current" - echo "TEST_UPGRADE $commit" >> $test_serie_dir/tests_to_perform - done - + add_test "TEST_UPGRADE" "$commit" + done + # "Advanced" features - is_test_enabled change_url && echo "TEST_CHANGE_URL" >> $test_serie_dir/tests_to_perform - is_test_enabled actions && echo "ACTIONS_CONFIG_PANEL actions" >> $test_serie_dir/tests_to_perform - is_test_enabled config_panel && echo "ACTIONS_CONFIG_PANEL config_panel" >> $test_serie_dir/tests_to_perform + is_test_enabled change_url && add_test "TEST_CHANGE_URL" + is_test_enabled actions && add_test "ACTIONS_CONFIG_PANEL" "actions" + is_test_enabled config_panel && add_test "ACTIONS_CONFIG_PANEL" "config_panel" # Port already used ... do we really need this ... - if grep -q -m1 "port_already_use=1" "$test_serie_dir/check_process.tests_infos" + if grep -q -m1 "port_already_use=1" "$TEST_CONTEXT/check_process.tests_infos" then - local check_port=$(grep -m1 "port_already_use=1" "$test_serie_dir/check_process.tests_infos" | grep -o -E "\([0-9]+\)" | tr -d '()') + local check_port=$(grep -m1 "port_already_use=1" "$TEST_CONTEXT/check_process.tests_infos" | grep -o -E "\([0-9]+\)" | tr -d '()') else local check_port=6660 fi - is_test_enabled port_already_use && echo "TEST_PORT_ALREADY_USED $check_port" >> $test_serie_dir/tests_to_perform - - cat $test_serie_dir/tests_to_perform + is_test_enabled port_already_use && add_test "TEST_PORT_ALREADY_USED" "$check_port" done 3<<< "$(grep "^;; " "$check_process")" @@ -655,34 +663,37 @@ guess_test_configuration() { log_error "Not check_process file found." log_warning "Package check will attempt to automatically guess what tests to run." - local test_serie_id=$(tr -dc A-Za-z0-9 $test_serie_dir/test_serie_name + add_test() { + local test_type="$1" + local test_arg="$2" + test_id="$((test_id+1))" + local extra="{}" - test_series+="$test_serie_id " + jq -n -f "$TEST_CONTEXT/tests/$test_id.json" + --arg test_serie "default" \ + --arg test_type "$test_type" \ + --arg test_arg "$test_arg" \ + --arg preinstall_template "" \ + --arg install_args "$install_args" \ + --argjson "$extra" \ + '{ $test_serie, $test_type, $test_arg, $preinstall_template, $install_args, $extra }' + } + local install_args=$(python "./sub_scripts/manifest_parsing.py" "$package_path/manifest.json" | cut -d ':' -f1,2 | tr ':' '=' | tr '\n' '&') - python "./sub_scripts/manifest_parsing.py" "$package_path/manifest.json" \ - | cut -d ':' -f1,2 | tr ':' '=' | tr '\n' '&' > $test_serie_dir/install_args - - echo "PACKAGE_LINTER" >> $test_serie_dir/tests_to_perform - echo "TEST_INSTALL subdir" >> $test_serie_dir/tests_to_perform - echo "TEST_INSTALL root" >> $test_serie_dir/tests_to_perform - if grep -q "is_public=" $test_serie_dir/install_args + add_test "PACKAGE_LINTER" + add_test "TEST_INSTALL subdir" + add_test "TEST_INSTALL root" + if echo $install_args | grep -q "is_public=" then - echo "TEST_PUBLIC_PRIVATE private" >> $test_serie_dir/tests_to_perform - echo "TEST_PUBLIC_PRIVATE public" >> $test_serie_dir/tests_to_perform + add_test "TEST_INSTALL" "private" fi if grep multi_instance "$package_path/manifest.json" | grep -q true then - echo "TEST_MULTI_INSTANCE" >> $test_serie_dir/tests_to_perform + add_test "TEST_MULTI_INSTANCE" fi - echo "TEST_BACKUP_RESTORE" >> $test_serie_dir/tests_to_perform - echo "TEST_UPGRADE current" >> $test_serie_dir/tests_to_perform + add_test "TEST_BACKUP_RESTORE" + add_test "TEST_UPGRADE current" } #================================================= @@ 
-696,23 +707,17 @@ run_all_tests() { LXC_INIT - for test_serie_id in $test_series - do - test_serie_dir=$TEST_CONTEXT/$test_serie_id + # Break after the first tests serie + if [ $interactive -eq 1 ]; then + read -p "Press a key to start the tests..." < /dev/tty + fi - # Break after the first tests serie - if [ $interactive -eq 1 ]; then - read -p "Press a key to start the next tests serie..." < /dev/tty - fi + # Launch all tests successively + cat $TEST_CONTEXT/tests/*.json >&3 + RUN_ALL_TESTS $TEST_CONTEXT/tests/ - # Launch all tests successively - RUN_TEST_SERIE $test_serie_dir - - # Print the final results of the tests - COMPUTE_RESULTS_SUMMARY $test_serie_id - - LXC_PURGE_SNAPSHOTS - done + # Print the final results of the tests + COMPUTE_RESULTS_SUMMARY $test_serie_id # Restore the started time for the timer starttime=$complete_start_timer @@ -721,8 +726,6 @@ run_all_tests() { echo "You can find the complete log of these tests in $(realpath $complete_log)" - source "./sub_scripts/notifications.sh" - } [ -e "$check_process" ] \ @@ -731,4 +734,6 @@ run_all_tests() { run_all_tests +LXC_PURGE_SNAPSHOTS + clean_exit 0 diff --git a/sub_scripts/launcher.sh b/sub_scripts/launcher.sh index 8e22f78..e7898d4 100755 --- a/sub_scripts/launcher.sh +++ b/sub_scripts/launcher.sh @@ -131,10 +131,8 @@ LXC_START () { if ! is_lxc_running; then log_debug "Start the LXC container" >> "$complete_log" sudo lxc-start --name=$LXC_NAME --daemon --logfile "./lxc_boot.log" | tee --append "$complete_log" 2>&1 - local avoid_witness=0 else log_debug "A LXC container is already running" - local avoid_witness=1 fi # Try to connect 5 times @@ -152,93 +150,6 @@ LXC_START () { [ "$(uname -m)" == "aarch64" ] && sleep 30 - local failstart=0 - - # Check if the container is running - if ! is_lxc_running; then - log_critical "The LXC container didn't start..." - failstart=1 - if [ $i -ne $max_try ]; then - log_info "Rebooting the container..." - fi - LXC_STOP - # Try to ping security.debian.org to check the connectivity from the container - elif ! ssh $arg_ssh -o ConnectTimeout=60 $LXC_NAME "sudo ping -q -c 2 security.debian.org > /dev/null 2>&1; exit \$?" >> "$complete_log" 2>&1 - then - log_critical "The container failed to connect to internet..." - failstart=1 - if [ $i -ne $max_try ]; then - log_info "Rebooting the container..." - fi - LXC_STOP - # Create files to check if the remove script does not remove them accidentally - else - [ $avoid_witness -eq 0 ] && set_witness_files - - # Break the for loop if the container is ready. - break - fi - - # Fail if the container failed to start - if [ $i -eq $max_try ] && [ $failstart -eq 1 ] - then - send_email () { - # Send an email only if it's a CI environment - if [ $type_exec_env -ne 0 ] - then - ci_path=$(grep "CI_URL=" "./../config" | cut -d= -f2) - local subject="[YunoHost] Container in trouble on $ci_path." - local message="The container failed to start $max_try times on $ci_path. - $lxc_check_result - - Please have a look to the log of lxc_check: - $(cat "./lxc_check.log")" - if [ $lxc_check -eq 2 ]; then - # Add the log of lxc_build - message="$message - - Here the log of lxc_build: - $(cat "./sub_scripts/Build_lxc.log")" - fi - - dest=$(grep 'dest=' "./../config" | cut -d= -f2) - mail -s "$subject" "$dest" <<< "$message" - fi - } - - log_critical "The container failed to start $max_try times..." - log_info "Boot log:\n" - cat "./lxc_boot.log" | tee --append "$complete_log" - log_info "lxc_check will try to fix the container..." 
- ./sub_scripts/lxc_check.sh --no-lock | tee "./lxc_check.log" - # PIPESTATUS is an array with the exit code of each command followed by a pipe - local lxc_check=${PIPESTATUS[0]} - LXC_INIT - if [ $lxc_check -eq 0 ]; then - local lxc_check_result="The container seems to be ok, according to lxc_check." - log_success "$lxc_check_result" - send_email - i=0 - elif [ $lxc_check -eq 1 ]; then - local lxc_check_result="An error has happened with the host. Please check the configuration." - log_critical "$lxc_check_result" - send_email - stop_timer 1 - return 1 - elif [ $lxc_check -eq 2 ]; then - local lxc_check_result="The container is broken, it will be rebuilt." - log_critical "$lxc_check_result" - ./sub_scripts/lxc_build.sh - LXC_INIT - send_email - i=0 - elif [ $lxc_check -eq 3 ]; then - local lxc_check_result="The container has been fixed by lxc_check." - log_success "$lxc_check_result" - send_email - i=0 - fi - fi done stop_timer 1 start_timer @@ -276,26 +187,3 @@ LOAD_LXC_SNAPSHOT () { sudo rsync --acls --archive --delete --executability --itemize-changes --xattrs "$LXC_SNAPSHOTS/$snapname/rootfs/" "$LXC_ROOTFS/" > /dev/null 2>> "$complete_log" } -LXC_TURNOFF () { - # Disable LXC network - - log_debug "Disable iptables rules." - if sudo iptables --check FORWARD --in-interface $LXC_BRIDGE --out-interface $MAIN_NETWORK_INTERFACE --jump ACCEPT 2> /dev/null - then - sudo iptables --delete FORWARD --in-interface $LXC_BRIDGE --out-interface $MAIN_NETWORK_INTERFACE --jump ACCEPT >> "$complete_log" 2>&1 - fi - if sudo iptables --check FORWARD --in-interface $MAIN_NETWORK_INTERFACE --out-interface $LXC_BRIDGE --jump ACCEPT 2> /dev/null - then - sudo iptables --delete FORWARD --in-interface $MAIN_NETWORK_INTERFACE --out-interface $LXC_BRIDGE --jump ACCEPT | tee --append "$complete_log" 2>&1 - fi - if sudo iptables --table nat --check POSTROUTING --source $LXC_NETWORK.0/24 --jump MASQUERADE 2> /dev/null - then - sudo iptables --table nat --delete POSTROUTING --source $LXC_NETWORK.0/24 --jump MASQUERADE | tee --append "$complete_log" 2>&1 - fi - - log_debug "Disable the network bridge." - if sudo ifquery $LXC_BRIDGE --state > /dev/null - then - sudo ifdown --force $LXC_BRIDGE | tee --append "$complete_log" 2>&1 - fi -} diff --git a/sub_scripts/lxc_check.sh b/sub_scripts/lxc_check.sh deleted file mode 100755 index 0d5ad80..0000000 --- a/sub_scripts/lxc_check.sh +++ /dev/null @@ -1,473 +0,0 @@ -#!/bin/bash -# Test différents aspect du conteneur pour chercher d'éventuelles erreurs. -# Et tente de réparer si possible... - -cd $(dirname $(realpath $0) | sed 's@/sub_scripts$@@g') -source "./sub_scripts/common.sh" - -no_lock=0 -if [ "$1" == "--no-lock" ]; then - no_lock=1 -fi - -# Exit with the correct exit code -remove_lock () { - rm -f "$lock_file" -} - -exit_rebuild () { - remove_lock - exit 2 -} - -exit_retry () { - remove_lock - exit 3 -} - -exit_sane () { - remove_lock - exit 0 -} - -STOP_CONTAINER () { - echo "Arrêt du conteneur $LXC_NAME" - sudo lxc-stop -n $LXC_NAME -} - -START_NETWORK () { - echo "Initialisation du réseau pour le conteneur." - sudo ifup $LXC_BRIDGE --interfaces=/etc/network/interfaces.d/$LXC_BRIDGE - # Activation des règles iptables - sudo iptables -A FORWARD -i $LXC_BRIDGE -o $MAIN_NETWORK_INTERFACE -j ACCEPT - sudo iptables -A FORWARD -i $MAIN_NETWORK_INTERFACE -o $LXC_BRIDGE -j ACCEPT - sudo iptables -t nat -A POSTROUTING -s $LXC_NETWORK.0/24 -j MASQUERADE -} - -STOP_NETWORK () { - echo "Arrêt du réseau pour le conteneur." 
- sudo iptables -D FORWARD -i $LXC_BRIDGE -o $MAIN_NETWORK_INTERFACE -j ACCEPT > /dev/null 2>&1 - sudo iptables -D FORWARD -i $MAIN_NETWORK_INTERFACE -o $LXC_BRIDGE -j ACCEPT > /dev/null 2>&1 - sudo iptables -t nat -D POSTROUTING -s $LXC_NETWORK.0/24 -j MASQUERADE > /dev/null 2>&1 - sudo ifdown --force $LXC_BRIDGE > /dev/null 2>&1 -} - -REBOOT_CONTENEUR () { - echo "Redémarrage du conteneur." - STOP_CONTAINER - STOP_NETWORK - START_NETWORK - echo "Démarrage du conteneur." - sudo lxc-start -n $LXC_NAME -d > /dev/null 2>&1 # Démarre le conteneur - sudo lxc-wait -n $LXC_NAME -s 'RUNNING' -t 60 # Attend pendant 60s maximum que le conteneur démarre -} - -CHECK_CONTAINER () { - echo "Test de démarrage du conteneur $LXC_NAME" - sudo lxc-start -n $LXC_NAME -d > /dev/null 2>&1 # Démarre le conteneur - sudo lxc-wait -n $LXC_NAME -s 'RUNNING' -t 60 # Attend pendant 60s maximum que le conteneur démarre -# sudo lxc-ls -f - if [ $(sudo lxc-info --name $LXC_NAME | grep -c "RUNNING") -ne 1 ]; then - check_repair=1 - return 1 # Renvoi 1 si le démarrage du conteneur a échoué - else - return 0 # Renvoi 0 si le démarrage du conteneur a réussi - fi -} - -RESTORE_SNAPSHOT () { - echo -e "\e[91mRestauration du snapshot du conteneur $LXC_NAME.\e[0m" - check_repair=1 - sudo lxc-snapshot -r snap0 -n $LXC_NAME - CHECK_CONTAINER - STATUS=$? - if [ "$STATUS" -eq 1 ]; then - echo -e "\e[91m> Conteneur $LXC_NAME en défaut.\e[0m" - STOP_CONTAINER - return 1 - else - echo -e "\e[92m> Conteneur $LXC_NAME en état de marche.\e[0m" - return 0 - fi -} - -RESTORE_ARCHIVE_SNAPSHOT () { - if ! test -e "/var/lib/lxcsnaps/$LXC_NAME/snap0.tar.gz"; then - echo -e "\e[91mAucune archive de snapshot pour le conteneur $LXC_NAME.\e[0m" - return 1 - fi - echo -e "\e[91mRestauration du snapshot archivé pour le conteneur $LXC_NAME.\e[0m" - check_repair=1 - echo -e "\e[91mSuppression du snapshot.\e[0m" - sudo lxc-snapshot -n $LXC_NAME -d snap0 - echo -e "\e[91mDécompression de l'archive.\e[0m" - sudo tar -x --acls --xattrs -f /var/lib/lxcsnaps/$LXC_NAME/snap0.tar.gz -C / - RESTORE_SNAPSHOT - return $? -} - -RESTORE_CONTAINER () { - # Tente des restaurations du conteneur - # Restauration des snapshots - STOP_CONTAINER - if [ $START_STATUS -eq 1 ]; then - RESTORE_SNAPSHOT - START_STATUS=$? - fi - # Restauration des archives des snapshots - if [ $START_STATUS -eq 1 ]; then - RESTORE_ARCHIVE_SNAPSHOT - START_STATUS=$? - fi - # Résultats finaux - if [ $START_STATUS -eq 1 ]; then - echo -e "\e[91m\n> Le conteneur $LXC_NAME1 n'a pas pu être réparé...\nIl est nécessaire de détruire et de reconstruire le conteneur.\e[0m" - exit_rebuild - else - echo -e "\e[92m\n> Le conteneur démarre correctement.\e[0m" - fi -} - -LXC_NETWORK_CONFIG () { - lxc_network=0 - if [ $(lsb_release -sc) != buster ] - then - network_prefix=lxc.network - else - network_prefix=lxc.net.0 - fi - if ! sudo cat /var/lib/lxc/$LXC_NAME/config | grep -q "^$network_prefix.type = veth" - then - lxc_network=1 # Si la ligne de la config réseau est absente, c'est une erreur. - check_repair=1 - if sudo cat /var/lib/lxc/$LXC_NAME/config | grep -q ".*$network_prefix.type" - then # Si la ligne est incorrecte, elle est corrigée. - sudo sed -i "s/.*$network_prefix.type.*/$network_prefix.type = veth/g" /var/lib/lxc/$LXC_NAME/config - else # Sinon elle est ajoutée. - echo "$network_prefix.type = veth" | sudo tee -a /var/lib/lxc/$LXC_NAME/config - fi - fi - if ! 
sudo cat /var/lib/lxc/$LXC_NAME/config | grep -q "^$network_prefix.flags = up" - then - lxc_network=1 - check_repair=1 - if sudo cat /var/lib/lxc/$LXC_NAME/config | grep -q ".*$network_prefix.flags" - then - sudo sed -i "s/.*$network_prefix.flags.*/$network_prefix.flags = up/g" /var/lib/lxc/$LXC_NAME/config - else - echo "$network_prefix.flags = up" | sudo tee -a /var/lib/lxc/$LXC_NAME/config - fi - fi - if ! sudo cat /var/lib/lxc/$LXC_NAME/config | grep -q "^$network_prefix.link = $LXC_BRIDGE" - then - lxc_network=1 - check_repair=1 - if sudo cat /var/lib/lxc/$LXC_NAME/config | grep -q ".*$network_prefix.link" - then - sudo sed -i "s/.*$network_prefix.link.*/$network_prefix.link = $LXC_BRIDGE" /var/lib/lxc/$LXC_NAME/config - else - echo "$network_prefix.link = $LXC_BRIDGE" | sudo tee -a /var/lib/lxc/$LXC_NAME/config - fi - fi - if ! sudo cat /var/lib/lxc/$LXC_NAME/config | grep -q "^$network_prefix.name = eth0" - then - lxc_network=1 - check_repair=1 - if sudo cat /var/lib/lxc/$LXC_NAME/config | grep -q ".*$network_prefix.name" - then - sudo sed -i "s/.*$network_prefix.name.*/$network_prefix.name = eth0/g" /var/lib/lxc/$LXC_NAME/config - else - echo "$network_prefix.name = eth0" | sudo tee -a /var/lib/lxc/$LXC_NAME/config - fi - fi - if ! sudo cat /var/lib/lxc/$LXC_NAME/config | grep -q "^$network_prefix.veth.pair = $LXC_NAME" - then - lxc_network=1 - check_repair=1 - if sudo cat /var/lib/lxc/$LXC_NAME/config | grep -q ".*$network_prefix.veth.pair" - then - sudo sed -i "s/.*$network_prefix.veth.pair.*/$network_prefix.veth.pair = $LXC_NAME/g" /var/lib/lxc/$LXC_NAME/config - else - echo "$network_prefix.veth.pair = $LXC_NAME" | sudo tee -a /var/lib/lxc/$LXC_NAME/config - fi - fi - if ! sudo cat /var/lib/lxc/$LXC_NAME/config | grep -q "^$network_prefix.hwaddr = 00:FF:AA:00:00:01" - then - lxc_network=1 - check_repair=1 - if sudo cat /var/lib/lxc/$LXC_NAME/config | grep -q ".*$network_prefix.hwaddr" - then - sudo sed -i "s/.*$network_prefix.hwaddr.*/$network_prefix.hwaddr = 00:FF:AA:00:00:01/g" /var/lib/lxc/$LXC_NAME/config - else - echo "$network_prefix.hwaddr = 00:FF:AA:00:00:01" | sudo tee -a /var/lib/lxc/$LXC_NAME/config - fi - fi - if [ $lxc_network -eq 1 ] - then - echo -e "\e[91mLa configuration réseau LXC du conteneur est incorrecte et a été corrigée.\e[0m" - else - echo -e "\e[92mLa configuration réseau LXC du conteneur est correcte.\e[0m" - fi -} - -[ $no_lock -eq 0 ] && touch "$lock_file" - -STOP_CONTAINER -STOP_NETWORK -check_repair=0 - -### Test de la configuration réseau -echo -e "\e[1m> Test de la configuration réseau du côté de l'hôte:\e[0m" -CREATE_BRIDGE () { - echo | sudo tee /etc/network/interfaces.d/$LXC_BRIDGE < Test le démarrage du conteneur:\e[0m" -START_NETWORK -LXC_NETWORK_CONFIG -CHECK_CONTAINER -START_STATUS=$? -if [ "$START_STATUS" -eq 1 ]; then - RESTORE_CONTAINER -else - echo -e "\e[92mLe conteneur a démarré correctement.\e[0m" -fi - - -# Vérifie la connexion internet. -echo -e "\e[1m\n> Test de l'accès internet depuis l'hôte:\e[0m" -ping -q -c 2 yunohost.org > /dev/null 2>&1 -if [ "$?" -ne 0 ]; then # En cas d'échec de connexion, tente de pinger un autre domaine pour être sûr - ping -q -c 2 framasoft.org > /dev/null 2>&1 - if [ "$?" -ne 0 ]; then # En cas de nouvel échec de connexion. On considère que la connexion est down... - critical "L'hôte semble ne pas avoir accès à internet. La connexion internet est indispensable." 
- fi -fi -echo -e "\e[92mL'hôte dispose d'un accès à internet.\e[0m" - -### Test le réseau du conteneur -echo -e "\e[1m\n> Test de l'accès internet depuis le conteneur:\e[0m" -CHECK_LXC_NET () { - RUN_INSIDE_LXC ping -q -c 2 yunohost.org > /dev/null 2>&1 \ - || RUN_INSIDE_LXC ping -q -c 2 framasoft.org > /dev/null 2>&1 \ - || return 1 - - return 0 -} - -lxc_net=1 -lxc_net_check=0 # Passe sur les différents tests -while test "$lxc_net" -eq 1 # Boucle tant que la connexion internet du conteneur n'est pas réparée. -do - REBOOT_CONTENEUR - if [ "$(uname -m)" == "aarch64" ] - then - sleep 30 - else - sleep 3 - fi - sudo lxc-ls -f - CHECK_LXC_NET - lxc_net=$? - if [ "$lxc_net" -eq 1 ]; then - if [ "$lxc_net_check" -eq 4 ] - then - echo -e "\e[91mImpossible de rétablir la connexion internet du conteneur.\e[0m" - exit_rebuild - fi - echo -e "\e[91mLe conteneur LXC n'accède pas à internet...\e[0m" - check_repair=1 - if [ "$lxc_net_check" -eq 0 ] - then - # Test la présence du fichier de config du kernel - lxc_net_check=1 - if ! test -e /etc/sysctl.d/lxc_pchecker.conf - then - echo -e "\e[91mLe fichier de configuration du kernel pour l'ip forwarding est introuvable.\nIl va être recréé.\e[0m" - echo "net.ipv4.ip_forward=1" | sudo tee /etc/sysctl.d/lxc_pchecker.conf - sudo sysctl -p /etc/sysctl.d/lxc_pchecker.conf - continue - else - echo -e "\e[92mLe fichier de configuration du kernel pour l'ip forwarding est présent.\e[0m" - fi - fi - if [ "$lxc_net_check" -eq 1 ] - then - # Test l'ip forwarding - lxc_net_check=2 - if ! sudo sysctl -a | grep -q "net.ipv4.ip_forward = " || [ $(sudo sysctl -n net.ipv4.ip_forward) -ne 1 ] - then - echo -e "\e[91mL'ip forwarding n'est pas activé. Correction en cours...\e[0m" - echo "net.ipv4.ip_forward=1" | sudo tee /etc/sysctl.d/lxc_pchecker.conf - sudo sysctl -p /etc/sysctl.d/lxc_pchecker.conf - continue - else - echo -e "\e[92mL'ip forwarding est activé.\e[0m" - fi - fi - if [ "$lxc_net_check" -eq 2 ] - then - # Vérifie la config réseau LXC du conteneur - lxc_net_check=3 - LXC_NETWORK_CONFIG - fi - if [ "$lxc_net_check" -eq 3 ] - then - lxc_net_check=4 - # Vérifie la config réseau LXC à l'intérieur du conteneur - if ! sudo test -e /var/lib/lxc/$LXC_NAME/rootfs/etc/network/interfaces - then - echo -e "\e[91mLe fichier network/interfaces du conteneur est introuvable.\nIl va être recréé.\e[0m" - else - echo -e "\e[92mLe fichier network/interfaces du conteneur est présent.\nMais il va être réécrit par précaution.\e[0m" - fi - echo -e "auto lo\niface lo inet loopback\nauto eth0\niface eth0 inet static\n\taddress $LXC_NETWORK.2/24\n\tgateway $LXC_NETWORK.1" | sudo tee /var/lib/lxc/$LXC_NAME/rootfs/etc/network/interfaces - fi - else - echo -e "\e[92mLe conteneur dispose d'un accès à internet.\e[0m" - fi -done - - -### Test l'accès ssh sur le conteneur -echo -e "\e[1m\n> Test de l'accès ssh:\e[0m" -assert_we_are_the_setup_user - -sudo lxc-ls -f -sleep 3 -ssh -t $LXC_NAME "exit 0" # Test une connexion ssh -if [ "$?" -eq 0 ]; then - echo -e "\e[92mLa connexion ssh est fonctionnelle.\e[0m" -else - echo -e "\e[91mÉchec de la connexion ssh. Reconfiguration de l'accès ssh.\e[0m" - check_repair=1 - ssh -t $LXC_NAME -v "exit 0" # Répète la connexion ssh pour afficher l'erreur. - - echo "Suppression de la config ssh actuelle pour le conteneur." 
- rm -f $HOME/.ssh/$LXC_NAME $HOME/.ssh/$LXC_NAME.pub - - BEGIN_LINE=$(cat $HOME/.ssh/config | grep -n "# ssh $LXC_NAME" | cut -d':' -f 1) - sed -i "$BEGIN_LINE,/^IdentityFile/d" $HOME/.ssh/config - - ssh-keygen -f "$HOME/.ssh/known_hosts" -R $LXC_NETWORK.2 - - echo "Création de la clé ssh." - ssh-keygen -t dsa -f $HOME/.ssh/$LXC_NAME -P '' - sudo cp $HOME/.ssh/$LXC_NAME.pub /var/lib/lxc/$LXC_NAME/rootfs/home/pchecker/.ssh/authorized_keys - RUN_INSIDE_LXC chown pchecker: -R /home/pchecker/.ssh - echo "Ajout de la config ssh." - - echo | tee -a $HOME/.ssh/config <> ~/.ssh/known_hosts # Récupère la clé publique pour l'ajouter au known_hosts - ssh -t $LXC_NAME -v "exit 0" > /dev/null # Test à nouveau la connexion ssh - if [ "$?" -eq 0 ]; then - echo -e "\e[92mLa connexion ssh est retablie.\e[0m" - else - echo -e "\e[91mÉchec de la réparation de la connexion ssh.\nIl est nécessaire de détruire et de reconstruire le conteneur.\e[0m" - fi -fi - - -### Vérifie que Yunohost est installé -echo -e "\e[1m\n> Vérifie que Yunohost est installé dans le conteneur:\e[0m" -RUN_INSIDE_LXC sudo yunohost -v -if [ "$?" -ne 0 ]; then # Si la commande échoue, il y a un problème avec Yunohost - echo -e "\e[91mYunohost semble mal installé. Il est nécessaire de détruire et de reconstruire le conteneur.\e[0m" - exit_rebuild -else - echo -e "\e[92mYunohost est installé correctement.\e[0m" -fi - -STOP_CONTAINER -STOP_NETWORK - -echo -e "\e[92m\nLe conteneur ne présente aucune erreur.\e[0m" -if [ "$check_repair" -eq 1 ]; then - echo -e "\e[91mMais des réparations ont été nécessaires. Refaire un test pour s'assurer que tout est correct...\e[0m" - exit_retry -fi - -exit_sane diff --git a/sub_scripts/notifications.sh b/sub_scripts/notifications.sh deleted file mode 100755 index 7dbd417..0000000 --- a/sub_scripts/notifications.sh +++ /dev/null @@ -1,196 +0,0 @@ -#!/bin/bash - -#================================================= -# Determine if it's a CI environment -#================================================= - -# By default, it's a standalone execution. -type_exec_env=0 -# CI environment -[ -e "./../config" ] && type_exec_env=1 -# Official CI environment -[ -e "./../auto_build/auto.conf" ] && type_exec_env=2 - - -# Try to find a optionnal email address to notify the maintainer -# In this case, this email will be used instead of the email from the manifest. -notification_email="$(grep -m1 "^Email=" $TEST_CONTEXT/check_process.options | cut -d '=' -f2)" - -# Try to find a optionnal option for the grade of notification -notification_mode="$(grep -m1 "^Notification=" $TEST_CONTEXT/check_process.options | cut -d '=' -f2)" - - -#================================================= -# Notification grade -#================================================= - -notif_grade () { - # Check the level of notification from the check_process. 
- # Echo 1 if the grade is reached - - compare_grade () - { - if echo "$notification_mode" | grep -q "$1"; then - echo 1 - else - echo 0 - fi - } - - case "$1" in - all) - # If 'all' is needed, only a grade of notification at 'all' can match - compare_grade "^all$" - ;; - change) - # If 'change' is needed, notification at 'all' or 'change' can match - compare_grade "^all$\|^change$" - ;; - down) - # If 'down' is needed, notification at 'all', 'change' or 'down' match - compare_grade "^all$\|^change$\|^down$" - ;; - *) - echo 0 - ;; - esac -} - -#================================================= -# Inform of the results by XMPP and/or by mail -#================================================= - -send_mail=0 - -# If package check it's in the official CI environment -# Check the level variation -if [ $type_exec_env -eq 2 ] -then - - # Get the job name, stored in the work_list - job=$(head -n1 "./../work_list" | cut -d ';' -f 3) - - # Identify the type of test, stable (0), testing (1) or unstable (2) - # Default stable - test_type=0 - message="" - if echo "$job" | grep -q "(testing)" - then - message="(TESTING) " - test_type=1 - elif echo "$job" | grep -q "(unstable)" - then - message="(UNSTABLE) " - test_type=2 - fi - - # Build the log path (and replace all space by %20 in the job name) - if [ -n "$job" ]; then - if systemctl list-units | grep --quiet jenkins - then - job_log="/job/${job// /%20}/lastBuild/console" - elif systemctl list-units | grep --quiet yunorunner - then - # Get the directory of YunoRunner - ci_dir="$(grep WorkingDirectory= /etc/systemd/system/yunorunner.service | cut -d= -f2)" - # List the jobs from YunoRunner and grep the job (without Community or Official). - job_id="$(cd "$ci_dir"; ve3/bin/python ciclic list | grep ${job%% *} | head -n1)" - # Keep only the id of the job, by removing everything after - - job_id="${job_id%% -*}" - # And remove any space before the id. - job_id="${job_id##* }" - job_log="/job/$job_id" - fi - fi - - # If it's a test on testing or unstable - if [ $test_type -gt 0 ] - then - # Remove unstable or testing of the job name to find its stable version in the level list - job="${job% (*)}" - fi - - # Get the previous level, found in the file list_level_stable - previous_level=$(grep "^$job:" "./../auto_build/list_level_stable" | cut -d: -f2) - - # Print the variation of the level. 
If this level is different than 0 - if [ $global_level -gt 0 ] - then - message="${message}Application $app_id" - # If non previous level was found - if [ -z "$previous_level" ]; then - message="$message just reach the level $global_level" - send_mail=$(notif_grade all) - # If the level stays the same - elif [ $global_level -eq $previous_level ]; then - message="$message stays at level $global_level" - # Need notification at 'all' to notify by email - send_mail=$(notif_grade all) - # If the level go up - elif [ $global_level -gt $previous_level ]; then - message="$message rise from level $previous_level to level $global_level" - # Need notification at 'change' to notify by email - send_mail=$(notif_grade change) - # If the level go down - elif [ $global_level -lt $previous_level ]; then - message="$message go down from level $previous_level to level $global_level" - # Need notification at 'down' to notify by email - send_mail=$(notif_grade down) - fi - fi -fi - -# If the app completely failed and obtained 0 -if [ $global_level -eq 0 ] -then - message="${message}Application $app_id has completely failed the continuous integration tests" - - # Always send an email if the app failed - send_mail=1 -fi - -subject="[YunoHost] $message" - -# If the test was perform in the official CI environment -# Add the log address -# And inform with xmpp -if [ $type_exec_env -eq 2 ] -then - - # Build the address of the server from auto.conf - ci_path=$(grep "DOMAIN=" "./../auto_build/auto.conf" | cut -d= -f2)/$(grep "CI_PATH=" "./../auto_build/auto.conf" | cut -d= -f2) - - # Add the log adress to the message - message="$message on https://$ci_path$job_log" - - # Send a xmpp notification on the chat room "apps" - # Only for a test with the stable version of YunoHost - if [ $test_type -eq 0 ] - then - "./../auto_build/xmpp_bot/xmpp_post.sh" "$message" > /dev/null 2>&1 - fi -fi - -# Send a mail to main maintainer according to notification option in the check_process. -# Only if package check is in a CI environment (Official or not) -if [ $type_exec_env -ge 1 ] && [ $send_mail -eq 1 ] -then - - # Add a 'from' header for the official CI only. - # Apparently, this trick is not needed anymore !? - # if [ $type_exec_env -eq 2 ]; then - # from_yuno="-a \"From: yunohost@yunohost.org\"" - # fi - - # Get the maintainer email from the manifest. 
If it doesn't found if the check_process - if [ -z "$notification_email" ]; then - notification_email=$(grep '\"email\": ' "$package_path/manifest.json" | cut -d '"' -f 4) - fi - - # Send the message by mail, if a address has been find - if [ -n "$notification_email" ]; then - mail $from_yuno -s "$subject" "$notification_email" <<< "$message" - fi -fi - - diff --git a/sub_scripts/testing_process.sh b/sub_scripts/testing_process.sh index 949b2ee..c827671 100755 --- a/sub_scripts/testing_process.sh +++ b/sub_scripts/testing_process.sh @@ -17,7 +17,7 @@ break_before_continue () { start_test () { - total_number_of_test=$(cat $test_serie_dir/tests_to_perform | wc -l) + total_number_of_test=$(ls $TEST_CONTEXT/tests/*.json | wc -l) log_title "$1 [Test $current_test_number/$total_number_of_test]" @@ -38,42 +38,22 @@ RUN_YUNOHOST_CMD() { check_witness_files && return $returncode || return 2 } -SET_RESULT() { - [ $2 -eq 1 ] && log_report_test_success || log_report_test_failed - sed --in-place "s/RESULT_$1=.*$/RESULT_$1=$2/g" $test_serie_dir/results -} - -SET_RESULT_IF_NONE_YET() { - [ $2 -eq 1 ] && log_report_test_success || log_report_test_failed - if [ $(GET_RESULT $1) -eq 0 ] - then - sed --in-place "s/RESULT_$1=.*$/RESULT_$1=$2/g" $test_serie_dir/results - fi -} - -GET_RESULT() { - grep "RESULT_$1=" $test_serie_dir/results | awk -F= '{print $2}' -} - -at_least_one_install_succeeded () { - - [ "$(GET_RESULT check_subdir)" -eq 1 ] \ - || [ "$(GET_RESULT check_root)" -eq 1 ] \ - || [ "$(GET_RESULT check_nourl)" -eq 1 ] \ - || { log_error "All installs failed, therefore the following tests cannot be performed..."; - return 1; } -} - this_is_a_web_app () { + # Usually the fact that we test "nourl" - # installs should be a good indicator for this - grep -q "TEST_INSTALL nourl" $test_serie_dir/tests_to_perform && return 1 + # installs should be a good indicator for the fact that it's not a webapp + for TEST in $(ls $TEST_CONTEXT/tests/*.json) + do + jq -e '. | select(.test_type == "TEST_INSTALL") | select(.test_arg == "nourl")' $TEST \ + && return 1 + done + + return 0 } default_install_path() { - this_is_a_web_app && echo "" \ - || [ "$(GET_RESULT check_subdir)" -eq 1 ] && echo "/path " \ - || echo "/" + # All webapps should be installable at the root of a domain ? 
+ this_is_a_web_app && echo "/" || echo "" } #================================================= @@ -81,8 +61,8 @@ default_install_path() { #================================================= INSTALL_APP () { - - local install_args="$(cat "$test_serie_dir/install_args")" + local install_args="$(jq '.install_args' $current_test_infos)" + local preinstall_template="$(jq '.preinstall_template' $current_test_infos)" # We have default values for domain, user and is_public, but these # may still be overwritten by the args ($@) @@ -94,15 +74,14 @@ INSTALL_APP () { done # Exec the pre-install instruction, if there one - preinstall_script_template="$test_serie_dir/preinstall.sh.template" - if [ -e "$preinstall_script_template" ] && [ -n "$(cat $preinstall_script_template)" ] + if [ -n "$preinstall_template" ] then log_small_title "Pre installation request" # Start the lxc container LXC_START "true" # Copy all the instructions into a script - preinstall_script="$test_serie_dir/preinstall.sh" - cp "$preinstall_script_template" "$preinstall_script" + local preinstall_script="$TEST_CONTEXT/preinstall.sh" + echo "$preinstall_template" > "$preinstall_script" chmod +x "$preinstall_script" # Hydrate the template with variables sed -i "s/\$USER/$TEST_USER/" "$preinstall_script" @@ -175,7 +154,7 @@ VALIDATE_THAT_APP_CAN_BE_ACCESSED () { local check_domain=$1 local check_path=$2 - local expected_to_be=${3} # Can be empty, public or private, later used to check if it's okay to end up on the portal + local install_type=${3} # Can be anything or 'private', later used to check if it's okay to end up on the portal local app_id_to_check=${4:-$app_id} local curl_error=0 @@ -188,14 +167,13 @@ VALIDATE_THAT_APP_CAN_BE_ACCESSED () { log_small_title "Validating that the app can (or cannot) be accessed with its url..." # Force a skipped_uris if public mode is not set - if [ -z "$expected_to_be" ] + if [ "$install_type" != 'private' ] then log_debug "Forcing public access using a skipped_uris setting" # Add a skipped_uris on / for the app RUN_YUNOHOST_CMD "app setting $app_id_to_check skipped_uris -v \"/\"" # Regen the config of sso RUN_YUNOHOST_CMD "app ssowatconf" - expected_to_be="public" fi # Try to access to the url in 2 times, with a final / and without @@ -308,11 +286,11 @@ VALIDATE_THAT_APP_CAN_BE_ACCESSED () { curl --location --insecure --silent $check_domain$check_path../html/alias_traversal.html \ | grep "title" | grep --quiet "alias_traversal test" \ && log_error "Issue alias_traversal detected ! Please see here https://github.com/YunoHost/example_ynh/pull/45 to fix that." 
\ - && SET_RESULT alias_traversal 1 + && SET_RESULT "failure" alias_traversal [ "$curl_error" -eq 0 ] || return 1 - [ "$expected_to_be" == "public" ] && [ $fell_on_sso_portal -eq 0 ] || return 2 - [ "$expected_to_be" == "private" ] && [ $fell_on_sso_portal -eq 1 ] || return 2 + [ "$install_type" != "private" ] && [ $fell_on_sso_portal -eq 0 ] || return 2 + [ "$install_type" == "private" ] && [ $fell_on_sso_portal -eq 1 ] || return 2 return 0 } @@ -325,74 +303,66 @@ TEST_INSTALL () { # $1 = install type local install_type=$1 - [ "$install_type" = "subdir" ] && { start_test "Installation in a sub path"; local check_path=/path; } - [ "$install_type" = "root" ] && { start_test "Installation on the root"; local check_path=/; } - [ "$install_type" = "nourl" ] && { start_test "Installation without url access"; local check_path=""; } + local check_path="/" + local is_public="1" + [ "$install_type" = "subdir" ] && { start_test "Installation in a sub path"; local check_path=/path; } + [ "$install_type" = "root" ] && { start_test "Installation on the root"; } + [ "$install_type" = "nourl" ] && { start_test "Installation without url access"; local check_path=""; } + [ "$install_type" = "private" ] && { start_test "Installation in private mode"; local is_public="0"; } local snapname=snap_${install_type}install LOAD_LXC_SNAPSHOT snap0 # Install the application in a LXC container - INSTALL_APP "path=$check_path" \ - && VALIDATE_THAT_APP_CAN_BE_ACCESSED $SUBDOMAIN $check_path + INSTALL_APP "path=$check_path" "is_public=$is_public" \ + && VALIDATE_THAT_APP_CAN_BE_ACCESSED $SUBDOMAIN $check_path $install_type local install=$? + [ $install -eq 0 ] || return 1 + # Create the snapshot that'll be used by other tests later - [ $install -eq 0 ] \ + [ "$install_type" != "private" ] \ && [ ! -e "$LXC_SNAPSHOTS/$snapname" ] \ && log_debug "Create a snapshot after app install" \ && CREATE_LXC_SNAPSHOT $snapname # Remove and reinstall the application - [ $install -eq 0 ] \ - && REMOVE_APP \ + REMOVE_APP \ && log_small_title "Reinstalling after removal." \ - && INSTALL_APP "path=$check_path" \ - && VALIDATE_THAT_APP_CAN_BE_ACCESSED $SUBDOMAIN $check_path + && INSTALL_APP "path=$check_path" "is_public=$is_public" \ + && VALIDATE_THAT_APP_CAN_BE_ACCESSED $SUBDOMAIN $check_path $install_type - # Reinstall the application after the removing - # Try to resintall only if the first install is a success. - [ $? -eq 0 ] \ - && SET_RESULT check_$install_type 1 \ - || SET_RESULT check_$install_type -1 - - break_before_continue + return $? } TEST_UPGRADE () { local commit=$1 - if [ "$commit" == "current" ] + # FIXME FIXME FIXME FIXME : fetch upgrade name, specific upgrade args + + if [ "$commit" == "" ] then start_test "Upgrade from the same version" else - specific_upgrade_args="$(grep "^manifest_arg=" "$test_serie_dir/upgrades/$commit" | cut -d'=' -f2-)" - upgrade_name=$(grep "^name=" "$test_serie_dir/upgrades/$commit" | cut -d'=' -f2) - + upgrade_name="$(jq '.extra.upgrade_name' $current_test_infos)" [ -n "$upgrade_name" ] || upgrade_name="commit $commit" start_test "Upgrade from $upgrade_name" fi - at_least_one_install_succeeded || return + at_least_one_install_succeeded || return 1 local check_path=$(default_install_path) # Install the application in a LXC container log_small_title "Preliminary install..." - if [ "$commit" == "current" ] + if [ "$commit" == "" ] then # If no commit is specified, use the current version. LOAD_SNAPSHOT_OR_INSTALL_APP "$check_path" local ret=$? 
else - # Get the arguments of the manifest for this upgrade. - if [ -n "$specific_upgrade_args" ]; then - cp "$test_serie_dir/install_args" "$test_serie_dir/install_args.bkp" - echo "$specific_upgrade_args" > "$test_serie_dir/install_args" - fi - # Make a backup of the directory # and Change to the specified commit sudo cp -a "$package_path" "${package_path}_back" @@ -404,17 +374,13 @@ TEST_UPGRADE () { INSTALL_APP "path=$check_path" local ret=$? - if [ -n "$specific_upgrade_args" ]; then - mv "$test_serie_dir/install_args.bkp" "$test_serie_dir/install_args" - fi - # Then replace the backup sudo rm -r "$package_path" sudo mv "${package_path}_back" "$package_path" fi - # Check if the install had work - [ $ret -eq 0 ] || { log_error "Initial install failed... upgrade test ignore"; LXC_STOP; continue; } + # Check if the install worked + [ $ret -eq 0 ] || { log_error "Initial install failed... upgrade test ignore"; LXC_STOP; return 1; } log_small_title "Upgrade..." @@ -422,83 +388,7 @@ TEST_UPGRADE () { RUN_YUNOHOST_CMD "app upgrade $app_id -f ./app_folder/" \ && VALIDATE_THAT_APP_CAN_BE_ACCESSED $SUBDOMAIN $check_path - if [ $? -eq 0 ] - then - SET_RESULT_IF_NONE_YET check_upgrade 1 - else - SET_RESULT check_upgrade -1 - fi - - # Remove the application - REMOVE_APP -} - -TEST_PUBLIC_PRIVATE () { - - local install_type=$1 - [ "$install_type" = "private" ] && start_test "Installation in private mode" - [ "$install_type" = "public" ] && start_test "Installation in public mode" - - at_least_one_install_succeeded || return - - # Set public or private according to type of test requested - if [ "$install_type" = "private" ]; then - local is_public="0" - local test_name_for_result="check_private" - elif [ "$install_type" = "public" ]; then - local is_public="1" - local test_name_for_result="check_private" - fi - - # Try in 2 times, first in root and second in sub path. - local i=0 - for i in 0 1 - do - # First, try with a root install - if [ $i -eq 0 ] - then - # Check if root installation worked - [ $(GET_RESULT check_root) -eq 1 ] || { log_warning "Root install failed, therefore this test cannot be performed..."; continue; } - - local check_path=/ - - # Second, try with a sub path install - elif [ $i -eq 1 ] - then - # Check if sub path installation worked, or if force_install_ok is setted. - [ $(GET_RESULT check_subdir) -eq 1 ] || { log_warning "Sub path install failed, therefore this test cannot be performed..."; continue; } - - local check_path=/path - fi - - LOAD_LXC_SNAPSHOT snap0 - - # Install the application in a LXC container - INSTALL_APP "is_public=$is_public" "path=$check_path" \ - && VALIDATE_THAT_APP_CAN_BE_ACCESSED $SUBDOMAIN $check_path "$install_type" - - local ret=$? - - # Result code = 2 means that we were expecting the app to be public but it's private or viceversa - if [ $ret -eq 2 ] - then - yunohost_result=1 - [ "$install_type" = "private" ] && log_error "App is not private: it should redirect to the Yunohost portal, but is publicly accessible instead" - [ "$install_type" = "public" ] && log_error "App page is not public: it should be publicly accessible, but redirects to the Yunohost portal instead" - fi - - # Check the result and print SUCCESS or FAIL - if [ $ret -eq 0 ] - then - SET_RESULT_IF_NONE_YET $test_name_for_result 1 - else - SET_RESULT $test_name_for_result -1 - fi - - break_before_continue - - LXC_STOP - done + return $? 
} TEST_MULTI_INSTANCE () { @@ -506,7 +396,7 @@ TEST_MULTI_INSTANCE () { start_test "Multi-instance installations" # Check if an install have previously work - at_least_one_install_succeeded || return + at_least_one_install_succeeded || return 1 local check_path=$(default_install_path) @@ -519,14 +409,7 @@ TEST_MULTI_INSTANCE () { && VALIDATE_THAT_APP_CAN_BE_ACCESSED $DOMAIN $check_path \ && VALIDATE_THAT_APP_CAN_BE_ACCESSED $SUBDOMAIN $check_path "" ${app_id}__2 - if [ $? -eq 0 ] - then - SET_RESULT check_multi_instance 1 - else - SET_RESULT check_multi_instance -1 - fi - - break_before_continue + return $? } TEST_PORT_ALREADY_USED () { @@ -534,11 +417,11 @@ TEST_PORT_ALREADY_USED () { start_test "Port already used" # Check if an install have previously work - at_least_one_install_succeeded || return - + at_least_one_install_succeeded || return 1 + local check_port=$1 local check_path=$(default_install_path) - + LOAD_LXC_SNAPSHOT snap0 # Build a service with netcat for use this port before the app. @@ -554,20 +437,18 @@ TEST_PORT_ALREADY_USED () { INSTALL_APP "path=$check_path" "port=$check_port" \ && VALIDATE_THAT_APP_CAN_BE_ACCESSED $SUBDOMAIN $check_path - [ $? -eq 0 ] && SET_RESULT check_port 1 || SET_RESULT check_port -1 - - break_before_continue + return $? } - + TEST_BACKUP_RESTORE () { - + # Try to backup then restore the app start_test "Backup/Restore" # Check if an install have previously work - at_least_one_install_succeeded || return - + at_least_one_install_succeeded || return 1 + local check_path=$(default_install_path) # Install the application in a LXC container @@ -575,6 +456,8 @@ TEST_BACKUP_RESTORE () { local ret=$? + local main_result=0 + # Remove the previous residual backups sudo rm -rf $LXC_ROOTFS/home/yunohost.backup/archives @@ -588,23 +471,10 @@ TEST_BACKUP_RESTORE () { # Made a backup of the application RUN_YUNOHOST_CMD "backup create -n Backup_test --apps $app_id" - ret=$? - - if [ $ret -eq 0 ]; then - log_debug "Backup successful" - else - log_error "Backup failed." - fi fi - # Check the result and print SUCCESS or FAIL - if [ $ret -eq 0 ] - then - SET_RESULT_IF_NONE_YET check_backup 1 - else - SET_RESULT check_backup -1 - fi + [ $ret -eq 0 ] || main_result=1 # Grab the backup archive into the LXC container, and keep a copy sudo cp -a $LXC_ROOTFS/home/yunohost.backup/archives ./ @@ -643,21 +513,15 @@ TEST_BACKUP_RESTORE () { && VALIDATE_THAT_APP_CAN_BE_ACCESSED $SUBDOMAIN $check_path local ret=$? - - # Print the result of the backup command - if [ $ret -eq 0 ]; then - log_debug "Restore successful." - SET_RESULT_IF_NONE_YET check_restore 1 - else - log_error "Restore failed." - SET_RESULT check_restore -1 - fi + [ $ret -eq 0 ] || main_result=1 break_before_continue # Stop and restore the LXC container LXC_STOP done + + return $main_result } TEST_CHANGE_URL () { @@ -666,12 +530,13 @@ TEST_CHANGE_URL () { start_test "Change URL" # Check if an install have previously work - at_least_one_install_succeeded || return - this_is_a_web_app || return + at_least_one_install_succeeded || return 1 + this_is_a_web_app || return 0 # Try in 6 times ! # Without modify the domain, root to path, path to path and path to root. 
# And then, same with a domain change + local main_result=0 local i=0 for i in $(seq 1 7) do @@ -718,16 +583,6 @@ TEST_CHANGE_URL () { local new_domain=$DOMAIN fi - # Validate that install worked in the corresponding configuration previously - - # If any of the begin/end path is /, we need to have root install working - ( [ "$check_path" != "/" ] && [ "$new_path" != "/" ] ) || [ $(GET_RESULT check_root) -eq 1 ] \ - || { log_warning "Root install failed, therefore this test cannot be performed..."; continue; } - - # If any of the being/end path is not /, we need to have sub_dir install working - ( [ "$new_path" == "/" ] && [ "$new_path" == "/" ] ) || [ $(GET_RESULT check_subdir) -eq 1 ] \ - || { log_warning "Subpath install failed, therefore this test cannot be performed..."; continue; } - # Install the application in a LXC container log_small_title "Preliminary install..." \ && LOAD_SNAPSHOT_OR_INSTALL_APP "$check_path" \ @@ -735,17 +590,15 @@ TEST_CHANGE_URL () { && RUN_YUNOHOST_CMD "app change-url $app_id -d '$new_domain' -p '$new_path'" \ && VALIDATE_THAT_APP_CAN_BE_ACCESSED $new_domain $new_path - if [ $ret -eq 0 ] - then - SET_RESULT_IF_NONE_YET change_url 1 - else - SET_RESULT change_url -1 - fi + local ret=$? + [ $ret -eq 0 ] || main_result=1 break_before_continue LXC_STOP done + + return $main_result } # Define a function to split a file in multiple parts. Used for actions and config-panel toml @@ -812,26 +665,14 @@ ACTIONS_CONFIG_PANEL () { fi # Check if an install have previously work - at_least_one_install_succeeded || return + at_least_one_install_succeeded || return 1 # Install the application in a LXC container log_small_title "Preliminary install..." local check_path=$(default_install_path) LOAD_SNAPSHOT_OR_INSTALL_APP "$check_path" - validate_action_config_panel() - { - local message="$1" - - # Print the result of the command - if [ $ret -eq 0 ]; then - SET_RESULT_IF_NONE_YET action_config_panel 1 # Actions succeed - else - SET_RESULT action_config_panel -1 # Actions failed - fi - - break_before_continue - } + local main_result=0 # List first, then execute local ret=0 @@ -855,7 +696,9 @@ ACTIONS_CONFIG_PANEL () { RUN_YUNOHOST_CMD "app action list $app_id" local ret=$? - validate_action_config_panel "yunohost app action list" + [ $ret -eq 0 ] || main_result=1 + break_before_continue + elif [ "$test_type" == "config_panel" ] then log_info "> Show the config panel..." @@ -863,8 +706,9 @@ ACTIONS_CONFIG_PANEL () { # Show the config-panel RUN_YUNOHOST_CMD "app config show-panel $app_id" local ret=$? 
+ [ $ret -eq 0 ] || main_result=1 + break_before_continue - validate_action_config_panel "yunohost app config show-panel" fi elif [ $i -eq 2 ] then @@ -955,7 +799,7 @@ ACTIONS_CONFIG_PANEL () { add_arg="${line//\"/}" # Then add this argument and follow it by : check_process_arguments="${check_process_arguments}${add_arg}:" - done < $test_serie_dir/check_process.configpanel_infos + done < $test_serie_dir/check_process.configpanel_infos #FIXME elif [ "$test_type" == "actions" ] then local check_process_arguments="" @@ -965,7 +809,7 @@ ACTIONS_CONFIG_PANEL () { add_arg="${line//\"/}" # Then add this argument and follow it by : check_process_arguments="${check_process_arguments}${add_arg}:" - done < $test_serie_dir/check_process.actions_infos + done < $test_serie_dir/check_process.actions_infos #FIXME fi # Look for arguments into the check_process if echo "$check_process_arguments" | grep --quiet "$action_config_argument_name" @@ -1035,13 +879,15 @@ ACTIONS_CONFIG_PANEL () { RUN_YUNOHOST_CMD "app action run $app_id $action_config_action $action_config_argument_built" ret=$? fi - validate_action_config_panel "yunohost action $action_config_action" + [ $ret -eq 0 ] || main_result=1 + break_before_continue done done fi done LXC_STOP + return $main_result } PACKAGE_LINTER () { @@ -1050,61 +896,49 @@ PACKAGE_LINTER () { start_test "Package linter" # Execute package linter and linter_result gets the return code of the package linter - "./package_linter/package_linter.py" "$package_path" > "./temp_linter_result.log" - "./package_linter/package_linter.py" "$package_path" --json > "./temp_linter_result.json" + "./package_linter/package_linter.py" "$package_path" | tee -a "$complete_log" + "./package_linter/package_linter.py" "$package_path" --json | tee -a "$complete_log" > $current_test_results - # Print the results of package linter and copy these result in the complete log - cat "./temp_linter_result.log" | tee --append "$complete_log" - cat "./temp_linter_result.json" >> "$complete_log" - - SET_RESULT linter_broken 0 - SET_RESULT linter_level_6 0 - SET_RESULT linter_level_7 0 - SET_RESULT linter_level_8 0 - - # Check we qualify for level 6, 7, 8 - # Linter will have a warning called "app_in_github_org" if app ain't in the - # yunohost-apps org... - if ! cat "./temp_linter_result.json" | jq ".warning" | grep -q "app_in_github_org" - then - SET_RESULT linter_level_6 1 - fi - if cat "./temp_linter_result.json" | jq ".success" | grep -q "qualify_for_level_7" - then - SET_RESULT linter_level_7 1 - fi - if cat "./temp_linter_result.json" | jq ".success" | grep -q "qualify_for_level_8" - then - SET_RESULT linter_level_8 1 - fi - - # If there are any critical errors, we'll force level 0 - if [[ -n "$(cat "./temp_linter_result.json" | jq ".critical" | grep -v '\[\]')" ]] - then - log_report_test_failed - SET_RESULT linter_broken 1 - SET_RESULT linter -1 - # If there are any regular errors, we'll cap to 4 - elif [[ -n "$(cat "./temp_linter_result.json" | jq ".error" | grep -v '\[\]')" ]] - then - log_report_test_failed - SET_RESULT linter -1 - # Otherwise, test pass (we'll display a warning depending on if there are - # any remaning warnings or not) - else - if [[ -n "$(cat "./temp_linter_result.json" | jq ".warning" | grep -v '\[\]')" ]] - then - log_report_test_warning - else - log_report_test_success - fi - SET_RESULT linter 1 - fi +# # Check we qualify for level 6, 7, 8 +# # Linter will have a warning called "app_in_github_org" if app ain't in the +# # yunohost-apps org... +# if ! 
cat "./temp_linter_result.json" | jq ".warning" | grep -q "app_in_github_org" +# then +# local pass_level_6="true" +# fi +# if cat "./temp_linter_result.json" | jq ".success" | grep -q "qualify_for_level_7" +# then +# local pass_level_7="true" +# fi +# if cat "./temp_linter_result.json" | jq ".success" | grep -q "qualify_for_level_8" +# then +# local pass_level_8="true" +# fi +# +# # If there are any critical errors, we'll force level 0 +# if [[ -n "$(cat "./temp_linter_result.json" | jq ".critical" | grep -v '\[\]')" ]] +# then +# local pass_level_0="false" +# # If there are any regular errors, we'll cap to 4 +# elif [[ -n "$(cat "./temp_linter_result.json" | jq ".error" | grep -v '\[\]')" ]] +# then +# local pass_level_4="false" +# # Otherwise, test pass (we'll display a warning depending on if there are +# # any remaning warnings or not) +# else +# if [[ -n "$(cat "./temp_linter_result.json" | jq ".warning" | grep -v '\[\]')" ]] +# then +# log_report_test_warning +# else +# log_report_test_success +# fi +# local pass_level_4="true" +# fi } set_witness_files () { # Create files to check if the remove script does not remove them accidentally - echo "Create witness files..." >> "$complete_log" + log_debug "Create witness files..." create_witness_file () { [ "$2" = "file" ] && local action="touch" || local action="mkdir -p" @@ -1159,7 +993,7 @@ check_witness_files () { if sudo test ! -e "${LXC_ROOTFS}${1}" then log_error "The file $1 is missing ! Something gone wrong !" - SET_RESULT witness 1 + SET_RESULT "failure" witness fi } @@ -1184,13 +1018,13 @@ check_witness_files () { # Config fpm if [ -d "${LXC_ROOTFS}/etc/php5/fpm" ]; then - check_file_exist "/etc/php5/fpm/pool.d/witnessfile.conf" file + check_file_exist "/etc/php5/fpm/pool.d/witnessfile.conf" fi if [ -d "${LXC_ROOTFS}/etc/php/7.0/fpm" ]; then - check_file_exist "/etc/php/7.0/fpm/pool.d/witnessfile.conf" file + check_file_exist "/etc/php/7.0/fpm/pool.d/witnessfile.conf" fi if [ -d "${LXC_ROOTFS}/etc/php/7.3/fpm" ]; then - check_file_exist "/etc/php/7.3/fpm/pool.d/witnessfile.conf" file + check_file_exist "/etc/php/7.3/fpm/pool.d/witnessfile.conf" fi # Config logrotate @@ -1203,21 +1037,16 @@ check_witness_files () { if ! RUN_INSIDE_LXC mysqlshow --user=root --password=$(sudo cat "$LXC_ROOTFS/etc/yunohost/mysql") witnessdb > /dev/null 2>&1 then log_error "The database witnessdb is missing ! Something gone wrong !" - SET_RESULT witness 1 + SET_RESULT "failure" witness + return 1 fi - - [ $(GET_RESULT witness) -eq 1 ] && return 1 || return 0 } -RUN_TEST_SERIE() { +RUN_ALL_TESTS() { # Launch all tests successively - test_serie_dir=$1 - curl_error=0 - log_title "Tests serie: $(cat $test_serie_dir/test_serie_name)" - # Be sure that the container is running LXC_START "true" @@ -1230,32 +1059,41 @@ RUN_TEST_SERIE() { current_test_number=1 # The list of test contains for example "TEST_UPGRADE some_commit_id - readarray -t tests < $test_serie_dir/tests_to_perform - for test in "${tests[@]}"; + for testfile in $(ls $TEST_CONTEXT/tests/*.json); do - TEST_LAUNCHER $test + TEST_LAUNCHER $testfile done } TEST_LAUNCHER () { - # Abstract for test execution. - # $1 = Name of the function to execute - # $2 = Argument for the function + local testfile="$1" # Start the timer for this test start_timer # And keep this value separately local global_start_timer=$starttime + current_test_id=$(basename $test | cut -d. 
-f1)
+    current_test_infos="$TEST_CONTEXT/tests/$current_test_id.json"
+    current_test_results="$TEST_CONTEXT/results/$current_test_id.json"
+    echo "{}" > $current_test_results
+
+    local test_type=$(jq -r '.test_type' $testfile)
+    local test_arg=$(jq -r '.test_arg' $testfile)
+
     # Execute the test
-    $1 $2
+    $test_type $test_arg
+
+    [ $? -eq 0 ] && SET_RESULT "success" main_result || SET_RESULT "failure" main_result
+
+    break_before_continue
 
     # Restore the started time for the timer
     starttime=$global_start_timer
     # End the timer for the test
     stop_timer 2
-    
+
     LXC_STOP
 
     # Update the lock file with the date of the last finished test.
@@ -1263,4 +1101,25 @@ TEST_LAUNCHER () {
     echo "$1 $2:$(date +%s):$$" > "$lock_file"
 }
 
+SET_RESULT() {
+    local result=$1
+    local name=$2
+    [ "$result" == "success" ] && log_report_test_success || log_report_test_failed
+    local current_results="$(cat $current_test_results)"
+    echo "$current_results" | jq --arg result $result ".$name=\$result" > $current_test_results
+}
+
+at_least_one_install_succeeded () {
+
+    for TEST in $(ls $TEST_CONTEXT/tests/*.json)
+    do
+        local test_id=$(basename $TEST | cut -d. -f1)
+        jq -e '. | select(.test_type == "TEST_INSTALL")' $TEST \
+            && jq -e '. | select(.main_result == "success")' $TEST_CONTEXT/results/$test_id.json \
+            && return 0
+    done
+
+    log_error "All installs failed, therefore the following tests cannot be performed..."
+    return 1
+}
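
Note on the per-test descriptors introduced by this patch: add_test() is meant to serialize one JSON file per test under $TEST_CONTEXT/tests/, which TEST_LAUNCHER and at_least_one_install_succeeded then read back with jq. The sketch below shows one way such a descriptor could be written; it is illustrative only and not part of the patch. In particular, the redirection to the output file and the named --argjson argument are assumptions, since "jq -n -f <output file>" combined with a positional filter string and a nameless --argjson would not run as written in the hunks above.

# Illustrative sketch only (not part of the patch): write one JSON descriptor
# per test, using the same field names as the parser above. Assumes
# $TEST_CONTEXT, $test_id, $test_serie, $preinstall_template and $install_args
# are already set, as in parse_check_process().
add_test() {
    local test_type="$1"
    local test_arg="$2"
    local extra="${3:-}"
    [ -n "$extra" ] || extra="{}"

    test_id="$((test_id+1))"
    mkdir -p "$TEST_CONTEXT/tests"

    jq -n \
        --arg test_serie "$test_serie" \
        --arg test_type "$test_type" \
        --arg test_arg "$test_arg" \
        --arg preinstall_template "$preinstall_template" \
        --arg install_args "$install_args" \
        --argjson extra "$extra" \
        '{ $test_serie, $test_type, $test_arg, $preinstall_template, $install_args, $extra }' \
        > "$TEST_CONTEXT/tests/$test_id.json"
}

# A descriptor produced this way would look like:
# {
#   "test_serie": "default",
#   "test_type": "TEST_INSTALL",
#   "test_arg": "subdir",
#   "preinstall_template": "",
#   "install_args": "domain=foo.com&path=/bar&password=stuff",
#   "extra": {}
# }
#
# Reading the fields back with "jq -r" (raw output) keeps the values unquoted,
# which matters when test_type is later used as a command name in TEST_LAUNCHER.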