From a15015c905c582d9fde00e834eb8c2fb90419b82 Mon Sep 17 00:00:00 2001 From: Alexandre Aubin Date: Sun, 6 Dec 2020 03:23:27 +0100 Subject: [PATCH] More madness cleanup --- config.defaults | 33 + config.modele | 28 - package_check.sh | 1389 ++++++++------------------------ sub_scripts/auto_upgrade.sh | 27 - sub_scripts/common.sh | 215 ++++- sub_scripts/launcher.sh | 208 ++--- sub_scripts/lxc_build.sh | 394 ++++----- sub_scripts/lxc_check.sh | 143 +--- sub_scripts/lxc_force_start.sh | 41 - sub_scripts/lxc_force_stop.sh | 49 -- sub_scripts/lxc_remove.sh | 50 +- sub_scripts/lxc_upgrade.sh | 123 --- sub_scripts/notifications.sh | 196 +++++ sub_scripts/testing_process.sh | 1126 +++++++++++--------------- 14 files changed, 1493 insertions(+), 2529 deletions(-) create mode 100644 config.defaults delete mode 100644 config.modele delete mode 100755 sub_scripts/auto_upgrade.sh delete mode 100755 sub_scripts/lxc_force_start.sh delete mode 100755 sub_scripts/lxc_force_stop.sh delete mode 100755 sub_scripts/lxc_upgrade.sh create mode 100755 sub_scripts/notifications.sh mode change 100644 => 100755 sub_scripts/testing_process.sh diff --git a/config.defaults b/config.defaults new file mode 100644 index 0000000..c4c9129 --- /dev/null +++ b/config.defaults @@ -0,0 +1,33 @@ +##################### +# LXC Configuration # +##################### + +# Network stuff +MAIN_NETWORK_INTERFACE=$(sudo ip route | grep default | awk '{print $5;}') +LXC_BRIDGE="lxc-pchecker" +LXC_NETWORK="10.1.4" +DNS_RESOLVER="80.67.169.12" + +# Container configuration +DISTRIB="buster" +LXC_NAME="pchecker_lxc" +LXC_ROOTFS="/var/lib/lxc/$LXC_NAME/rootfs" +LXC_SNAPSHOTS="/var/lib/lxcsnaps/$LXC_NAME" + +########################### +# Yunohost configuration # +########################### + +# By default we'll install Yunohost with the default branch +YNH_INSTALL_SCRIPT_BRANCH="" + +# Admin password +YUNO_PWD="admin" + +# Domaines de test +DOMAIN="domain.tld" +SUBDOMAIN="sub.$DOMAIN" + +# User de test 
+TEST_USER="package_checker" + diff --git a/config.modele b/config.modele deleted file mode 100644 index c148ee3..0000000 --- a/config.modele +++ /dev/null @@ -1,28 +0,0 @@ -# Interface réseau principale de l'hôte -iface= - -# Adresse du dns -dns= - -# Forçage du dns -dnsforce= - -# Plage IP du conteneur -PLAGE_IP= - -# Domaine de test -DOMAIN= - -# Mot de passe -YUNO_PWD= - -# Nom du conteneur -LXC_NAME= - -# Nom du bridge -LXC_BRIDGE= - -# Distribution debian -DISTRIB= -# Branche à utiliser pour le script d'install de cette distribution (si non standard) -BRANCH= diff --git a/package_check.sh b/package_check.sh index 4af3afb..5a50000 100755 --- a/package_check.sh +++ b/package_check.sh @@ -1,10 +1,20 @@ #!/bin/bash -#================================================= -# Grab the script directory -#================================================= +cd $(dirname $(realpath $0) | sed 's@/sub_scripts$@@g') +source "./sub_scripts/common.sh" +source "./sub_scripts/launcher.sh" +source "./sub_scripts/testing_process.sh" -if [ "${0:0:1}" == "/" ]; then script_dir="$(dirname "$0")"; else script_dir="$(echo $PWD/$(dirname "$0" | cut -d '.' -f2) | sed 's@/$@@')"; fi +complete_log="./Complete.log" +test_series="" + +# Purge some log files +> "$complete_log" +> "./lxc_boot.log" + +TEST_CONTEXT="./.tmp_test_context" +rm -rf $TEST_CONTEXT +mkdir -p $TEST_CONTEXT #================================================= # Starting and checking @@ -12,6 +22,26 @@ if [ "${0:0:1}" == "/" ]; then script_dir="$(dirname "$0")"; else script_dir="$( # Generic functions #================================================= +print_help() { + cat << EOF + +Usage: +package_check.sh [OPTION]... PACKAGE_TO_CHECK + -b, --branch=BRANCH + Specify a branch to check. + -f, --force-install-ok + Force remaining tests even if installation tests failed or were not selected for execution. + -i, --interactive + Wait for the user to continue before each remove. 
+ -h, --help + Display this help + -l, --build-lxc + Install LXC and build the container if necessary. +EOF +exit 0 +} + + clean_exit () { # Exit and remove all temp files # $1 = exit code @@ -20,10 +50,9 @@ clean_exit () { LXC_TURNOFF # Remove temporary files - rm -f "$script_dir/url_output" - rm -f "$script_dir/curl_print" - rm -f "$script_dir/manifest_extract" - rm -rf "$script_dir/tmp_context_for_tests" + rm -f "./url_output" + rm -f "./curl_print" + rm -rf "$TEST_CONTEXT" # Remove the application which been tested if [ -n "$package_path" ]; then @@ -37,164 +66,104 @@ clean_exit () { } #================================================= -# Check and read CLI arguments +# Pase CLI arguments #================================================= -echo "" +# If no arguments provided +# Print the help and exit +[ "$#" -eq 0 ] && print_help -# Init arguments value gitbranch="" force_install_ok=0 -interrupt=0 -notice=0 +interactive=0 build_lxc=0 -bash_mode=0 -show_resources=0 +arguments=("$@") +getopts_built_arg=() -# If no arguments provided -if [ "$#" -eq 0 ] -then - # Print the help and exit - notice=1 -else - # Store arguments in a array to keep each argument separated - arguments=("$@") - getopts_built_arg=() +# Read the array value per value +for i in `seq 0 $(( ${#arguments[@]} -1 ))` +do + if [[ "${arguments[$i]}" =~ "--branch=" ]] + then + getopts_built_arg+=(-b) + arguments[$i]=${arguments[$i]//--branch=/} + fi + # For each argument in the array, reduce to short argument for getopts + arguments[$i]=${arguments[$i]//--force-install-ok/-f} + arguments[$i]=${arguments[$i]//--interactive/-i} + arguments[$i]=${arguments[$i]//--help/-h} + arguments[$i]=${arguments[$i]//--build-lxc/-l} + getopts_built_arg+=("${arguments[$i]}") +done - # Read the array value per value - for i in `seq 0 $(( ${#arguments[@]} -1 ))` +# Read and parse all the arguments +# Use a function here, to use standart arguments $@ and be able to use shift. 
+parse_arg () { + while [ $# -ne 0 ] do - if [[ "${arguments[$i]}" =~ "--branch=" ]] + # If the paramater begins by -, treat it with getopts + if [ "${1:0:1}" == "-" ] then - getopts_built_arg+=(-b) - arguments[$i]=${arguments[$i]//--branch=/} + # Initialize the index of getopts + OPTIND=1 + # Parse with getopts only if the argument begin by - + getopts ":b:fihlyr" parameter || true + case $parameter in + b) + # --branch=branch-name + gitbranch="-b $OPTARG" + shift_value=2 + ;; + f) + # --force-install-ok + force_install_ok=1 + shift_value=1 + ;; + i) + # --interactive + interactive=1 + shift_value=1 + ;; + h) + # --help + print_help + ;; + l) + # --build-lxc + build_lxc=1 + shift_value=1 + ;; + \?) + echo "Invalid argument: -${OPTARG:-}" + print_help + ;; + :) + echo "-$OPTARG parameter requires an argument." + print_help + ;; + esac + # Otherwise, it's not an option, it's an operand + else + path_to_package_to_test="$1" + shift_value=1 fi - # For each argument in the array, reduce to short argument for getopts - arguments[$i]=${arguments[$i]//--force-install-ok/-f} - arguments[$i]=${arguments[$i]//--interrupt/-i} - arguments[$i]=${arguments[$i]//--help/-h} - arguments[$i]=${arguments[$i]//--build-lxc/-l} - arguments[$i]=${arguments[$i]//--bash-mode/-y} - arguments[$i]=${arguments[$i]//--show-resources/-r} - getopts_built_arg+=("${arguments[$i]}") + # Shift the parameter and its argument + shift $shift_value done +} - # Read and parse all the arguments - # Use a function here, to use standart arguments $@ and be able to use shift. 
- parse_arg () { - while [ $# -ne 0 ] - do - # If the paramater begins by -, treat it with getopts - if [ "${1:0:1}" == "-" ] - then - # Initialize the index of getopts - OPTIND=1 - # Parse with getopts only if the argument begin by - - getopts ":b:fihlyr" parameter || true - case $parameter in - b) - # --branch=branch-name - gitbranch="-b $OPTARG" - shift_value=2 - ;; - f) - # --force-install-ok - force_install_ok=1 - shift_value=1 - ;; - i) - # --interrupt - interrupt=1 - shift_value=1 - ;; - h) - # --help - notice=1 - shift_value=1 - ;; - l) - # --build-lxc - build_lxc=1 - shift_value=1 - ;; - y) - # --bash-mode - bash_mode=1 - shift_value=1 - ;; - r) - # --show-resources - show_resources=1 - shift_value=1 - ;; - \?) - echo "Invalid argument: -${OPTARG:-}" - notice=1 - shift_value=1 - ;; - :) - echo "-$OPTARG parameter requires an argument." - notice=1 - shift_value=1 - ;; - esac - # Otherwise, it's not an option, it's an operand - else - app_arg="$1" - shift_value=1 - fi - # Shift the parameter and its argument - shift $shift_value - done - } - - # Call parse_arg and pass the modified list of args as a array of arguments. - parse_arg "${getopts_built_arg[@]}" -fi - -# Prevent a conflict between --interrupt and --bash-mode -if [ $interrupt -eq 1 ] && [ $bash_mode -eq 1 ] -then - echo "You can't use --interrupt and --bash-mode together !" - notice=1 -fi - -# Print help -if [ $notice -eq 1 ] -then - cat << EOF - -Usage: -package_check.sh [OPTION]... PACKAGE_TO_CHECK - -b, --branch=BRANCH - Specify a branch to check. - -f, --force-install-ok - Force remaining tests even if installation tests failed or were not selected for execution. - -i, --interrupt - Force auto_remove value, break before each remove. - -h, --help - Display this notice. - -l, --build-lxc - Install LXC and build the container if necessary. - -y, --bash-mode -Do not ask for continue check. Ignore auto_remove. - -r, --show-resources - Show the unavailable resources when accessing the url. 
-EOF -exit 0 -fi +# Call parse_arg and pass the modified list of args as a array of arguments. +parse_arg "${getopts_built_arg[@]}" #================================================= # Check if the lock file exist #================================================= -lock_file="$script_dir/pcheck.lock" - if test -e "$lock_file" then # If the lock file exist echo "The lock file $lock_file is present. Package check would not continue." - if [ $bash_mode -ne 1 ]; then + if [ $interactive -eq 1 ]; then echo -n "Do you want to continue anyway? (y/n) :" read answer fi @@ -211,244 +180,28 @@ fi echo "start:$(date +%s):$$" > "$lock_file" #================================================= -# Check the internet connectivity +# Various logistic checks and upgrades... #================================================= -# Try to ping yunohost.org -ping -q -c 2 yunohost.org > /dev/null 2>&1 -if [ "$?" -ne 0 ]; then - # If fail, try to ping another domain - ping -q -c 2 framasoft.org > /dev/null 2>&1 - if [ "$?" -ne 0 ]; then - # If ping failed twice, it's seems the internet connection is down. - echo "\e[91mUnable to connect to internet.\e[0m" - - # Remove the lock file - rm -f "$lock_file" - # And exit - exit 1 - fi -fi - -#================================================= -# Upgrade Package check -#================================================= - -git_repository=https://github.com/YunoHost/package_check -version_file="$script_dir/pcheck_version" - -check_version="$(git ls-remote $git_repository | cut -f 1 | head -n1)" - -# If the version file exist, check for an upgrade -if [ -e "$version_file" ] -then - # Check if the last commit on the repository match with the current version - if [ "$check_version" != "$(cat "$version_file")" ] - then - # If the versions don't matches. 
Do an upgrade - echo -e "\e[97m\e[1mUpgrade Package check...\n\e[0m" - - # Build the upgrade script - cat > "$script_dir/upgrade_script.sh" << EOF - -#!/bin/bash -# Clone in another directory -git clone --quiet $git_repository "$script_dir/upgrade" -cp -a "$script_dir/upgrade/." "$script_dir/." -sudo rm -r "$script_dir/upgrade" -# Update the version file -echo "$check_version" > "$version_file" -rm "$script_dir/pcheck.lock" -# Execute package check by replacement of this process -exec "$script_dir/package_check.sh" "${arguments[@]}" -EOF - -# Give the execution right -chmod +x "$script_dir/upgrade_script.sh" - -# Temporary upgrade fix -# Check if lynx is already installed. -if [ ! -e "$(which lynx)" ] -then - sudo apt-get install -y lynx -fi - -# Start the upgrade script by replacement of this process -exec "$script_dir/upgrade_script.sh" - fi -fi - -# Update the version file -echo "$check_version" > "$version_file" - -#================================================= -# Upgrade Package linter -#================================================= - -git_repository=https://github.com/YunoHost/package_linter -version_file="$script_dir/plinter_version" - -check_version="$(git ls-remote $git_repository | cut -f 1 | head -n1)" - -# If the version file exist, check for an upgrade -if [ -e "$version_file" ] -then - # Check if the last commit on the repository match with the current version - if [ "$check_version" != "$(cat "$version_file")" ] - then - # If the versions don't matches. Do an upgrade - echo -e "\e[97m\e[1mUpgrade Package linter...\n\e[0m" - - # Clone in another directory - git clone --quiet https://github.com/YunoHost/package_linter "$script_dir/package_linter_tmp" - pip3 install pyparsing six - - # And replace - cp -a "$script_dir/package_linter_tmp/." "$script_dir/package_linter/." 
- sudo rm -r "$script_dir/package_linter_tmp" - fi -else - echo -e "\e[97mInstall Package linter.\n\e[0m" - git clone --quiet $git_repository "$script_dir/package_linter" - pip3 install pyparsing six -fi - -# Update the version file -echo "$check_version" > "$version_file" - -#================================================= -# Get variables from the config file -#================================================= - -pcheck_config="$script_dir/config" -build_script="$script_dir/sub_scripts/lxc_build.sh" - -if [ -e "$pcheck_config" ] -then - # Read the config file if it exists - ip_range=$(grep PLAGE_IP= "$pcheck_config" | cut -d '=' -f2) - main_domain=$(grep DOMAIN= "$pcheck_config" | cut -d '=' -f2) - yuno_pwd=$(grep YUNO_PWD= "$pcheck_config" | cut -d '=' -f2) - lxc_name=$(grep LXC_NAME= "$pcheck_config" | cut -d '=' -f2) - lxc_bridge=$(grep LXC_BRIDGE= "$pcheck_config" | cut -d '=' -f2) - main_iface=$(grep iface= "$pcheck_config" | cut -d '=' -f2) -fi - -# Use the default value and set it in the config file -replace_default_value () { - CONFIG_KEY=$1 - local value=$(grep "|| $CONFIG_KEY=" "$build_script" | cut -d '=' -f2) - if grep -q $CONFIG_KEY= "$pcheck_config" - then - sed -i "s/$CONFIG_KEY=.*/$CONFIG_KEY=$value/" "$pcheck_config" - else - echo -e "$CONFIG_KEY=$value\n" >> "$pcheck_config" - fi - echo $value -} -# Use default value from the build script if needed -if [ -z "$ip_range" ]; then - ip_range=$(replace_default_value PLAGE_IP) -fi -if [ -z "$main_domain" ]; then - main_domain=$(replace_default_value DOMAIN) -fi -if [ -z "$yuno_pwd" ]; then - yuno_pwd=$(replace_default_value YUNO_PWD) -fi -if [ -z "$lxc_name" ]; then - lxc_name=$(replace_default_value LXC_NAME) -fi -if [ -z "$lxc_bridge" ]; then - lxc_bridge=$(replace_default_value LXC_BRIDGE) -fi - -if [ -z "$main_iface" ]; then - # Try to determine the main iface - main_iface=$(sudo ip route | grep default | awk '{print $5;}') - if [ -z $main_iface ] - then - echo -e "\e[91mUnable to find the name 
of the main iface.\e[0m" - - # Remove the lock file - rm -f "$lock_file" - # And exit - exit 1 - fi - # Store the main iface in the config file - if grep -q iface= "$pcheck_config" - then - sed -i "s/iface=.*/iface=$main_iface/" - else - echo -e "# Main host iface\niface=$main_iface\n" >> "$pcheck_config" - fi -fi - -#================================================= -# Check the user who try to execute this script -#================================================= - -setup_user_file="$script_dir/sub_scripts/setup_user" -if [ -e "$setup_user_file" ] -then - # Compare the current user and the user stored in $setup_user_file - authorised_user="$(cat "$setup_user_file")" - if [ "$(whoami)" != "$authorised_user" ] - then - critical "This script need to be executed by the user $setup_user_file !\nThe current user is $(whoami)." - fi -else - echo -e "\e[93mUnable to define the user who authorised to use package check. Please fill the file $setup_user_file\e[0m" -fi - -#================================================= -# Define globals variables -#================================================= - -# Complete result log. 
Complete log of YunoHost -complete_log="$script_dir/Complete.log" -# Real YunoHost log -yunohost_log="/var/lib/lxc/$lxc_name/rootfs/var/log/yunohost/yunohost-cli.log" - -sub_domain="sous.$main_domain" -test_user=package_checker - -#================================================= -# Load all functions -#================================================= - -source "$script_dir/sub_scripts/common.sh" -source "$script_dir/sub_scripts/launcher.sh" -source "$script_dir/sub_scripts/testing_process.sh" - -#================================================= -# Check LXC -#================================================= +assert_we_are_the_setup_user +assert_we_are_connected_to_the_internets +self_upgrade +fetch_or_upgrade_package_linter # Check if lxc is already installed if dpkg-query -W -f '${Status}' "lxc" 2>/dev/null | grep -q "ok installed" then # If lxc is installed, check if the container is already built. - if ! sudo lxc-ls | grep -q "$lxc_name" + if ! sudo lxc-ls | grep -q "$LXC_NAME" then - if [ $build_lxc -eq 1 ] - then - # If lxc's not installed and build_lxc set. Asks to build the container. - build_lxc=2 - else - critical "LXC is not installed or the container $lxc_name doesn't exist.\nUse the script 'lxc_build.sh' to fix them." - fi + # If lxc's not installed and build_lxc set. Asks to build the container. + [ $build_lxc -eq 1 ] || log_critical "LXC is not installed or the container $LXC_NAME doesn't exist.\nYou should build it with 'lxc_build.sh'." + ./sub_scripts/lxc_build.sh fi elif [ $build_lxc -eq 1 ] then # If lxc's not installed and build_lxc set. Asks to build the container. - build_lxc=2 -fi - -if [ $build_lxc -eq 2 ] -then - # Install LXC and build the container before continue. - "$script_dir/sub_scripts/lxc_build.sh" + ./sub_scripts/lxc_build.sh fi # Stop and restore the LXC container. In case of previous incomplete execution. 
@@ -456,88 +209,71 @@ LXC_STOP # Deactivate LXC network LXC_TURNOFF -#================================================= -# Determine if it's a CI environment -#================================================= - -# By default, it's a standalone execution. -type_exec_env=0 -if [ -e "$script_dir/../config" ] -then - # CI environment - type_exec_env=1 -fi -if [ -e "$script_dir/../auto_build/auto.conf" ] -then - # Official CI environment - type_exec_env=2 -fi - #================================================= # Pick up the package #================================================= -echo "Pick up the package which will be tested." +function FETCH_PACKAGE_TO_TEST() { -# If the url is on a specific branch, extract the branch -if echo "$app_arg" | grep --quiet --extended-regexp "https?:\/\/.*\/tree\/" -then - gitbranch="-b ${app_arg##*/tree/}" - app_arg="${app_arg%%/tree/*}" -fi + local path_to_package_to_test="$1" -if [ -n "$gitbranch" ] -then - branch_msg=" on the branch ${gitbranch##-b }" -fi -info "Test the package $app_arg $branch_msg" - -# Remove the previous package if it's still here. -rm -rf "$script_dir"/*_check - -package_dir="$(basename "$app_arg")_check" -package_path="$script_dir/$package_dir" - -# If the package is in a git repository -if echo "$app_arg" | grep -Eq "https?:\/\/" -then - # Force the branch master if no branch is specified. 
- if [ -z "$gitbranch" ] + # If the url is on a specific branch, extract the branch + if echo "$path_to_package_to_test" | grep -Eq "https?:\/\/.*\/tree\/" then - if git ls-remote --quiet --exit-code $app_arg master + gitbranch="-b ${path_to_package_to_test##*/tree/}" + path_to_package_to_test="${path_to_package_to_test%%/tree/*}" + fi + + log_info "Testing the package $path_to_package_to_test" + [ -n "$gitbranch" ] && log_info " on the branch ${gitbranch##-b }" + + package_path="$TEST_CONTEXT/app_folder" + + # If the package is in a git repository + if echo "$path_to_package_to_test" | grep -Eq "https?:\/\/" + then + # Force the branch master if no branch is specified. + if [ -z "$gitbranch" ] then - gitbranch="-b master" - else - if git ls-remote --quiet --exit-code $app_arg stable + if git ls-remote --quiet --exit-code $path_to_package_to_test master then - gitbranch="-b stable" + gitbranch="-b master" else - critical "Unable to find a default branch to test (master or stable)" + if git ls-remote --quiet --exit-code $path_to_package_to_test stable + then + gitbranch="-b stable" + else + log_critical "Unable to find a default branch to test (master or stable)" + fi fi fi + # Clone the repository + git clone --quiet $path_to_package_to_test $gitbranch "$package_path" + + # If it's a local directory + else + # Do a copy in the directory of Package check + cp -a "$path_to_package_to_test" "$package_path" fi - # Clone the repository - git clone $app_arg $gitbranch "$package_path" - - # If it's a local directory -else - # Do a copy in the directory of Package check - cp -a "$app_arg" "$package_path" -fi - -# Check if the package directory is really here. -if [ ! -d "$package_path" ]; then - critical "Unable to find the directory $package_path for the package..." -fi + # Check if the package directory is really here. + if [ ! -d "$package_path" ]; then + log_critical "Unable to find the directory $package_path for the package..." 
+ fi +} +FETCH_PACKAGE_TO_TEST $path_to_package_to_test +readonly app_id="$(cat $package_path/manifest.json | jq .id | tr -d '"')" #================================================= # Determine and print the results #================================================= -TEST_RESULTS () { +COMPUTE_RESULTS_SUMMARY () { + + local test_serie_id=$1 + source $TEST_CONTEXT/$test_serie_id/results # Print the test result print_result () { @@ -575,8 +311,6 @@ TEST_RESULTS () { print_result "Change URL" $RESULT_change_url print_result "Actions and config-panel" $RESULT_action_config_panel - - # Determine the level for this app # Each level can has 5 different values @@ -684,6 +418,11 @@ TEST_RESULTS () { [ "${level[8]}" == "2" ] ) } + pass_level_9() { + list_url="https://raw.githubusercontent.com/YunoHost/apps/master/apps.json" + curl --silent $list_url | jq ".[\"$app_id\"].high_quality" | grep -q "true" + } + # Check if the level can be changed level_can_change () { # If the level is set at auto, it's waiting for a change @@ -699,27 +438,9 @@ TEST_RESULTS () { if level_can_change 6; then pass_level_6 && level[6]=2 || level[6]=0; fi if level_can_change 7; then pass_level_7 && level[7]=2 || level[7]=0; fi if level_can_change 8; then pass_level_8 && level[8]=2 || level[8]=0; fi + if level_can_change 9; then pass_level_9 && level[9]=2 || level[9]=0; fi - # Evaluate the ninth level - # -> High quality package. - # The level 9 can be validated only by the official list of app. - level[9]=0 - # Define the level 9 only if we're working on a repository. Otherwise, we can't assert that this is the correct app. - if echo "$app_arg" | grep --extended-regexp --quiet "https?:\/\/" - then - # Get the name of the app from the repository name. 
- app_name="$(basename --multiple --suffix=_ynh "$app_arg")" - - # Get the last version of the app list - list_url="https://raw.githubusercontent.com/YunoHost/apps/master/apps.json" - if curl --silent $list_url | jq ".[\"$app_name\"].high_quality" | grep -q "true" - then - level[9]=2 - fi - fi - - # Evaluate the tenth level - # -> Not available yet... + # Level 10 has no definition yet level[10]=0 # Initialize the global level @@ -755,27 +476,27 @@ TEST_RESULTS () { # If some witness files was missing, it's a big error ! So, the level fall immediately at 0. if [ $RESULT_witness -eq 1 ] then - error "Some witness files has been deleted during those tests ! It's a very bad thing !" + log_error "Some witness files has been deleted during those tests ! It's a very bad thing !" global_level=0 fi # If the package linter returned a critical error, the app is flagged as broken / level 0 if [ $RESULT_linter_broken -eq 1 ] then - error "The package linter reported a critical failure ! App is considered broken !" + log_error "The package linter reported a critical failure ! App is considered broken !" global_level=0 fi if [ $RESULT_alias_traversal -eq 1 ] then - error "Issue alias_traversal was detected ! Please see here https://github.com/YunoHost/example_ynh/pull/45 to fix that." + log_error "Issue alias_traversal was detected ! Please see here https://github.com/YunoHost/example_ynh/pull/45 to fix that." 
fi # Then, print the levels # Print the global level - verbose_level=$(grep "^$global_level " "$script_dir/levels.list" | cut -c4-) + verbose_level=$(grep "^$global_level " "./levels.list" | cut -c4-) - info "Level of this application: $global_level ($verbose_level)" + log_info "Level of this application: $global_level ($verbose_level)" # And print the value for each level for i in `seq 1 10` @@ -793,81 +514,35 @@ TEST_RESULTS () { #================================================= # Parsing and performing tests #================================================= -# Check if a check_process file exist -#================================================= -check_file=1 -check_process="$package_path/check_process" - -if [ ! -e "$check_process" ] -then - error "Unable to find a check_process file." - warning "Package check will attempt to automatically guess what tests to run." - check_file=0 -fi - -#================================================= -# Set the timer for all tests -#================================================= - -# Start the timer for this test -start_timer -# And keep this value separately -complete_start_timer=$starttime - -#================================================= -# Initialize tests -#================================================= - -# Purge some log files -> "$complete_log" -> "$script_dir/lxc_boot.log" - -# Initialize LXC network -LXC_INIT # Default values for check_process and TESTING_PROCESS -initialize_values() { - # Test results - RESULT_witness=0 - RESULT_alias_traversal=0 - RESULT_linter=0 - RESULT_linter_level_6=0 - RESULT_linter_level_7=0 - RESULT_linter_level_8=0 - RESULT_linter_broken=0 - RESULT_global_setup=0 - RESULT_global_remove=0 - RESULT_check_sub_dir=0 - RESULT_check_root=0 - RESULT_check_remove_sub_dir=0 - RESULT_check_remove_root=0 - RESULT_check_upgrade=0 - RESULT_check_backup=0 - RESULT_check_restore=0 - RESULT_check_private=0 - RESULT_check_public=0 - RESULT_check_multi_instance=0 - RESULT_check_port=0 - 
RESULT_change_url=0 - RESULT_action_config_panel=0 - - # auto_remove parameter - if [ $interrupt -eq 1 ]; then - auto_remove=0 - else - auto_remove=1 - fi - - # Number of tests to proceed - total_number_of_test=0 - - # Default path - test_path=/ - - # CHECK_URL default values - curl_error=0 - yuno_portal=0 +init_results() { + local test_serie_id=$1 + cat << EOF > $TEST_CONTEXT/$test_serie_id/results +RESULT_witness=0 +RESULT_alias_traversal=0 +RESULT_linter=0 +RESULT_linter_level_6=0 +RESULT_linter_level_7=0 +RESULT_linter_level_8=0 +RESULT_linter_broken=0 +RESULT_global_setup=0 +RESULT_global_remove=0 +RESULT_check_sub_dir=0 +RESULT_check_root=0 +RESULT_check_remove_sub_dir=0 +RESULT_check_remove_root=0 +RESULT_check_upgrade=0 +RESULT_check_backup=0 +RESULT_check_restore=0 +RESULT_check_private=0 +RESULT_check_public=0 +RESULT_check_multi_instance=0 +RESULT_check_port=0 +RESULT_change_url=0 +RESULT_action_config_panel=0 +EOF } #================================================= @@ -875,181 +550,77 @@ initialize_values() { #================================================= # Parse the check_process only if it's exist -if [ $check_file -eq 1 ] -then - info "Parsing check_process file" +check_process="$package_path/check_process" + +# Extract a section found between $1 and $2 from the file $3 +extract_check_process_section () { + local source_file="${3:-$check_process}" + local extract=0 + local line="" + while read line + do + # Extract the line + if [ $extract -eq 1 ] + then + # Check if the line is the second line to found + if echo $line | grep -q "$2"; then + # Break the loop to finish the extract process + break; + fi + # Copy the line in the partial check_process + echo "$line" + fi + + # Search for the first line + if echo $line | grep -q "$1"; then + # Activate the extract process + extract=1 + fi + done < "$source_file" +} + + +parse_check_process() { + + log_info "Parsing check_process file" # Remove all commented lines in the check_process sed --in-place 
'/^#/d' "$check_process" # Remove all spaces at the beginning of the lines sed --in-place 's/^[ \t]*//g' "$check_process" - # Search a string in the partial check_process - find_string () { - echo $(grep -m1 "$1" "$check_process_section") - } - - # Extract a section found between $1 and $2 from the file $3 - extract_section () { - # Erase the partial check_process - > "$check_process_section" - local source_file="$3" - local extract=0 - local line="" - while read line - do - # Extract the line - if [ $extract -eq 1 ] - then - # Check if the line is the second line to found - if echo $line | grep -q "$2"; then - # Break the loop to finish the extract process - break; - fi - # Copy the line in the partial check_process - echo "$line" >> "$check_process_section" - fi - - # Search for the first line - if echo $line | grep -q "$1"; then - # Activate the extract process - extract=1 - fi - done < "$source_file" - } - - # Use 2 partial files, to keep one for a whole tests serie - partial1="${check_process}_part1" - partial2="${check_process}_part2" - # Extract the Options section - check_process_section=$partial1 - extract_section "^;;; Options" ";; " "$check_process" - - # Try to find a optionnal email address to notify the maintainer - # In this case, this email will be used instead of the email from the manifest. 
- dest="$(echo $(find_string "^Email=") | cut -d '=' -f2)" - - # Try to find a optionnal option for the grade of notification - notification_grade="$(echo $(find_string "^Notification=") | cut -d '=' -f2)" - + extract_check_process_section "^;;; Options" ";; " > $TEST_CONTEXT/check_process.options + extract_check_process_section "^;;; Upgrade options" ";; " > $TEST_CONTEXT/check_process.upgrade_options # Parse each tests serie while read <&3 tests_serie do + local test_serie_id=$(tr -dc A-Za-z0-9 $test_serie_dir/test_serie_name + extract_check_process_section "^$tests_serie" "^;;" > $test_serie_rawconf + extract_check_process_section "^; pre-install" "^; " $test_serie_rawconf > $test_serie_dir/preinstall.sh.template + extract_check_process_section "^; Manifest" "^; " $test_serie_rawconf > $test_serie_dir/check_process.manifest_infos + extract_check_process_section "^; Actions" "^; " $test_serie_rawconf > $test_serie_dir/check_process.actions_infos + extract_check_process_section "^; Config_panel" "^; " $test_serie_rawconf > $test_serie_dir/check_process.configpanel_infos + extract_check_process_section "^; Checks" "^; " $test_serie_rawconf > $test_serie_dir/check_process.tests_infos - # Check if there a pre-install instruction for this serie - extract_section "^; pre-install" "^;" "$partial2" - cat "$check_process_section" > ./tmp_context_for_tests/preinstall.sh.template + # This is the arg list to be later fed to "yunohost app install" + cat $test_serie_dir/check_process.manifest_infos \ + | awk '{print $1}' | tr -d '"' | tr '\n' '&' > $test_serie_dir/install_args - # Parse all infos about arguments of manifest - # Extract the manifest arguments section from the second partial file - extract_section "^; Manifest" "^; " "$partial2" - - manifest_arguments=$(cat $check_process_section | awk '{print $1}' | tr -d '"' | tr '\n' '&') - - # Try to find all specific arguments needed for the tests - keep_name_arg_only () { - # Find the line for the given argument - local 
argument=$(find_string "($1") - # If a line exist for this argument - if [ -n "$argument" ]; then - # Keep only the name of the argument - echo "$(echo "$argument" | cut -d '=' -f1)" - fi - } - domain_arg=$(keep_name_arg_only "DOMAIN") - user_arg=$(keep_name_arg_only "USER") - port_arg=$(keep_name_arg_only "PORT") - path_arg=$(keep_name_arg_only "PATH") - # Get the path value - if [ -n "$path_arg" ] - then - line="$(find_string "(PATH")" - # Keep only the part after the = - line="$(echo "$line" | grep -o "path=.* " | cut -d "=" -f2)" - # And remove " et spaces to keep only the path. - line="${line//[\" ]/}" - # If this path is not empty or equal to /. It become the new default path value. - if [ ${#line} -gt 1 ]; then - test_path="$line" - fi - fi - public_arg=$(keep_name_arg_only "PUBLIC") - # Find the values for public and private - if [ -n "$public_arg" ] - then - line=$(find_string "(PUBLIC") - public_public_arg=$(echo "$line" | grep -o "|public=[[:alnum:]]*" | cut -d "=" -f2) - public_private_arg=$(echo "$line" | grep -o "|private=[[:alnum:]]*" | cut -d "=" -f2) - fi - - if echo "$LIGNE" | grep -q "(PATH)"; then # Path dans le manifest - MANIFEST_PATH=$(echo "$LIGNE" | cut -d '=' -f1) # Récupère la clé du manifest correspondant au path - parse_path=$(echo "$LIGNE" | cut -d '"' -f2) # Lit le path du check_process - if [ -n "$parse_path" ]; then # Si le path nest pas null, utilise ce path au lieu de la valeur par défaut. 
- PATH_TEST=$(echo "$LIGNE" | cut -d '"' -f2) - fi - LIGNE=$(echo "$LIGNE" | cut -d '(' -f1) # Retire lindicateur de clé de manifest à la fin de la ligne - fi - - # Parse all infos about arguments of actions.toml - # Extract the actions arguments section from the second partial file - extract_section "^; Actions" "^; " "$partial2" - - # Initialize the arguments list - actions_arguments="" - - # Read each arguments and store them - while read line - do - # Remove all double quotes - add_arg="${line//\"/}" - # Then add this argument and follow it by : - actions_arguments="${actions_arguments}${add_arg}:" - done < "$check_process_section" - - # Parse all infos about arguments of config-panel.toml - # Extract the config_panel arguments section from the second partial file - extract_section "^; Config_panel" "^; " "$partial2" - - # Initialize the arguments list - config_panel_arguments="" - - # Read each arguments and store them - while read line - do - # Remove all double quotes - add_arg="${line//\"/}" - # Then add this argument and follow it by : - config_panel_arguments="${config_panel_arguments}${add_arg}:" - done < "$check_process_section" - - # Parse all tests to perform - # Extract the checks options section from the second partial file - extract_section "^; Checks" "^; " "$partial2" - - read_check_option () { + is_test_enabled () { # Find the line for the given check option - local value=$(find_string "^$1=" | awk -F= '{print $2}') + local value=$(grep -m1 -o "^$1=." "$test_serie_dir/check_process.tests_infos" | awk -F= '{print $2}') # And return this value if [ "${value:0:1}" = "1" ] then @@ -1062,83 +633,84 @@ then fi } - count_test () { - # Increase the number of test, if this test is set at 1. 
- test "$1" -eq 1 && total_number_of_test=$((total_number_of_test+1)) - } + cat << EOF > $test_serie_dir/tests_to_perform +pkg_linter=$(is_test_enabled pkg_linter) +setup_sub_dir=$(is_test_enabled setup_sub_dir) +setup_root=$(is_test_enabled setup_root) +setup_nourl=$(is_test_enabled setup_nourl) +setup_private=$(is_test_enabled setup_private) +setup_public=$(is_test_enabled setup_public) +upgrade=$(is_test_enabled upgrade) +backup_restore=$(is_test_enabled backup_restore) +multi_instance=$(is_test_enabled multi_instance) +port_already_use=$(is_test_enabled port_already_use) +change_url=$(is_test_enabled change_url) +actions=$(is_test_enabled actions) +config_panel=$(is_test_enabled config_panel) +EOF - # Get standard options - pkg_linter=$(read_check_option pkg_linter) - count_test $pkg_linter - setup_sub_dir=$(read_check_option setup_sub_dir) - count_test $setup_sub_dir - setup_root=$(read_check_option setup_root) - count_test $setup_root - setup_nourl=$(read_check_option setup_nourl) - count_test $setup_nourl - setup_private=$(read_check_option setup_private) - count_test $setup_private - setup_public=$(read_check_option setup_public) - count_test $setup_public - backup_restore=$(read_check_option backup_restore) - count_test $backup_restore - multi_instance=$(read_check_option multi_instance) - count_test $multi_instance - port_already_use=$(read_check_option port_already_use) - count_test $port_already_use - change_url=$(read_check_option change_url) - count_test $change_url - actions=$(read_check_option actions) - count_test $actions - config_panel=$(read_check_option config_panel) - count_test $config_panel + done 3<<< "$(grep "^;; " "$check_process")" - # For port_already_use, check if there is also a port number - if [ $port_already_use -eq 1 ] - then - line=$(find_string "^port_already_use=") - # If there is port number - if echo "$line" | grep -q "([0-9]*)" - then - # Store the port number in port_arg and prefix it by # to means that not really a 
manifest arg - port_arg="#$(echo "$line" | cut -d '(' -f2 | cut -d ')' -f1)" - fi +} + +guess_test_configuration() { + + log_error "Not check_process file found." + log_warning "Package check will attempt to automatically guess what tests to run." + + local test_serie_id=$(tr -dc A-Za-z0-9 $test_serie_dir/install_args + + cat << EOF > $test_serie_dir/tests_to_perform +pkg_linter=1 +setup_sub_dir=1 +setup_root=1 +setup_nourl=0 +setup_private=$(grep -q "is_public=" $test_serie_dir/install_args && echo 1 || echo 0) +setup_public=$(grep -q "is_public=" $test_serie_dir/install_args && echo 1 || echo 0)0 +upgrade=1 +backup_restore=1 +multi_instance=$(grep multi_instance "$package_path/manifest.json" | grep -q true && echo 1 || echo 0) +port_already_use=0 +change_url=0 +EOF +} + +#================================================= + +run_all_tests() { + + # Start the timer for this test + start_timer + # And keep this value separately + complete_start_timer=$starttime + + + LXC_INIT + + for test_serie_id in $test_series + do + test_serie_dir=$TEST_CONTEXT/$test_serie_id + + # Break after the first tests serie + if [ $interactive -eq 1 ]; then + read -p "Press a key to start the next tests serie..." < /dev/tty fi - # Clean the upgrade list - touch "$script_dir/tmp_context_for_tests/upgrade_list" - # Get multiples lines for upgrade option. - while $(grep --quiet "^upgrade=" "$check_process_section") - do - # Get the value for the first upgrade test. - temp_upgrade=$(read_check_option upgrade) - count_test $temp_upgrade - # Set upgrade to 1, but never to 0. - if [ "$upgrade" != "1" ]; then - upgrade=$temp_upgrade - fi - # Get this line to find if there an option. 
- line=$(find_string "^upgrade=") - if echo "$line" | grep --quiet "from_commit=" - then - # Add the commit to the upgrade list - line="${line##*from_commit=}" - # Add the upgrade to the list only if the test is set to 1 - if [ $temp_upgrade -eq 1 ]; then - echo "$line" >> "$script_dir/tmp_context_for_tests/upgrade_list" - fi - elif [ $temp_upgrade -eq 1 ]; then - # Or simply 'current' for a standard upgrade. - echo "current" >> "$script_dir/tmp_context_for_tests/upgrade_list" - fi - # Remove this line from the check_process - sed --in-place "\|${line}$|d" "$check_process_section" - done - # Launch all tests successively - TESTING_PROCESS + RUN_TEST_SERIE $test_serie_dir + # Print the final results of the tests - TEST_RESULTS + COMPUTE_RESULTS_SUMMARY $test_serie_id # Set snap0 as the current snapshot current_snapshot=snap0 @@ -1146,302 +718,23 @@ then unset root_snapshot unset subpath_snapshot - done 3<<< "$(grep "^;; " "$check_process")" + done - # No check_process file. Try to parse the manifest. -else - # Initialize the values for this serie of tests - initialize_values + # Restore the started time for the timer + starttime=$complete_start_timer + # End the timer for the test + stop_timer 3 - manifest_extract="$script_dir/manifest_extract" + echo "You can find the complete log of these tests in $(realpath $complete_log)" - # Extract the informations from the manifest with the Brams sly snake script. - python "$script_dir/sub_scripts/manifest_parsing.py" "$package_path/manifest.json" > "$manifest_extract" + source "./sub_scripts/notifications.sh" - # Default tests - pkg_linter=1 - setup_sub_dir=1 - setup_root=1 - setup_nourl=0 - upgrade=1 - setup_private=1 - setup_public=1 - backup_restore=1 - multi_instance=1 - port_already_use=0 - change_url=0 - total_number_of_test=$((total_number_of_test+9)) - - - # Read each arguments and store them - while read line - do - # Read each argument and pick up the first value. 
Then replace : by = - add_arg="$(echo $line | cut -d ':' -f1,2 | sed s/:/=/)" - # Then add this argument and follow it by & - manifest_arguments="${manifest_arguments}${add_arg}&" - done < "$manifest_extract" - - # Search a string in the partial check_process - find_string () { - echo $(grep "$1" "$manifest_extract") - } - - # Try to find all specific arguments needed for the tests - keep_name_arg_only () { - # Find the line for the given argument - local argument=$(find_string "$1") - # If a line exist for this argument - if [ -n "$argument" ]; then - # Keep only the name of the argument - echo "$(echo "$argument" | cut -d ':' -f1)" - fi - } - domain_arg=$(keep_name_arg_only ":ynh.local") - path_arg=$(keep_name_arg_only "path:") - user_arg=$(keep_name_arg_only "user:\|admin:") - public_arg=$(keep_name_arg_only "is_public:") - # Find the values for public and private - if [ -n "$public_arg" ] - then - line=$(find_string "is_public:") - # Assume the first value is public and the second is private. - public_public_arg=$(echo "$line" | cut -d ":" -f2) - public_private_arg=$(echo "$line" | cut -d ":" -f3) - fi - - count_test () { - # Decrease the number of test, if this test is not already removed. - if [ $1 -eq 1 ]; then - total_number_of_test=$((total_number_of_test-1)) - return 1 - fi - } - - # Disable some tests if the manifest key doesn't be found - if [ -z "$domain_arg" ] - then - error "The manifest key for domain was not found." - setup_sub_dir=0 - count_test "$setup_root" || setup_root=0 - count_test "$multi_instance" || multi_instance=0 - setup_nourl=1 - fi - if [ -z "$path_arg" ] - then - error "The manifest key for path was not found." - count_test "$setup_root" || setup_root=0 - count_test "$multi_instance" || multi_instance=0 - fi - if [ -z "$public_arg" ] - then - error "The manifest key for public was not found." 
- setup_private=0 - setup_public=0 - total_number_of_test=$((total_number_of_test-2)) - fi - # Remove the multi-instance test if this parameter is set at false in the manifest. - if grep multi_instance "$package_path/manifest.json" | grep -q false - then - count_test "$multi_instance" || multi_instance=0 - fi - - # Launch all tests successively - TESTING_PROCESS - # Print the final results of the tests - TEST_RESULTS -fi - -echo "You can find the complete log of these tests in $complete_log" - -#================================================= -# Ending the timer -#================================================= - -# Restore the started time for the timer -starttime=$complete_start_timer -# End the timer for the test -stop_timer 3 - -#================================================= -# Notification grade -#================================================= - -notif_grade () { - # Check the level of notification from the check_process. - # Echo 1 if the grade is reached - - compare_grade () - { - if echo "$notification_grade" | grep -q "$1"; then - echo 1 - else - echo 0 - fi - } - - case "$1" in - all) - # If 'all' is needed, only a grade of notification at 'all' can match - compare_grade "^all$" - ;; - change) - # If 'change' is needed, notification at 'all' or 'change' can match - compare_grade "^all$\|^change$" - ;; - down) - # If 'down' is needed, notification at 'all', 'change' or 'down' match - compare_grade "^all$\|^change$\|^down$" - ;; - *) - echo 0 - ;; - esac } -#================================================= -# Inform of the results by XMPP and/or by mail -#================================================= +[ -e "$check_process" ] \ +&& parse_check_process \ +|| guess_test_configuration -send_mail=0 - -# Keep only the name of the app -app_name=${package_dir%_ynh_check} - -# If package check it's in the official CI environment -# Check the level variation -if [ $type_exec_env -eq 2 ] -then - - # Get the job name, stored in the work_list - 
job=$(head -n1 "$script_dir/../work_list" | cut -d ';' -f 3) - - # Identify the type of test, stable (0), testing (1) or unstable (2) - # Default stable - test_type=0 - message="" - if echo "$job" | grep -q "(testing)" - then - message="(TESTING) " - test_type=1 - elif echo "$job" | grep -q "(unstable)" - then - message="(UNSTABLE) " - test_type=2 - fi - - # Build the log path (and replace all space by %20 in the job name) - if [ -n "$job" ]; then - if systemctl list-units | grep --quiet jenkins - then - job_log="/job/${job// /%20}/lastBuild/console" - elif systemctl list-units | grep --quiet yunorunner - then - # Get the directory of YunoRunner - ci_dir="$(grep WorkingDirectory= /etc/systemd/system/yunorunner.service | cut -d= -f2)" - # List the jobs from YunoRunner and grep the job (without Community or Official). - job_id="$(cd "$ci_dir"; ve3/bin/python ciclic list | grep ${job%% *} | head -n1)" - # Keep only the id of the job, by removing everything after - - job_id="${job_id%% -*}" - # And remove any space before the id. - job_id="${job_id##* }" - job_log="/job/$job_id" - fi - fi - - # If it's a test on testing or unstable - if [ $test_type -gt 0 ] - then - # Remove unstable or testing of the job name to find its stable version in the level list - job="${job% (*)}" - fi - - # Get the previous level, found in the file list_level_stable - previous_level=$(grep "^$job:" "$script_dir/../auto_build/list_level_stable" | cut -d: -f2) - - # Print the variation of the level. 
If this level is different than 0 - if [ $global_level -gt 0 ] - then - message="${message}Application $app_name" - # If non previous level was found - if [ -z "$previous_level" ]; then - message="$message just reach the level $global_level" - send_mail=$(notif_grade all) - # If the level stays the same - elif [ $global_level -eq $previous_level ]; then - message="$message stays at level $global_level" - # Need notification at 'all' to notify by email - send_mail=$(notif_grade all) - # If the level go up - elif [ $global_level -gt $previous_level ]; then - message="$message rise from level $previous_level to level $global_level" - # Need notification at 'change' to notify by email - send_mail=$(notif_grade change) - # If the level go down - elif [ $global_level -lt $previous_level ]; then - message="$message go down from level $previous_level to level $global_level" - # Need notification at 'down' to notify by email - send_mail=$(notif_grade down) - fi - fi -fi - -# If the app completely failed and obtained 0 -if [ $global_level -eq 0 ] -then - message="${message}Application $app_name has completely failed the continuous integration tests" - - # Always send an email if the app failed - send_mail=1 -fi - -# The mail subject is the message to send, before any logs informations -subject="[YunoHost] $message" - -# If the test was perform in the official CI environment -# Add the log address -# And inform with xmpp -if [ $type_exec_env -eq 2 ] -then - - # Build the address of the server from auto.conf - ci_path=$(grep "DOMAIN=" "$script_dir/../auto_build/auto.conf" | cut -d= -f2)/$(grep "CI_PATH=" "$script_dir/../auto_build/auto.conf" | cut -d= -f2) - - # Add the log adress to the message - message="$message on https://$ci_path$job_log" - - # Send a xmpp notification on the chat room "apps" - # Only for a test with the stable version of YunoHost - if [ $test_type -eq 0 ] - then - "$script_dir/../auto_build/xmpp_bot/xmpp_post.sh" "$message" > /dev/null 2>&1 - fi -fi - -# 
Send a mail to main maintainer according to notification option in the check_process. -# Only if package check is in a CI environment (Official or not) -if [ $type_exec_env -ge 1 ] && [ $send_mail -eq 1 ] -then - - # Add a 'from' header for the official CI only. - # Apparently, this trick is not needed anymore !? - # if [ $type_exec_env -eq 2 ]; then - # from_yuno="-a \"From: yunohost@yunohost.org\"" - # fi - - # Get the maintainer email from the manifest. If it doesn't found if the check_process - if [ -z "$dest" ]; then - dest=$(grep '\"email\": ' "$package_path/manifest.json" | cut -d '"' -f 4) - fi - - # Send the message by mail, if a address has been find - if [ -n "$dest" ]; then - mail $from_yuno -s "$subject" "$dest" <<< "$message" - fi -fi - -#================================================= -# Clean and exit -#================================================= +run_all_tests clean_exit 0 diff --git a/sub_scripts/auto_upgrade.sh b/sub_scripts/auto_upgrade.sh deleted file mode 100755 index 56237a2..0000000 --- a/sub_scripts/auto_upgrade.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -# Ce script n'a vocation qu'a être dans un cron. De préférence une fois par jour ou par semaine. - -# Récupère le dossier du script -if [ "${0:0:1}" == "/" ]; then script_dir="$(dirname "$0")"; else script_dir="$(echo $PWD/$(dirname "$0" | cut -d '.' -f2) | sed 's@/$@@')"; fi - -echo "" -date -# Vérifie que Package check n'est pas déjà utilisé. -timeout=7200 # Durée d'attente maximale -inittime=$(date +%s) # Enregistre l'heure de début d'attente -while test -e "$script_dir/../pcheck.lock"; do # Vérifie la présence du lock de Package check - sleep 60 # Attend la fin de l'exécution de Package check. - echo -n "." - if [ $(( $(date +%s) - $inittime )) -ge $timeout ] # Vérifie la durée d'attente - then # Si la durée dépasse le timeout fixé, force l'arrêt. - inittime=0 # Indique l'arrêt forcé du script - echo "Temps d'attente maximal dépassé, la mise à jour est annulée." 
- break - fi -done -echo "" - -if [ "$inittime" -ne 0 ]; then # Continue seulement si le timeout n'est pas dépassé. - "$script_dir/lxc_upgrade.sh" # Exécute le script d'upgrade de Package check -fi diff --git a/sub_scripts/common.sh b/sub_scripts/common.sh index 2455a6b..9806875 100755 --- a/sub_scripts/common.sh +++ b/sub_scripts/common.sh @@ -1,5 +1,40 @@ #!/bin/bash +[[ -e "./config.defaults" ]] && source "./config.defaults" +[[ -e "./config" ]] && source "./config" + +readonly lock_file="./pcheck.lock" + +#================================================= +# LXC helpers +#================================================= + +RUN_INSIDE_LXC() { + sudo lxc-attach -n $LXC_NAME -- "$@" +} + +RUN_THROUGH_SSH() { + ssh -tt -q $LXC_NAME "sudo $@" +} + +assert_we_are_the_setup_user() { + [ -e "./.setup_user" ] || return + local setup_user=$(cat "./.setup_user") + + [ "$(whoami)" == $setup_user ] \ + || log_critical "Ce script doit être exécuté avec l'utilisateur $setup_user !\nL'utilisateur actuel est $(whoami)." +} + +assert_we_are_connected_to_the_internets() { + ping -q -c 2 yunohost.org > /dev/null 2>&1 \ + || ping -q -c 2 framasoft.org > /dev/null 2>&1 \ + || log_critical "Unable to connect to internet." 
+} + +#================================================= +# Logging helpers +#================================================= + readonly NORMAL=$(printf '\033[0m') readonly BOLD=$(printf '\033[1m') readonly faint=$(printf '\033[2m') @@ -12,9 +47,9 @@ readonly BLUE=$(printf '\033[34m') readonly YELLOW=$(printf '\033[93m') readonly WHITE=$(printf '\033[39m') -function title() +function log_title() { - cat << EOF | tee -a "$complete_log" + cat << EOF ${BOLD} =================================== $1 @@ -23,51 +58,187 @@ ${NORMAL} EOF } -function small_title() +function log_small_title() { - echo -e "\n${BOLD} > ${1}${NORMAL}\n" | tee -a "$complete_log" + echo -e "\n${BOLD} > ${1}${NORMAL}\n" } -function debug() +function log_debug() { - echo "$1" >> "$complete_log" + echo "$1" } -function info() +function log_info() { - echo "${1}" | tee -a "$complete_log" + echo "${1}" } -function success() +function log_success() { - echo "${BOLD}${GREEN}Success: ${1}${NORMAL}" | tee -a "$complete_log" + echo "${BOLD}${GREEN}Success: ${1}${NORMAL}" } -function warning() +function log_warning() { - echo "${BOLD}${ORANGE}Warning: ${1}${NORMAL}" | tee -a "$complete_log" 2>&1 + echo "${BOLD}${ORANGE}Warning: ${1}${NORMAL}" } -function error() +function log_error() { - echo "${BOLD}${RED}Error: ${1}${NORMAL}" | tee -a "$complete_log" 2>&1 + echo "${BOLD}${RED}Error: ${1}${NORMAL}" } -function critical() +function log_critical() { - echo "${BOLD}${RED}Critical: ${1}${NORMAL}" | tee -a "$complete_log" 2>&1 + echo "${BOLD}${RED}Critical: ${1}${NORMAL}" clean_exit 1 } -function report_test_success () { - echo -e "\n${BOLD}${GREEN}--- SUCCESS ---${NORMAL}\n" | tee -a "$complete_log" 2>&1 +function log_report_test_success () { + echo -e "\n${BOLD}${GREEN}--- SUCCESS ---${NORMAL}\n" } -function report_test_warning () { - echo -e "\n${BOLD}${ORANGE}--- WARNING ---${NORMAL}\n" | tee -a "$complete_log" 2>&1 +function log_report_test_warning () { + echo -e "\n${BOLD}${ORANGE}--- WARNING 
---${NORMAL}\n" } -function report_test_failed () { - echo -e "\n${BOLD}${RED}--- FAIL ---${NORMAL}\n" | tee -a "$complete_log" 2>&1 +function log_report_test_failed () { + echo -e "\n${BOLD}${RED}--- FAIL ---${NORMAL}\n" } + +#================================================= +# Timing helpers +#================================================= + +start_timer () { + # Set the beginning of the timer + starttime=$(date +%s) +} + +stop_timer () { + # Ending the timer + # $1 = Type of querying + + local finishtime=$(date +%s) + # Calculate the gap between the starting and the ending of the timer + local elapsedtime=$(echo $(( $finishtime - $starttime ))) + # Extract the number of hour + local hours=$(echo $(( $elapsedtime / 3600 ))) + local elapsedtime=$(echo $(( $elapsedtime - ( 3600 * $hours) ))) + # Minutes + local minutes=$(echo $(( $elapsedtime / 60 ))) + # And seconds + local seconds=$(echo $(( $elapsedtime - ( 60 * $minutes) ))) + + local phours="" + local pminutes="" + local pseconds="" + + # Avoid null values + [ $hours -eq 0 ] || phours="$hours hour" + [ $minutes -eq 0 ] || pminutes="$minutes minute" + [ $seconds -eq 0 ] || pseconds="$seconds second" + + # Add a 's' for plural values + [ $hours -eq 1 ] && phours="${phours}, " || test -z "$phours" || phours="${phours}s, " + [ $minutes -eq 1 ] && pminutes="${pminutes}, " || test -z "$pminutes" || pminutes="${pminutes}s, " + [ $seconds -gt 1 ] && pseconds="${pseconds}s" + + time="${phours}${pminutes}${pseconds} ($(date '+%T'))" + if [ $1 -eq 2 ]; then + log_info "Working time for this test: $time" + elif [ $1 -eq 3 ]; then + log_info "Global working time for all tests: $time" + else + log_debug "Working time: $time" + fi +} + +#================================================= +# Upgrade Package check +#================================================= + + +function self_upgrade() +{ + local git_repository=https://github.com/YunoHost/package_check + local version_file="./.pcheck_version" + + local 
check_version="$(git ls-remote $git_repository | cut -f 1 | head -n1)" + + # If the version file exist, check for an upgrade + if [ -e "$version_file" ] + then + # Check if the last commit on the repository match with the current version + if [ "$check_version" != "$(cat "$version_file")" ] + then + # If the versions don't matches. Do an upgrade + log_info "Upgrading Package check" + + # Build the upgrade script + cat > "./upgrade_script.sh" << EOF + +#!/bin/bash +# Clone in another directory +git clone --quiet $git_repository "./upgrade" +cp -a "./upgrade/." "./." +sudo rm -r "./upgrade" +# Update the version file +echo "$check_version" > "$version_file" +rm "./pcheck.lock" +# Execute package check by replacement of this process +exec "./package_check.sh" "${arguments[@]}" +EOF + + # Give the execution right + chmod +x "./upgrade_script.sh" + + # Start the upgrade script by replacement of this process + exec "./upgrade_script.sh" + fi + fi + + # Update the version file + echo "$check_version" > "$version_file" +} + +#================================================= +# Upgrade Package linter +#================================================= + +function fetch_or_upgrade_package_linter() +{ + local git_repository=https://github.com/YunoHost/package_linter + local version_file="./.plinter_version" + + local check_version="$(git ls-remote $git_repository | cut -f 1 | head -n1)" + + # If the version file exist, check for an upgrade + if [ -e "$version_file" ] + then + # Check if the last commit on the repository match with the current version + if [ "$check_version" != "$(cat "$version_file")" ] + then + # If the versions don't matches. Do an upgrade + log_info "Upgrading Package linter" + + # Clone in another directory + git clone --quiet $git_repository "./package_linter_tmp" + pip3 install pyparsing six + + # And replace + cp -a "./package_linter_tmp/." "./package_linter/." 
+ sudo rm -r "./package_linter_tmp" + fi + else + log_info "Installing Package linter" + git clone --quiet $git_repository "./package_linter" + pip3 install pyparsing six + fi + + # Update the version file + echo "$check_version" > "$version_file" +} + + diff --git a/sub_scripts/launcher.sh b/sub_scripts/launcher.sh index d497e68..c6d508f 100755 --- a/sub_scripts/launcher.sh +++ b/sub_scripts/launcher.sh @@ -1,69 +1,18 @@ # #!/bin/bash -echo -e "Loads functions from launcher.sh" - #================================================= # Globals variables #================================================= # -q aims to disable the display of 'Debian GNU/Linux' each time a command is ran arg_ssh="-tt -q" -snapshot_path="/var/lib/lxcsnaps/$lxc_name" current_snapshot=snap0 -#================================================= -# TIMER -#================================================= - -start_timer () { - # Set the beginning of the timer - starttime=$(date +%s) -} - -stop_timer () { - # Ending the timer - # $1 = Type of querying - - local finishtime=$(date +%s) - # Calculate the gap between the starting and the ending of the timer - local elapsedtime=$(echo $(( $finishtime - $starttime ))) - # Extract the number of hour - local hours=$(echo $(( $elapsedtime / 3600 ))) - local elapsedtime=$(echo $(( $elapsedtime - ( 3600 * $hours) ))) - # Minutes - local minutes=$(echo $(( $elapsedtime / 60 ))) - # And seconds - local seconds=$(echo $(( $elapsedtime - ( 60 * $minutes) ))) - - local phours="" - local pminutes="" - local pseconds="" - - # Avoid null values - [ $hours -eq 0 ] || phours="$hours hour" - [ $minutes -eq 0 ] || pminutes="$minutes minute" - [ $seconds -eq 0 ] || pseconds="$seconds second" - - # Add a 's' for plural values - [ $hours -eq 1 ] && phours="${phours}, " || test -z "$phours" || phours="${phours}s, " - [ $minutes -eq 1 ] && pminutes="${pminutes}, " || test -z "$pminutes" || pminutes="${pminutes}s, " - [ $seconds -gt 1 ] && pseconds="${pseconds}s" - - 
time="${phours}${pminutes}${pseconds} ($(date '+%T'))" - if [ $1 -eq 2 ]; then - info "Working time for this test: $time" - elif [ $1 -eq 3 ]; then - info "Global working time for all tests: $time" - else - info "Working time: $time" >/dev/null - fi -} - #================================================= # RUNNING SNAPSHOT #================================================= -create_temp_backup () { +CREATE_LXC_SNAPSHOT () { # Create a temporary snapshot # snap1 for subpath or snap2 for root install @@ -74,10 +23,10 @@ create_temp_backup () { check_witness_files >&2 # Stop the container, before its snapshot - sudo lxc-stop --name $lxc_name >&2 + sudo lxc-stop --name $LXC_NAME >&2 # Remove swap files to avoid killing the CI with huge snapshots. - local swap_file="/var/lib/lxc/$lxc_name/rootfs/swap_$ynh_app_id" + local swap_file="$LXC_ROOTFS/swap_$app_id" if sudo test -e "$swap_file" then sudo swapoff "$swap_file" @@ -85,23 +34,23 @@ create_temp_backup () { fi # Check if the snapshot already exist - if [ ! -e "$snapshot_path/snap$snap_number" ] + if [ ! -e "$LXC_SNAPSHOTS/snap$snap_number" ] then - echo "snap$snap_number doesn't exist, its first creation can takes a little while." >&2 + log_debug "snap$snap_number doesn't exist, its first creation can takes a little while." >&2 # Create the snapshot. - sudo lxc-snapshot --name $lxc_name >> "$complete_log" 2>&1 + sudo lxc-snapshot --name $LXC_NAME >> "$complete_log" 2>&1 # lxc always creates the first snapshot it can creates. # So if snap1 doesn't exist and you try to create snap2, it will be named snap1. - if [ "$snap_number" == "2" ] && [ ! -e "$snapshot_path/snap2" ] + if [ "$snap_number" == "2" ] && [ ! 
-e "$LXC_SNAPSHOTS/snap2" ] then # Rename snap1 to snap2 - sudo mv "$snapshot_path/snap1" "$snapshot_path/snap2" + sudo mv "$LXC_SNAPSHOTS/snap1" "$LXC_SNAPSHOTS/snap2" fi fi # Update the snapshot with rsync to clone the current lxc state - sudo rsync --acls --archive --delete --executability --itemize-changes --xattrs "/var/lib/lxc/$lxc_name/rootfs/" "$snapshot_path/snap$snap_number/rootfs/" > /dev/null 2>> "$complete_log" + sudo rsync --acls --archive --delete --executability --itemize-changes --xattrs "$LXC_ROOTFS/" "$LXC_SNAPSHOTS/snap$snap_number/rootfs/" > /dev/null 2>> "$complete_log" # Set this snapshot as the current snapshot current_snapshot=snap$snap_number @@ -112,23 +61,20 @@ create_temp_backup () { LXC_START "true" >&2 } -use_temp_snapshot () { +LOAD_LXC_SNAPSHOT () { # Use a temporary snapshot, if it already exists # $1 = Name of the snapshot to use current_snapshot=$1 start_timer # Fix the missing hostname in the hosts file... - echo "127.0.0.1 $lxc_name" | sudo tee --append "$snapshot_path/$current_snapshot/rootfs/etc/hosts" > /dev/null + echo "127.0.0.1 $LXC_NAME" | sudo tee --append "$LXC_SNAPSHOTS/$current_snapshot/rootfs/etc/hosts" > /dev/null # Restore this snapshot. - sudo rsync --acls --archive --delete --executability --itemize-changes --xattrs "$snapshot_path/$current_snapshot/rootfs/" "/var/lib/lxc/$lxc_name/rootfs/" > /dev/null 2>> "$complete_log" + sudo rsync --acls --archive --delete --executability --itemize-changes --xattrs "$LXC_SNAPSHOTS/$current_snapshot/rootfs/" "$LXC_ROOTFS/" > /dev/null 2>> "$complete_log" stop_timer 1 - # Retrieve the app id in the log. 
To manage the app after - ynh_app_id=$(sudo tac "$yunohost_log" | grep --only-matching --max-count=1 "YNH_APP_INSTANCE_NAME=[^ ]*" | cut --delimiter='=' --fields=2) - # Fake the yunohost_result return code of the installation yunohost_result=0 } @@ -136,31 +82,31 @@ use_temp_snapshot () { #================================================= is_lxc_running () { - sudo lxc-info --name=$lxc_name | grep --quiet "RUNNING" + sudo lxc-info --name=$LXC_NAME | grep --quiet "RUNNING" } LXC_INIT () { # Clean previous remaining swap files - sudo swapoff /var/lib/lxc/$lxc_name/rootfs/swap_* 2>/dev/null - sudo rm --force /var/lib/lxc/$lxc_name/rootfs/swap_* - sudo swapoff /var/lib/lxcsnaps/$lxc_name/snap0/rootfs/swap_* 2>/dev/null - sudo rm --force /var/lib/lxcsnaps/$lxc_name/snap0/rootfs/swap_* - sudo swapoff /var/lib/lxcsnaps/$lxc_name/snap1/rootfs/swap_* 2>/dev/null - sudo rm --force /var/lib/lxcsnaps/$lxc_name/snap1/rootfs/swap_* - sudo swapoff /var/lib/lxcsnaps/$lxc_name/snap2/rootfs/swap_* 2>/dev/null - sudo rm --force /var/lib/lxcsnaps/$lxc_name/snap2/rootfs/swap_* + sudo swapoff $LXC_ROOTFS/swap_* 2>/dev/null + sudo rm --force $LXC_ROOTFS/swap_* + sudo swapoff $LXC_SNAPSHOTS/snap0/rootfs/swap_* 2>/dev/null + sudo rm --force $LXC_SNAPSHOTS/snap0/rootfs/swap_* + sudo swapoff $LXC_SNAPSHOTS/snap1/rootfs/swap_* 2>/dev/null + sudo rm --force $LXC_SNAPSHOTS/snap1/rootfs/swap_* + sudo swapoff $LXC_SNAPSHOTS/snap2/rootfs/swap_* 2>/dev/null + sudo rm --force $LXC_SNAPSHOTS/snap2/rootfs/swap_* # Initialize LXC network # Activate the bridge echo "Initialize network for LXC." - sudo ifup $lxc_bridge --interfaces=/etc/network/interfaces.d/$lxc_bridge | tee --append "$complete_log" 2>&1 + sudo ifup $LXC_BRIDGE --interfaces=/etc/network/interfaces.d/$LXC_BRIDGE | tee --append "$complete_log" 2>&1 # Activate iptables rules echo "Activate iptables rules." 
- sudo iptables --append FORWARD --in-interface $lxc_bridge --out-interface $main_iface --jump ACCEPT | tee --append "$complete_log" 2>&1 - sudo iptables --append FORWARD --in-interface $main_iface --out-interface $lxc_bridge --jump ACCEPT | tee --append "$complete_log" 2>&1 - sudo iptables --table nat --append POSTROUTING --source $ip_range.0/24 --jump MASQUERADE | tee --append "$complete_log" 2>&1 + sudo iptables --append FORWARD --in-interface $LXC_BRIDGE --out-interface $MAIN_NETWORK_INTERFACE --jump ACCEPT | tee --append "$complete_log" 2>&1 + sudo iptables --append FORWARD --in-interface $MAIN_NETWORK_INTERFACE --out-interface $LXC_BRIDGE --jump ACCEPT | tee --append "$complete_log" 2>&1 + sudo iptables --table nat --append POSTROUTING --source $LXC_NETWORK.0/24 --jump MASQUERADE | tee --append "$complete_log" 2>&1 } LXC_START () { @@ -174,14 +120,14 @@ LXC_START () { while [ $i -lt $max_try ] do i=$(( $i +1 )) - # Start the container and log the booting process in $script_dir/lxc_boot.log + # Start the container and log the booting process in ./lxc_boot.log # Try to start only if the container is not already started if ! is_lxc_running; then - debug "Start the LXC container" >> "$complete_log" - sudo lxc-start --name=$lxc_name --daemon --logfile "$script_dir/lxc_boot.log" | tee --append "$complete_log" 2>&1 + log_debug "Start the LXC container" >> "$complete_log" + sudo lxc-start --name=$LXC_NAME --daemon --logfile "./lxc_boot.log" | tee --append "$complete_log" 2>&1 local avoid_witness=0 else - debug "A LXC container is already running" + log_debug "A LXC container is already running" local avoid_witness=1 fi @@ -189,9 +135,9 @@ LXC_START () { local j=0 for j in `seq 1 5` do - debug "." >> "$complete_log" + log_debug "." >> "$complete_log" # Try to connect with ssh to check if the container is ready to work. 
- if ssh $arg_ssh -o ConnectTimeout=10 $lxc_name "exit 0" > /dev/null 2>&1; then + if ssh $arg_ssh -o ConnectTimeout=10 $LXC_NAME "exit 0" > /dev/null 2>&1; then # Break the for loop if the container is ready. break fi @@ -206,19 +152,19 @@ LXC_START () { local failstart=0 # Check if the container is running if ! is_lxc_running; then - critical "The LXC container didn't start..." + log_critical "The LXC container didn't start..." failstart=1 if [ $i -ne $max_try ]; then - info "Rebooting the container..." + log_info "Rebooting the container..." fi LXC_STOP # Try to ping security.debian.org to check the connectivity from the container - elif ! ssh $arg_ssh -o ConnectTimeout=60 $lxc_name "sudo ping -q -c 2 security.debian.org > /dev/null 2>&1; exit \$?" >> "$complete_log" 2>&1 + elif ! ssh $arg_ssh -o ConnectTimeout=60 $LXC_NAME "sudo ping -q -c 2 security.debian.org > /dev/null 2>&1; exit \$?" >> "$complete_log" 2>&1 then - critical "The container failed to connect to internet..." + log_critical "The container failed to connect to internet..." failstart=1 if [ $i -ne $max_try ]; then - info "Rebooting the container..." + log_info "Rebooting the container..." fi LXC_STOP # Create files to check if the remove script does not remove them accidentally @@ -236,55 +182,55 @@ LXC_START () { # Send an email only if it's a CI environment if [ $type_exec_env -ne 0 ] then - ci_path=$(grep "CI_URL=" "$script_dir/../config" | cut -d= -f2) + ci_path=$(grep "CI_URL=" "./../config" | cut -d= -f2) local subject="[YunoHost] Container in trouble on $ci_path." local message="The container failed to start $max_try times on $ci_path. 
$lxc_check_result Please have a look to the log of lxc_check: - $(cat "$script_dir/lxc_check.log")" + $(cat "./lxc_check.log")" if [ $lxc_check -eq 2 ]; then # Add the log of lxc_build message="$message Here the log of lxc_build: - $(cat "$script_dir/sub_scripts/Build_lxc.log")" + $(cat "./sub_scripts/Build_lxc.log")" fi - dest=$(grep 'dest=' "$script_dir/../config" | cut -d= -f2) + dest=$(grep 'dest=' "./../config" | cut -d= -f2) mail -s "$subject" "$dest" <<< "$message" fi } - critical "The container failed to start $max_try times..." - info "Boot log:\n" - cat "$script_dir/lxc_boot.log" | tee --append "$complete_log" - info "lxc_check will try to fix the container..." - $script_dir/sub_scripts/lxc_check.sh --no-lock | tee "$script_dir/lxc_check.log" + log_critical "The container failed to start $max_try times..." + log_info "Boot log:\n" + cat "./lxc_boot.log" | tee --append "$complete_log" + log_info "lxc_check will try to fix the container..." + ./sub_scripts/lxc_check.sh --no-lock | tee "./lxc_check.log" # PIPESTATUS is an array with the exit code of each command followed by a pipe local lxc_check=${PIPESTATUS[0]} LXC_INIT if [ $lxc_check -eq 0 ]; then local lxc_check_result="The container seems to be ok, according to lxc_check." - success "$lxc_check_result" + log_success "$lxc_check_result" send_email i=0 elif [ $lxc_check -eq 1 ]; then local lxc_check_result="An error has happened with the host. Please check the configuration." - critical "$lxc_check_result" + log_critical "$lxc_check_result" send_email stop_timer 1 return 1 elif [ $lxc_check -eq 2 ]; then local lxc_check_result="The container is broken, it will be rebuilt." - critical "$lxc_check_result" - $script_dir/sub_scripts/lxc_build.sh + log_critical "$lxc_check_result" + ./sub_scripts/lxc_build.sh LXC_INIT send_email i=0 elif [ $lxc_check -eq 3 ]; then local lxc_check_result="The container has been fixed by lxc_check." 
- success "$lxc_check_result" + log_success "$lxc_check_result" send_email i=0 fi @@ -294,10 +240,10 @@ LXC_START () { start_timer # Copy the package into the container. - rsync -rq --delete "$package_path" "$lxc_name": >> "$complete_log" 2>&1 + rsync -rq --delete "$package_path" "$LXC_NAME": >> "$complete_log" 2>&1 # Execute the command given in argument in the container and log its results. - ssh $arg_ssh $lxc_name "$1; exit $?" | tee -a "$complete_log" + ssh $arg_ssh $LXC_NAME "$1; exit $?" | tee -a "$complete_log" # Store the return code of the command local returncode=${PIPESTATUS[0]} @@ -313,68 +259,54 @@ LXC_STOP () { start_timer # Stop the LXC container if is_lxc_running; then - debug "Stop the LXC container" - sudo lxc-stop --name=$lxc_name | tee --append "$complete_log" 2>&1 + log_debug "Stop the LXC container" + sudo lxc-stop --name=$LXC_NAME | tee --append "$complete_log" 2>&1 fi # Fix the missing hostname in the hosts file # If the hostname is missing in /etc/hosts inside the snapshot - if ! sudo grep --quiet "$lxc_name" "$snapshot_path/$current_snapshot/rootfs/etc/hosts" + if ! 
sudo grep --quiet "$LXC_NAME" "$LXC_SNAPSHOTS/$current_snapshot/rootfs/etc/hosts" then # If the hostname was replaced by name of the snapshot, fix it - if sudo grep --quiet "$current_snapshot" "$snapshot_path/$current_snapshot/rootfs/etc/hosts" + if sudo grep --quiet "$current_snapshot" "$LXC_SNAPSHOTS/$current_snapshot/rootfs/etc/hosts" then # Replace snapX by the real hostname - sudo sed --in-place "s/$current_snapshot/$lxc_name/" "$snapshot_path/$current_snapshot/rootfs/etc/hosts" + sudo sed --in-place "s/$current_snapshot/$LXC_NAME/" "$LXC_SNAPSHOTS/$current_snapshot/rootfs/etc/hosts" else # Otherwise, simply add the hostname - echo "127.0.0.1 $lxc_name" | sudo tee --append "$snapshot_path/$current_snapshot/rootfs/etc/hosts" > /dev/null + echo "127.0.0.1 $LXC_NAME" | sudo tee --append "$LXC_SNAPSHOTS/$current_snapshot/rootfs/etc/hosts" > /dev/null fi fi # Restore the snapshot. - debug "Restore the previous snapshot." - sudo rsync --acls --archive --delete --executability --itemize-changes --xattrs "$snapshot_path/$current_snapshot/rootfs/" "/var/lib/lxc/$lxc_name/rootfs/" > /dev/null 2>> "$complete_log" + log_debug "Restore the previous snapshot." + sudo rsync --acls --archive --delete --executability --itemize-changes --xattrs "$LXC_SNAPSHOTS/$current_snapshot/rootfs/" "$LXC_ROOTFS/" > /dev/null 2>> "$complete_log" stop_timer 1 } LXC_TURNOFF () { # Disable LXC network - echo "Disable iptables rules." - if sudo iptables --check FORWARD --in-interface $lxc_bridge --out-interface $main_iface --jump ACCEPT 2> /dev/null + log_debug "Disable iptables rules." 
+ if sudo iptables --check FORWARD --in-interface $LXC_BRIDGE --out-interface $MAIN_NETWORK_INTERFACE --jump ACCEPT 2> /dev/null then - sudo iptables --delete FORWARD --in-interface $lxc_bridge --out-interface $main_iface --jump ACCEPT >> "$complete_log" 2>&1 + sudo iptables --delete FORWARD --in-interface $LXC_BRIDGE --out-interface $MAIN_NETWORK_INTERFACE --jump ACCEPT >> "$complete_log" 2>&1 fi - if sudo iptables --check FORWARD --in-interface $main_iface --out-interface $lxc_bridge --jump ACCEPT 2> /dev/null + if sudo iptables --check FORWARD --in-interface $MAIN_NETWORK_INTERFACE --out-interface $LXC_BRIDGE --jump ACCEPT 2> /dev/null then - sudo iptables --delete FORWARD --in-interface $main_iface --out-interface $lxc_bridge --jump ACCEPT | tee --append "$complete_log" 2>&1 + sudo iptables --delete FORWARD --in-interface $MAIN_NETWORK_INTERFACE --out-interface $LXC_BRIDGE --jump ACCEPT | tee --append "$complete_log" 2>&1 fi - if sudo iptables --table nat --check POSTROUTING --source $ip_range.0/24 --jump MASQUERADE 2> /dev/null + if sudo iptables --table nat --check POSTROUTING --source $LXC_NETWORK.0/24 --jump MASQUERADE 2> /dev/null then - sudo iptables --table nat --delete POSTROUTING --source $ip_range.0/24 --jump MASQUERADE | tee --append "$complete_log" 2>&1 + sudo iptables --table nat --delete POSTROUTING --source $LXC_NETWORK.0/24 --jump MASQUERADE | tee --append "$complete_log" 2>&1 fi - echo "Disable the network bridge." - if sudo ifquery $lxc_bridge --state > /dev/null + log_debug "Disable the network bridge." 
+ if sudo ifquery $LXC_BRIDGE --state > /dev/null then - sudo ifdown --force $lxc_bridge | tee --append "$complete_log" 2>&1 + sudo ifdown --force $LXC_BRIDGE | tee --append "$complete_log" 2>&1 fi # Set snap0 as the current snapshot current_snapshot=snap0 } - -LXC_CONNECT_INFO () { - # Print access information - - echo "> To access the container:" - echo "To execute one command:" - echo -e "\e[1msudo lxc-attach -n $lxc_name -- command\e[0m" - - echo "To establish a ssh connection:" - if [ $(cat "$script_dir/sub_scripts/setup_user") = "root" ]; then - echo -ne "\e[1msudo " - fi - echo -e "\e[1mssh $arg_ssh $lxc_name\e[0m" -} diff --git a/sub_scripts/lxc_build.sh b/sub_scripts/lxc_build.sh index a836712..5463220 100755 --- a/sub_scripts/lxc_build.sh +++ b/sub_scripts/lxc_build.sh @@ -8,286 +8,212 @@ then exit 1 fi -# Récupère le dossier du script -if [ "${0:0:1}" == "/" ]; then script_dir="$(dirname "$0")"; else script_dir="$(echo $PWD/$(dirname "$0" | cut -d '.' -f2) | sed 's@/$@@')"; fi +# Load configuration +dnsforce=1 -pcheck_config="$script_dir/../config" -# Tente de lire les informations depuis le fichier de config si il existe -if [ -e "$pcheck_config" ] -then - PLAGE_IP=$(cat "$pcheck_config" | grep PLAGE_IP= | cut -d '=' -f2) - DOMAIN=$(cat "$pcheck_config" | grep DOMAIN= | cut -d '=' -f2) - YUNO_PWD=$(cat "$pcheck_config" | grep YUNO_PWD= | cut -d '=' -f2) - LXC_NAME=$(cat "$pcheck_config" | grep LXC_NAME= | cut -d '=' -f2) - LXC_BRIDGE=$(cat "$pcheck_config" | grep LXC_BRIDGE= | cut -d '=' -f2) - dns=$(cat "$pcheck_config" | grep dns= | cut -d '=' -f2) - dnsforce=$(cat "$pcheck_config" | grep dnsforce= | cut -d '=' -f2) - main_iface=$(cat "$pcheck_config" | grep iface= | cut -d '=' -f2) - DISTRIB=$(cat "$pcheck_config" | grep DISTRIB= | cut -d '=' -f2) - branch=$(cat "$pcheck_config" | grep BRANCH= | cut -d '=' -f2) -fi +cd $(dirname $(realpath $0) | sed 's@/sub_scripts$@@g') +source "./sub_scripts/common.sh" -LOG_BUILD_LXC="$script_dir/Build_lxc.log" -# 
Utilise des valeurs par défaut si les variables sont vides. -test -n "$PLAGE_IP" || PLAGE_IP=10.1.4 -test -n "$DOMAIN" || DOMAIN=domain.tld -test -n "$YUNO_PWD" || YUNO_PWD=admin -test -n "$LXC_NAME" || LXC_NAME=pchecker_lxc -test -n "$LXC_BRIDGE" || LXC_BRIDGE=lxc-pchecker -test -n "$dnsforce" || dnsforce=1 -test -n "$DISTRIB" || DISTRIB=buster -test -n "$branch" || branch="" -ARG_SSH="-t" +LXC_BUILD() +{ + # Met en place le lock de Package check, le temps de l'installation + touch "$lock_file" + echo $(whoami) > "./.setup_user" -# Tente de définir l'interface réseau principale -if [ -z $main_iface ] # Si main_iface est vide, tente de le trouver. -then -# main_iface=$(sudo route | grep default.*0.0.0.0 -m1 | awk '{print $8;}') # Prend l'interface réseau défini par default - main_iface=$(sudo ip route | grep default | awk '{print $5;}') # Prend l'interface réseau défini par default - if [ -z $main_iface ]; then - echo -e "\e[91mImpossible de déterminer le nom de l'interface réseau de l'hôte.\e[0m" - exit 1 - fi -fi + log_title "Installing dependencies..." -if [ -z $dns ] # Si l'adresse du dns est vide, tente de le déterminer à partir de la passerelle par défaut. -then -# dns=$(sudo route -n | grep ^0.0.0.0.*$main_iface | awk '{print $2;}') - dns=$(sudo ip route | grep default | awk '{print $3;}') - if [ -z $dns ]; then - echo -e "\e[91mImpossible de déterminer l'adresse de la passerelle.\e[0m" - exit 1 - fi -fi + DEPENDENCIES="lxc lxctl git curl lynx jq python3-pip debootstrap rsync bridge-utils" + sudo apt-get update + sudo apt-get install -y $DEPENDENCIES -touch "$script_dir/../pcheck.lock" # Met en place le lock de Package check, le temps de l'installation + # Créer le dossier lxcsnaps, pour s'assurer que lxc utilisera ce dossier, même avec lxc 2. 
+ sudo mkdir -p /var/lib/lxcsnaps -# Check user -echo $(whoami) > "$script_dir/setup_user" + # Si le conteneur existe déjà + if sudo lxc-info -n $LXC_NAME > /dev/null 2>&1 + then + log_title "Suppression du conteneur existant." + ./sub_scripts/lxc_remove.sh + fi -# Enregistre le nom de l'interface réseau de l'hôte dans un fichier de config -echo -e "# Interface réseau principale de l'hôte\niface=$main_iface\n" > "$pcheck_config" -echo -e "# Adresse du dns\ndns=$dns\n" >> "$pcheck_config" -echo -e "# Forçage du dns\ndnsforce=$dnsforce\n" >> "$pcheck_config" -# Enregistre les infos dans le fichier de config. -echo -e "# Plage IP du conteneur\nPLAGE_IP=$PLAGE_IP\n" >> "$pcheck_config" -echo -e "# Domaine de test\nDOMAIN=$DOMAIN\n" >> "$pcheck_config" -echo -e "# Mot de passe\nYUNO_PWD=$YUNO_PWD\n" >> "$pcheck_config" -echo -e "# Nom du conteneur\nLXC_NAME=$LXC_NAME\n" >> "$pcheck_config" -echo -e "# Nom du bridge\nLXC_BRIDGE=$LXC_BRIDGE\n" >> "$pcheck_config" -echo -e "# Distribution debian\nDISTRIB=$DISTRIB" >> "$pcheck_config" -echo -e "# Branche à utiliser pour le script d'install de cette distribution (si non standard)\nBRANCH=$branch\n" >> "$pcheck_config" + log_title "Création d'une machine debian $DISTRIB minimaliste." + sudo lxc-create -n $LXC_NAME -t download -- -d debian -r $DISTRIB -a $(dpkg --print-architecture) -echo -e "\e[1m> Update et install lxc lxctl\e[0m" | tee -a "$LOG_BUILD_LXC" -sudo apt-get update >> "$LOG_BUILD_LXC" 2>&1 -sudo apt-get install -y lxc lxctl >> "$LOG_BUILD_LXC" 2>&1 + log_title "Autoriser l'ip forwarding, pour router vers la machine virtuelle." + echo "net.ipv4.ip_forward=1" | sudo tee /etc/sysctl.d/lxc_pchecker.conf + sudo sysctl -p /etc/sysctl.d/lxc_pchecker.conf -echo -e "\e[1m> Install git, curl and lynx\e[0m" | tee -a "$LOG_BUILD_LXC" -sudo apt-get install -y git curl lynx jq python3-pip boxes >> "$LOG_BUILD_LXC" 2>&1 -# For those who have disabled APT::Install-Recommends we need to manually install the following packages. 
-sudo apt-get install -y debootstrap rsync bridge-utils >> "$LOG_BUILD_LXC" 2>&1 - -sudo mkdir -p /var/lib/lxcsnaps # Créer le dossier lxcsnaps, pour s'assurer que lxc utilisera ce dossier, même avec lxc 2. - -if sudo lxc-info -n $LXC_NAME > /dev/null 2>&1 -then # Si le conteneur existe déjà - echo -e "\e[1m> Suppression du conteneur existant.\e[0m" | tee -a "$LOG_BUILD_LXC" - "$script_dir/lxc_remove.sh" quiet | tee -a "$LOG_BUILD_LXC" -fi - -echo -e "\e[1m> Création d'une machine debian $DISTRIB minimaliste.\e[0m" | tee -a "$LOG_BUILD_LXC" -sudo lxc-create -n $LXC_NAME -t download -- -d debian -r $DISTRIB -a $(dpkg --print-architecture) >> "$LOG_BUILD_LXC" 2>&1 - -echo -e "\e[1m> Autoriser l'ip forwarding, pour router vers la machine virtuelle.\e[0m" | tee -a "$LOG_BUILD_LXC" -echo "net.ipv4.ip_forward=1" | sudo tee /etc/sysctl.d/lxc_pchecker.conf >> "$LOG_BUILD_LXC" 2>&1 -sudo sysctl -p /etc/sysctl.d/lxc_pchecker.conf >> "$LOG_BUILD_LXC" 2>&1 - -echo -e "\e[1m> Ajoute un brige réseau pour la machine virtualisée\e[0m" | tee -a "$LOG_BUILD_LXC" -echo | sudo tee /etc/network/interfaces.d/$LXC_BRIDGE <> "$LOG_BUILD_LXC" 2>&1 + log_title "Ajoute un brige réseau pour la machine virtualisée" + echo | sudo tee /etc/network/interfaces.d/$LXC_BRIDGE < Active le bridge réseau\e[0m" | tee -a "$LOG_BUILD_LXC" -sudo ifup $LXC_BRIDGE --interfaces=/etc/network/interfaces.d/$LXC_BRIDGE >> "$LOG_BUILD_LXC" 2>&1 + log_title "Active le bridge réseau" + sudo ifup $LXC_BRIDGE --interfaces=/etc/network/interfaces.d/$LXC_BRIDGE -echo -e "\e[1m> Configuration réseau du conteneur\e[0m" | tee -a "$LOG_BUILD_LXC" -if [ $(lsb_release -sc) != buster ] -then - sudo sed -i "s/^lxc.network.type = empty$/lxc.network.type = veth\nlxc.network.flags = up\nlxc.network.link = $LXC_BRIDGE\nlxc.network.name = eth0\nlxc.network.hwaddr = 00:FF:AA:00:00:01/" /var/lib/lxc/$LXC_NAME/config >> "$LOG_BUILD_LXC" 2>&1 -else - echo -e "lxc.net.0.type = veth\nlxc.net.0.flags = up\nlxc.net.0.link = 
$LXC_BRIDGE\nlxc.net.0.name = eth0\nlxc.net.0.hwaddr = 00:FF:AA:00:00:01" | sudo tee -a /var/lib/lxc/$LXC_NAME/config >> "$LOG_BUILD_LXC" 2>&1 -fi + log_title "Configuration réseau du conteneur" + if [ $(lsb_release -sc) != buster ] + then + sudo sed -i "s/^lxc.network.type = empty$/lxc.network.type = veth\nlxc.network.flags = up\nlxc.network.link = $LXC_BRIDGE\nlxc.network.name = eth0\nlxc.network.hwaddr = 00:FF:AA:00:00:01/" /var/lib/lxc/$LXC_NAME/config + else + echo -e "lxc.net.0.type = veth\nlxc.net.0.flags = up\nlxc.net.0.link = $LXC_BRIDGE\nlxc.net.0.name = eth0\nlxc.net.0.hwaddr = 00:FF:AA:00:00:01" | sudo tee -a /var/lib/lxc/$LXC_NAME/config + fi -echo -e "\e[1m> Configuration réseau de la machine virtualisée\e[0m" | tee -a "$LOG_BUILD_LXC" -sudo sed -i "s@iface eth0 inet dhcp@iface eth0 inet static\n\taddress $PLAGE_IP.2/24\n\tgateway $PLAGE_IP.1@" /var/lib/lxc/$LXC_NAME/rootfs/etc/network/interfaces >> "$LOG_BUILD_LXC" 2>&1 + log_title "Configuration réseau de la machine virtualisée" + sudo sed -i "s@iface eth0 inet dhcp@iface eth0 inet static\n\taddress $LXC_NETWORK.2/24\n\tgateway $LXC_NETWORK.1@" $LXC_ROOTFS/etc/network/interfaces -echo -e "\e[1m> Configure le parefeu\e[0m" | tee -a "$LOG_BUILD_LXC" -sudo iptables -A FORWARD -i $LXC_BRIDGE -o $main_iface -j ACCEPT >> "$LOG_BUILD_LXC" 2>&1 -sudo iptables -A FORWARD -i $main_iface -o $LXC_BRIDGE -j ACCEPT >> "$LOG_BUILD_LXC" 2>&1 -sudo iptables -t nat -A POSTROUTING -s $PLAGE_IP.0/24 -j MASQUERADE >> "$LOG_BUILD_LXC" 2>&1 + log_title "Configure le parefeu" + sudo iptables -A FORWARD -i $LXC_BRIDGE -o $MAIN_NETWORK_INTERFACE -j ACCEPT + sudo iptables -A FORWARD -i $MAIN_NETWORK_INTERFACE -o $LXC_BRIDGE -j ACCEPT + sudo iptables -t nat -A POSTROUTING -s $LXC_NETWORK.0/24 -j MASQUERADE -echo -e "\e[1m> Vérification du contenu du resolv.conf\e[0m" | tee -a "$LOG_BUILD_LXC" -sudo cp -a /var/lib/lxc/$LXC_NAME/rootfs/etc/resolv.conf /var/lib/lxc/$LXC_NAME/rootfs/etc/resolv.conf.origin -if ! 
sudo cat /var/lib/lxc/$LXC_NAME/rootfs/etc/resolv.conf | grep -q nameserver; then - dnsforce=1 # Le resolv.conf est vide, on force l'ajout d'un dns. - sed -i "s/dnsforce=.*/dnsforce=$dnsforce/" "$pcheck_config" -fi -if [ $dnsforce -eq 1 ]; then # Force la réécriture du resolv.conf - echo "nameserver $dns" | sudo tee /var/lib/lxc/$LXC_NAME/rootfs/etc/resolv.conf -fi + log_title "Vérification du contenu du resolv.conf" + sudo cp -a $LXC_ROOTFS/etc/resolv.conf $LXC_ROOTFS/etc/resolv.conf.origin + if ! sudo cat $LXC_ROOTFS/etc/resolv.conf | grep -q nameserver; then + dnsforce=1 # Le resolv.conf est vide, on force l'ajout d'un dns. + fi + if [ $dnsforce -eq 1 ]; then # Force la réécriture du resolv.conf + echo "nameserver $DNS_RESOLVER" | sudo tee $LXC_ROOTFS/etc/resolv.conf + fi -# Fix an issue with apparmor when the container start. -if [ $(lsb_release -sc) != buster ] -then - echo -e "\n# Fix apparmor issues\nlxc.aa_profile = unconfined" | sudo tee -a /var/lib/lxc/$LXC_NAME/config >> "$LOG_BUILD_LXC" 2>&1 -else - echo -e "\n# Fix apparmor issues\nlxc.apparmor.profile = unconfined" | sudo tee -a /var/lib/lxc/$LXC_NAME/config >> "$LOG_BUILD_LXC" 2>&1 -fi + # Fix an issue with apparmor when the container start. + if [ $(lsb_release -sc) != buster ] + then + echo -e "\n# Fix apparmor issues\nlxc.aa_profile = unconfined" | sudo tee -a /var/lib/lxc/$LXC_NAME/config + else + echo -e "\n# Fix apparmor issues\nlxc.apparmor.profile = unconfined" | sudo tee -a /var/lib/lxc/$LXC_NAME/config + fi -echo -e "\e[1m> Démarrage de la machine\e[0m" | tee -a "$LOG_BUILD_LXC" -sudo lxc-start -n $LXC_NAME -d --logfile "$script_dir/lxc_boot.log" >> "$LOG_BUILD_LXC" 2>&1 -sleep 3 -sudo lxc-ls -f >> "$LOG_BUILD_LXC" 2>&1 + log_title "Démarrage de la machine" + sudo lxc-start -n $LXC_NAME -d --logfile "./lxc_boot.log" + sleep 3 + sudo lxc-ls -f -echo -e "\e[1m> Test la configuration dns\e[0m" | tee -a "$LOG_BUILD_LXC" -broken_dns=0 -while ! 
sudo lxc-attach -n $LXC_NAME -- getent hosts debian.org > /dev/null 2>&1 -do - echo -e "\e[1m>>> The dns isn't working (Current dns = $(sudo cat /var/lib/lxc/$LXC_NAME/rootfs/etc/resolv.conf | grep nameserver | awk '{print $2}'))" + log_title "Test la configuration dns" + broken_dns=0 + while ! RUN_INSIDE_LXC getent hosts debian.org + do + log_info "The dns isn't working (Current dns = $(sudo cat $LXC_ROOTFS/etc/resolv.conf | grep nameserver | awk '{print $2}'))" - if [ $broken_dns -eq 2 ] - then - echo -e "\e[1m>>>The dns is still broken, use FDN dns\e[0m" | tee -a "$LOG_BUILD_LXC" - echo "nameserver 80.67.169.12" | sudo tee /var/lib/lxc/$LXC_NAME/rootfs/etc/resolv.conf - dnsforce=0 - ((broken_dns++)) - elif [ $dnsforce -eq 0 ] - then - echo -e "\e[1m>>>Force to use the dns from the config file\e[0m" | tee -a "$LOG_BUILD_LXC" - echo "nameserver $dns" | sudo tee /var/lib/lxc/$LXC_NAME/rootfs/etc/resolv.conf - new_dns="$dns" - dnsforce=1 - ((broken_dns++)) - else - echo -e "\e[1m>>>Force to use the default dns\e[0m" | tee -a "$LOG_BUILD_LXC" - sudo cp -a /var/lib/lxc/$LXC_NAME/rootfs/etc/resolv.conf.origin /var/lib/lxc/$LXC_NAME/rootfs/etc/resolv.conf - new_dns="$(sudo cat /var/lib/lxc/$LXC_NAME/rootfs/etc/resolv.conf | grep nameserver | awk '{print $2}')" - dnsforce=0 - ((broken_dns++)) - fi - echo -e "\e[1m>>> Try to use the dns address $new_dns\e[0m" | tee -a "$LOG_BUILD_LXC" + if [ $broken_dns -eq 2 ] + then + log_info "The dns is still broken, use FDN dns" + echo "nameserver 80.67.169.12" | sudo tee $LXC_ROOTFS/etc/resolv.conf + dnsforce=0 + ((broken_dns++)) + elif [ $dnsforce -eq 0 ] + then + log_info "Force to use the dns from the config file" + echo "nameserver $DNS_RESOLVER" | sudo tee $LXC_ROOTFS/etc/resolv.conf + new_dns="$DNS_RESOLVER" + dnsforce=1 + ((broken_dns++)) + else + log_info "Force to use the default dns" + sudo cp -a $LXC_ROOTFS/etc/resolv.conf.origin $LXC_ROOTFS/etc/resolv.conf + new_dns="$(sudo cat $LXC_ROOTFS/etc/resolv.conf | grep 
nameserver | awk '{print $2}')" + dnsforce=0 + ((broken_dns++)) + fi + log_info "Try to use the dns address $new_dns" - # Change the value of dnsforce into the config file - sed -i "s/dnsforce=.*/dnsforce=$dnsforce/" "$pcheck_config" + if [ $broken_dns -eq 3 ]; then + # Break the loop if all the possibilities have been tried. + break + fi + done - if [ $broken_dns -eq 3 ]; then - # Break the loop if all the possibilities have been tried. - break - fi -done + log_title "Update et install aptitude sudo git" + RUN_INSIDE_LXC apt-get update + RUN_INSIDE_LXC apt-get install -y sudo git ssh openssh-server -echo -e "\e[1m> Update et install aptitude sudo git\e[0m" | tee -a "$LOG_BUILD_LXC" -sudo lxc-attach -n $LXC_NAME -- apt-get update -sudo lxc-attach -n $LXC_NAME -- apt-get install -y aptitude sudo git ssh openssh-server -echo -e "\e[1m> Installation des paquets standard et ssh-server\e[0m" | tee -a "$LOG_BUILD_LXC" -sudo lxc-attach -n $LXC_NAME -- aptitude install -y ~pstandard ~prequired ~pimportant + log_title "Renseigne /etc/hosts sur l'invité" + echo "127.0.0.1 $LXC_NAME" | sudo tee -a $LXC_ROOTFS/etc/hosts -echo -e "\e[1m> Renseigne /etc/hosts sur l'invité\e[0m" | tee -a "$LOG_BUILD_LXC" -echo "127.0.0.1 $LXC_NAME" | sudo tee -a /var/lib/lxc/$LXC_NAME/rootfs/etc/hosts >> "$LOG_BUILD_LXC" 2>&1 + log_title "Ajoute l'user pchecker" + RUN_INSIDE_LXC useradd -m -p pchecker pchecker -echo -e "\e[1m> Ajoute l'user pchecker\e[0m" | tee -a "$LOG_BUILD_LXC" -sudo lxc-attach -n $LXC_NAME -- useradd -m -p pchecker pchecker >> "$LOG_BUILD_LXC" 2>&1 + log_title "Autorise pchecker à utiliser sudo sans mot de passe" + echo "pchecker ALL=(ALL:ALL) NOPASSWD: ALL" | sudo tee -a $LXC_ROOTFS/etc/sudoers -echo -e "\e[1m> Autorise pchecker à utiliser sudo sans mot de passe\e[0m" | tee -a "$LOG_BUILD_LXC" -echo "pchecker ALL=(ALL:ALL) NOPASSWD: ALL" | sudo tee -a /var/lib/lxc/$LXC_NAME/rootfs/etc/sudoers >> "$LOG_BUILD_LXC" 2>&1 + log_title "Mise en place de la connexion ssh vers 
l'invité." + if [ -e $HOME/.ssh/$LXC_NAME ]; then + rm -f $HOME/.ssh/$LXC_NAME $HOME/.ssh/$LXC_NAME.pub + ssh-keygen -f $HOME/.ssh/known_hosts -R $LXC_NETWORK.2 + fi + ssh-keygen -t rsa -f $HOME/.ssh/$LXC_NAME -P '' + sudo mkdir $LXC_ROOTFS/home/pchecker/.ssh + sudo cp $HOME/.ssh/$LXC_NAME.pub $LXC_ROOTFS/home/pchecker/.ssh/authorized_keys + RUN_INSIDE_LXC chown pchecker: -R /home/pchecker/.ssh -echo -e "\e[1m> Mise en place de la connexion ssh vers l'invité.\e[0m" | tee -a "$LOG_BUILD_LXC" -if [ -e $HOME/.ssh/$LXC_NAME ]; then - rm -f $HOME/.ssh/$LXC_NAME $HOME/.ssh/$LXC_NAME.pub - ssh-keygen -f $HOME/.ssh/known_hosts -R $PLAGE_IP.2 -fi -ssh-keygen -t rsa -f $HOME/.ssh/$LXC_NAME -P '' >> "$LOG_BUILD_LXC" 2>&1 -sudo mkdir /var/lib/lxc/$LXC_NAME/rootfs/home/pchecker/.ssh >> "$LOG_BUILD_LXC" 2>&1 -sudo cp $HOME/.ssh/$LXC_NAME.pub /var/lib/lxc/$LXC_NAME/rootfs/home/pchecker/.ssh/authorized_keys >> "$LOG_BUILD_LXC" 2>&1 -sudo lxc-attach -n $LXC_NAME -- chown pchecker: -R /home/pchecker/.ssh >> "$LOG_BUILD_LXC" 2>&1 - -echo | tee -a $HOME/.ssh/config <> "$LOG_BUILD_LXC" 2>&1 + echo | tee -a $HOME/.ssh/config <> ~/.ssh/known_hosts -ssh $ARG_SSH $LXC_NAME "exit 0" # Initie une premier connexion SSH pour valider la clé. -if [ "$?" -ne 0 ]; then # Si l'utilisateur tarde trop, la connexion sera refusée... ??? - ssh $ARG_SSH $LXC_NAME "exit 0" # Initie une premier connexion SSH pour valider la clé. -fi + ssh-keyscan -H $LXC_NETWORK.2 >> ~/.ssh/known_hosts + # Initie une premier connexion SSH pour valider la clé. + RUN_THROUGH_SSH "exit 0" + # Si l'utilisateur tarde trop, la connexion sera refusée... ??? + [ "$?" 
-ne 0 ] && RUN_THROUGH_SSH "exit 0" -# Fix ssh common issues with stretch "No supported key exchange algorithms" -sudo lxc-attach -n $LXC_NAME -- dpkg-reconfigure openssh-server >> "$LOG_BUILD_LXC" 2>&1 + [ -n "$YNH_INSTALL_SCRIPT_BRANCH" ] && YNH_INSTALL_SCRIPT_BRANCH="--branch $YNH_INSTALL_SCRIPT_BRANCH" -# Fix locales issue -sudo lxc-attach -n $LXC_NAME -- locale-gen en_US.UTF-8 >> "$LOG_BUILD_LXC" 2>&1 -sudo lxc-attach -n $LXC_NAME -- localedef -i en_US -f UTF-8 en_US.UTF-8 >> "$LOG_BUILD_LXC" 2>&1 - -if [ -n "$branch" ]; then - branch="--branch $branch" -fi - -ssh $ARG_SSH $LXC_NAME "git clone https://github.com/YunoHost/install_script $branch /tmp/install_script" >> "$LOG_BUILD_LXC" 2>&1 -echo -e "\e[1m> Installation de Yunohost...\e[0m" | tee -a "$LOG_BUILD_LXC" -ssh $ARG_SSH $LXC_NAME "cd /tmp/install_script; sudo ./install_yunohost -a" | tee -a "$LOG_BUILD_LXC" 2>&1 -echo -e "\e[1m> Disable apt-daily to prevent it from messing with apt/dpkg lock\e[0m" | tee -a "$LOG_BUILD_LXC" -ssh $ARG_SSH $LXC_NAME "systemctl -q stop apt-daily.timer" | tee -a "$LOG_BUILD_LXC" 2>&1 -ssh $ARG_SSH $LXC_NAME "systemctl -q stop apt-daily-upgrade.timer" | tee -a "$LOG_BUILD_LXC" 2>&1 -ssh $ARG_SSH $LXC_NAME "systemctl -q stop apt-daily.service" | tee -a "$LOG_BUILD_LXC" 2>&1 -ssh $ARG_SSH $LXC_NAME "systemctl -q stop apt-daily-upgrade.service" | tee -a "$LOG_BUILD_LXC" 2>&1 -ssh $ARG_SSH $LXC_NAME "systemctl -q disable apt-daily.timer" | tee -a "$LOG_BUILD_LXC" 2>&1 -ssh $ARG_SSH $LXC_NAME "systemctl -q disable apt-daily-upgrade.timer" | tee -a "$LOG_BUILD_LXC" 2>&1 -ssh $ARG_SSH $LXC_NAME "systemctl -q disable apt-daily.service" | tee -a "$LOG_BUILD_LXC" 2>&1 -ssh $ARG_SSH $LXC_NAME "systemctl -q disable apt-daily-upgrade.service" | tee -a "$LOG_BUILD_LXC" 2>&1 -ssh $ARG_SSH $LXC_NAME "rm -f /etc/cron.daily/apt-compat" | tee -a "$LOG_BUILD_LXC" 2>&1 -ssh $ARG_SSH $LXC_NAME "cp /bin/true /usr/lib/apt/apt.systemd.daily" | tee -a "$LOG_BUILD_LXC" 2>&1 + RUN_THROUGH_SSH git clone 
https://github.com/YunoHost/install_script $YNH_INSTALL_SCRIPT_BRANCH /tmp/install_script + log_title "Installation de Yunohost..." + RUN_THROUGH_SSH bash /tmp/install_script/install_yunohost -a + log_title "Disable apt-daily to prevent it from messing with apt/dpkg lock" + RUN_THROUGH_SSH systemctl -q stop apt-daily.timer + RUN_THROUGH_SSH systemctl -q stop apt-daily-upgrade.timer + RUN_THROUGH_SSH systemctl -q stop apt-daily.service + RUN_THROUGH_SSH systemctl -q stop apt-daily-upgrade.service + RUN_THROUGH_SSH systemctl -q disable apt-daily.timer + RUN_THROUGH_SSH systemctl -q disable apt-daily-upgrade.timer + RUN_THROUGH_SSH systemctl -q disable apt-daily.service + RUN_THROUGH_SSH systemctl -q disable apt-daily-upgrade.service + RUN_THROUGH_SSH rm -f /etc/cron.daily/apt-compat + RUN_THROUGH_SSH cp /bin/true /usr/lib/apt/apt.systemd.daily -echo -e "\e[1m> Post install Yunohost\e[0m" | tee -a "$LOG_BUILD_LXC" -ssh $ARG_SSH $LXC_NAME "sudo yunohost tools postinstall --domain $DOMAIN --password $YUNO_PWD --force-password" | tee -a "$LOG_BUILD_LXC" 2>&1 + log_title "Post install Yunohost" + RUN_THROUGH_SSH yunohost tools postinstall --domain $DOMAIN --password $YUNO_PWD --force-password -# Disable password strength check -ssh $ARG_SSH $LXC_NAME "sudo yunohost settings set security.password.admin.strength -v -1" | tee -a "$LOG_BUILD_LXC" 2>&1 -ssh $ARG_SSH $LXC_NAME "sudo yunohost settings set security.password.user.strength -v -1" | tee -a "$LOG_BUILD_LXC" 2>&1 + # Disable password strength check + RUN_THROUGH_SSH yunohost settings set security.password.admin.strength -v -1 + RUN_THROUGH_SSH yunohost settings set security.password.user.strength -v -1 -USER_TEST=$(cat "$(dirname "$script_dir")/package_check.sh" | grep test_user= | cut -d '=' -f2) -SOUS_DOMAIN="sous.$DOMAIN" -# echo "Le mot de passe Yunohost est \'$YUNO_PWD\'" -echo -e "\e[1m> Ajout du sous domaine de test\e[0m" | tee -a "$LOG_BUILD_LXC" -ssh $ARG_SSH $LXC_NAME "sudo yunohost domain add 
\"$SOUS_DOMAIN\"" -USER_TEST_CLEAN=${USER_TEST//"_"/""} -echo -e "\e[1m> Ajout de l'utilisateur de test\e[0m" | tee -a "$LOG_BUILD_LXC" -ssh $ARG_SSH $LXC_NAME "sudo yunohost user create --firstname \"$USER_TEST_CLEAN\" --mail \"$USER_TEST_CLEAN@$DOMAIN\" --lastname \"$USER_TEST_CLEAN\" --password \"$YUNO_PWD\" \"$USER_TEST\"" + # echo "Le mot de passe Yunohost est \'$YUNO_PWD\'" + log_title "Ajout du sous domaine de test" + RUN_THROUGH_SSH yunohost domain add $SUBDOMAIN + TEST_USER_DISPLAY=${TEST_USER//"_"/""} + log_title "Ajout de l'utilisateur de test" + RUN_THROUGH_SSH yunohost user create $TEST_USER --firstname $TEST_USER_DISPLAY --mail $TEST_USER@$DOMAIN --lastname $TEST_USER_DISPLAY --password \"$YUNO_PWD\" -echo -e -e "\e[1m\n> Vérification de l'état de Yunohost\e[0m" | tee -a "$LOG_BUILD_LXC" -ssh $ARG_SSH $LXC_NAME "sudo yunohost -v" | tee -a "$LOG_BUILD_LXC" 2>&1 + log_title "Vérification de l'état de Yunohost" + RUN_THROUGH_SSH yunohost --version + log_title "Arrêt de la machine virtualisée" + sudo lxc-stop -n $LXC_NAME -echo -e "\e[1m> Arrêt de la machine virtualisée\e[0m" | tee -a "$LOG_BUILD_LXC" -sudo lxc-stop -n $LXC_NAME >> "$LOG_BUILD_LXC" 2>&1 + log_title "Suppression des règles de parefeu" + sudo iptables -D FORWARD -i $LXC_BRIDGE -o $MAIN_NETWORK_INTERFACE -j ACCEPT + sudo iptables -D FORWARD -i $MAIN_NETWORK_INTERFACE -o $LXC_BRIDGE -j ACCEPT + sudo iptables -t nat -D POSTROUTING -s $LXC_NETWORK.0/24 -j MASQUERADE + sudo ifdown --force $LXC_BRIDGE -echo -e "\e[1m> Suppression des règles de parefeu\e[0m" | tee -a "$LOG_BUILD_LXC" -sudo iptables -D FORWARD -i $LXC_BRIDGE -o $main_iface -j ACCEPT >> "$LOG_BUILD_LXC" 2>&1 -sudo iptables -D FORWARD -i $main_iface -o $LXC_BRIDGE -j ACCEPT >> "$LOG_BUILD_LXC" 2>&1 -sudo iptables -t nat -D POSTROUTING -s $PLAGE_IP.0/24 -j MASQUERADE >> "$LOG_BUILD_LXC" 2>&1 -sudo ifdown --force $LXC_BRIDGE >> "$LOG_BUILD_LXC" 2>&1 + log_title "Création d'un snapshot" + sudo lxc-snapshot -n $LXC_NAME + # Il sera nommé 
snap0 et stocké dans /var/lib/lxcsnaps/$LXC_NAME/snap0/ -echo -e "\e[1m> Création d'un snapshot\e[0m" | tee -a "$LOG_BUILD_LXC" -sudo lxc-snapshot -n $LXC_NAME >> "$LOG_BUILD_LXC" 2>&1 -# Il sera nommé snap0 et stocké dans /var/lib/lxcsnaps/$LXC_NAME/snap0/ + rm "$lock_file" +} -sudo rm "$script_dir/../pcheck.lock" # Retire le lock +LXC_BUILD 2>&1 | tee -a "./Build_lxc.log" diff --git a/sub_scripts/lxc_check.sh b/sub_scripts/lxc_check.sh index a82c667..0d5ad80 100755 --- a/sub_scripts/lxc_check.sh +++ b/sub_scripts/lxc_check.sh @@ -1,41 +1,18 @@ #!/bin/bash - # Test différents aspect du conteneur pour chercher d'éventuelles erreurs. # Et tente de réparer si possible... -# Récupère le dossier du script -if [ "${0:0:1}" == "/" ]; then script_dir="$(dirname "$0")"; else script_dir="$(echo $PWD/$(dirname "$0" | cut -d '.' -f2) | sed 's@/$@@')"; fi +cd $(dirname $(realpath $0) | sed 's@/sub_scripts$@@g') +source "./sub_scripts/common.sh" no_lock=0 if [ "$1" == "--no-lock" ]; then no_lock=1 fi -ARG_SSH="-t" -# Récupère les informations depuis le fichier de conf (Ou le complète le cas échéant) -pcheck_config="$script_dir/../config" -# Tente de lire les informations depuis le fichier de config si il existe -if [ -e "$pcheck_config" ] -then - PLAGE_IP=$(cat "$pcheck_config" | grep PLAGE_IP= | cut -d '=' -f2) - DOMAIN=$(cat "$pcheck_config" | grep DOMAIN= | cut -d '=' -f2) - YUNO_PWD=$(cat "$pcheck_config" | grep YUNO_PWD= | cut -d '=' -f2) - LXC_NAME=$(cat "$pcheck_config" | grep LXC_NAME= | cut -d '=' -f2) - LXC_BRIDGE=$(cat "$pcheck_config" | grep LXC_BRIDGE= | cut -d '=' -f2) - main_iface=$(cat "$pcheck_config" | grep iface= | cut -d '=' -f2) -fi - # Exit with the correct exit code remove_lock () { - if [ $no_lock -eq 1 ] - then - sudo rm -f "$script_dir/../pcheck.lock" - fi -} - -exit_failure () { - remove_lock - exit 1 + rm -f "$lock_file" } exit_rebuild () { @@ -53,51 +30,6 @@ exit_sane () { exit 0 } -# Use the default value and set it in the config file 
-replace_default_value () { - CONFIG_KEY=$1 - local value=$(grep "|| $CONFIG_KEY=" "$build_script" | cut -d '=' -f2) - if grep -q $CONFIG_KEY= "$pcheck_config" - then - sed -i "s/$CONFIG_KEY=.*/$CONFIG_KEY=$value/" "$pcheck_config" - else - echo -e "$CONFIG_KEY=$value\n" >> "$pcheck_config" - fi - echo $value -} - -# Utilise des valeurs par défaut si les variables sont vides, et génère le fichier de config -if [ -z "$PLAGE_IP" ]; then - PLAGE_IP=$(replace_default_value PLAGE_IP) -fi -if [ -z "$DOMAIN" ]; then - DOMAIN=$(replace_default_value DOMAIN) -fi -if [ -z "$YUNO_PWD" ]; then - YUNO_PWD=$(replace_default_value YUNO_PWD) -fi -if [ -z "$LXC_NAME" ]; then - LXC_NAME=$(replace_default_value LXC_NAME) -fi -if [ -z "$LXC_BRIDGE" ]; then - LXC_BRIDGE=$(replace_default_value LXC_BRIDGE) -fi -if [ -z "$main_iface" ]; then - # Tente de définir l'interface réseau principale - main_iface=$(sudo ip route | grep default | awk '{print $5;}') # Prend l'interface réseau défini par default - if [ -z $main_iface ]; then - echo -e "\e[91mImpossible de déterminer le nom de l'interface réseau de l'hôte.\e[0m" - exit_failure - fi - # Store the main iface in the config file - if grep -q iface= "$pcheck_config" - then - sed -i "s/iface=.*/iface=$main_iface/" - else - echo -e "# Main host iface\niface=$main_iface\n" >> "$pcheck_config" - fi -fi - STOP_CONTAINER () { echo "Arrêt du conteneur $LXC_NAME" sudo lxc-stop -n $LXC_NAME @@ -107,16 +39,16 @@ START_NETWORK () { echo "Initialisation du réseau pour le conteneur." 
sudo ifup $LXC_BRIDGE --interfaces=/etc/network/interfaces.d/$LXC_BRIDGE # Activation des règles iptables - sudo iptables -A FORWARD -i $LXC_BRIDGE -o $main_iface -j ACCEPT - sudo iptables -A FORWARD -i $main_iface -o $LXC_BRIDGE -j ACCEPT - sudo iptables -t nat -A POSTROUTING -s $PLAGE_IP.0/24 -j MASQUERADE + sudo iptables -A FORWARD -i $LXC_BRIDGE -o $MAIN_NETWORK_INTERFACE -j ACCEPT + sudo iptables -A FORWARD -i $MAIN_NETWORK_INTERFACE -o $LXC_BRIDGE -j ACCEPT + sudo iptables -t nat -A POSTROUTING -s $LXC_NETWORK.0/24 -j MASQUERADE } STOP_NETWORK () { echo "Arrêt du réseau pour le conteneur." - sudo iptables -D FORWARD -i $LXC_BRIDGE -o $main_iface -j ACCEPT > /dev/null 2>&1 - sudo iptables -D FORWARD -i $main_iface -o $LXC_BRIDGE -j ACCEPT > /dev/null 2>&1 - sudo iptables -t nat -D POSTROUTING -s $PLAGE_IP.0/24 -j MASQUERADE > /dev/null 2>&1 + sudo iptables -D FORWARD -i $LXC_BRIDGE -o $MAIN_NETWORK_INTERFACE -j ACCEPT > /dev/null 2>&1 + sudo iptables -D FORWARD -i $MAIN_NETWORK_INTERFACE -o $LXC_BRIDGE -j ACCEPT > /dev/null 2>&1 + sudo iptables -t nat -D POSTROUTING -s $LXC_NETWORK.0/24 -j MASQUERADE > /dev/null 2>&1 sudo ifdown --force $LXC_BRIDGE > /dev/null 2>&1 } @@ -278,9 +210,7 @@ LXC_NETWORK_CONFIG () { fi } -if [ $no_lock -eq 0 ]; then - touch "$script_dir/../pcheck.lock" # Met en place le lock de Package check -fi +[ $no_lock -eq 0 ] && touch "$lock_file" STOP_CONTAINER STOP_NETWORK @@ -292,7 +222,7 @@ CREATE_BRIDGE () { echo | sudo tee /etc/network/interfaces.d/$LXC_BRIDGE < /dev/null 2>&1 if [ "$?" -ne 0 ]; then # En cas d'échec de connexion, tente de pinger un autre domaine pour être sûr ping -q -c 2 framasoft.org > /dev/null 2>&1 if [ "$?" -ne 0 ]; then # En cas de nouvel échec de connexion. On considère que la connexion est down... - echo -e "\e[91mL'hôte semble ne pas avoir accès à internet. La connexion internet est indispensable.\e[0m" - exit_failure + critical "L'hôte semble ne pas avoir accès à internet. 
La connexion internet est indispensable." fi fi echo -e "\e[92mL'hôte dispose d'un accès à internet.\e[0m" @@ -397,14 +325,11 @@ echo -e "\e[92mL'hôte dispose d'un accès à internet.\e[0m" ### Test le réseau du conteneur echo -e "\e[1m\n> Test de l'accès internet depuis le conteneur:\e[0m" CHECK_LXC_NET () { - sudo lxc-attach -n $LXC_NAME -- ping -q -c 2 yunohost.org > /dev/null 2>&1 - if [ "$?" -ne 0 ]; then # En cas d'échec de connexion, tente de pinger un autre domaine pour être sûr - sudo lxc-attach -n $LXC_NAME -- ping -q -c 2 framasoft.org > /dev/null 2>&1 - if [ "$?" -ne 0 ]; then # En cas de nouvel échec de connexion. On considère que la connexion est down... - return 1 - fi - fi - return 0 + RUN_INSIDE_LXC ping -q -c 2 yunohost.org > /dev/null 2>&1 \ + || RUN_INSIDE_LXC ping -q -c 2 framasoft.org > /dev/null 2>&1 \ + || return 1 + + return 0 } lxc_net=1 @@ -473,7 +398,7 @@ do else echo -e "\e[92mLe fichier network/interfaces du conteneur est présent.\nMais il va être réécrit par précaution.\e[0m" fi - echo -e "auto lo\niface lo inet loopback\nauto eth0\niface eth0 inet static\n\taddress $PLAGE_IP.2/24\n\tgateway $PLAGE_IP.1" | sudo tee /var/lib/lxc/$LXC_NAME/rootfs/etc/network/interfaces + echo -e "auto lo\niface lo inet loopback\nauto eth0\niface eth0 inet static\n\taddress $LXC_NETWORK.2/24\n\tgateway $LXC_NETWORK.1" | sudo tee /var/lib/lxc/$LXC_NAME/rootfs/etc/network/interfaces fi else echo -e "\e[92mLe conteneur dispose d'un accès à internet.\e[0m" @@ -483,21 +408,17 @@ done ### Test l'accès ssh sur le conteneur echo -e "\e[1m\n> Test de l'accès ssh:\e[0m" -# Check user -if [ "$(whoami)" != "$(cat "$script_dir/setup_user")" ] && test -e "$script_dir/setup_user"; then - echo -e "\e[91mPour tester l'accès ssh, le script doit être exécuté avec l'utilisateur $(cat "$script_dir/setup_user") !\nL'utilisateur actuel est $(whoami).\e[0m" - exit_failure -fi +assert_we_are_the_setup_user sudo lxc-ls -f sleep 3 -ssh $ARG_SSH $LXC_NAME "exit 0" # Test une 
connexion ssh +ssh -t $LXC_NAME "exit 0" # Test une connexion ssh if [ "$?" -eq 0 ]; then echo -e "\e[92mLa connexion ssh est fonctionnelle.\e[0m" else echo -e "\e[91mÉchec de la connexion ssh. Reconfiguration de l'accès ssh.\e[0m" check_repair=1 - ssh $ARG_SSH $LXC_NAME -v "exit 0" # Répète la connexion ssh pour afficher l'erreur. + ssh -t $LXC_NAME -v "exit 0" # Répète la connexion ssh pour afficher l'erreur. echo "Suppression de la config ssh actuelle pour le conteneur." rm -f $HOME/.ssh/$LXC_NAME $HOME/.ssh/$LXC_NAME.pub @@ -505,23 +426,23 @@ else BEGIN_LINE=$(cat $HOME/.ssh/config | grep -n "# ssh $LXC_NAME" | cut -d':' -f 1) sed -i "$BEGIN_LINE,/^IdentityFile/d" $HOME/.ssh/config - ssh-keygen -f "$HOME/.ssh/known_hosts" -R $PLAGE_IP.2 + ssh-keygen -f "$HOME/.ssh/known_hosts" -R $LXC_NETWORK.2 echo "Création de la clé ssh." ssh-keygen -t dsa -f $HOME/.ssh/$LXC_NAME -P '' sudo cp $HOME/.ssh/$LXC_NAME.pub /var/lib/lxc/$LXC_NAME/rootfs/home/pchecker/.ssh/authorized_keys - sudo lxc-attach -n $LXC_NAME -- chown pchecker: -R /home/pchecker/.ssh + RUN_INSIDE_LXC chown pchecker: -R /home/pchecker/.ssh echo "Ajout de la config ssh." echo | tee -a $HOME/.ssh/config <> ~/.ssh/known_hosts # Récupère la clé publique pour l'ajouter au known_hosts - ssh $ARG_SSH $LXC_NAME -v "exit 0" > /dev/null # Test à nouveau la connexion ssh + ssh -t $LXC_NAME -v "exit 0" > /dev/null # Test à nouveau la connexion ssh if [ "$?" -eq 0 ]; then echo -e "\e[92mLa connexion ssh est retablie.\e[0m" else @@ -532,7 +453,7 @@ fi ### Vérifie que Yunohost est installé echo -e "\e[1m\n> Vérifie que Yunohost est installé dans le conteneur:\e[0m" -sudo lxc-attach -n $LXC_NAME -- sudo yunohost -v +RUN_INSIDE_LXC sudo yunohost -v if [ "$?" -ne 0 ]; then # Si la commande échoue, il y a un problème avec Yunohost echo -e "\e[91mYunohost semble mal installé. 
Il est nécessaire de détruire et de reconstruire le conteneur.\e[0m" exit_rebuild diff --git a/sub_scripts/lxc_force_start.sh b/sub_scripts/lxc_force_start.sh deleted file mode 100755 index 8aa9680..0000000 --- a/sub_scripts/lxc_force_start.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash - -# Force le démarrage conteneur et active la config réseau dédiée. - -# Récupère le dossier du script -if [ "${0:0:1}" == "/" ]; then script_dir="$(dirname "$0")"; else script_dir="$(echo $PWD/$(dirname "$0" | cut -d '.' -f2) | sed 's@/$@@')"; fi - -pcheck_config="$script_dir/../config" -PLAGE_IP=$(cat "$pcheck_config" | grep PLAGE_IP= | cut -d '=' -f2) -LXC_NAME=$(cat "$pcheck_config" | grep LXC_NAME= | cut -d '=' -f2) -LXC_BRIDGE=$(cat "$pcheck_config" | grep LXC_BRIDGE= | cut -d '=' -f2) -main_iface=$(cat "$pcheck_config" | grep iface= | cut -d '=' -f2) - -"$script_dir/lxc_force_stop.sh" > /dev/null 2>&1 - -echo "Initialisation du réseau pour le conteneur." -sudo ifup $LXC_BRIDGE --interfaces=/etc/network/interfaces.d/$LXC_BRIDGE - -# Activation des règles iptables -echo "> Configure le parefeu" -sudo iptables -A FORWARD -i $LXC_BRIDGE -o $main_iface -j ACCEPT -sudo iptables -A FORWARD -i $main_iface -o $LXC_BRIDGE -j ACCEPT -sudo iptables -t nat -A POSTROUTING -s $PLAGE_IP.0/24 -j MASQUERADE - -# Démarrage de la machine -echo "> Démarrage de la machine" -sudo lxc-start -n $LXC_NAME -d --logfile "$script_dir/lxc_boot.log" -sleep 3 - -# Vérifie que la machine a démarré -sudo lxc-ls -f - -echo "> Connexion au conteneur:" -echo "Pour exécuter une seule commande:" -echo -e "\e[1msudo lxc-attach -n $LXC_NAME -- commande\e[0m" - -echo "Pour établir une connexion ssh:" -if [ $(cat "$script_dir/setup_user") = "root" ]; then - echo -ne "\e[1msudo " -fi -echo -e "\e[1mssh -t $LXC_NAME 'bash -i'\e[0m" diff --git a/sub_scripts/lxc_force_stop.sh b/sub_scripts/lxc_force_stop.sh deleted file mode 100755 index 49b7a0a..0000000 --- a/sub_scripts/lxc_force_stop.sh +++ /dev/null @@ -1,49 +0,0 @@ 
-#!/bin/bash - -# Force l'arrêt du conteneur et désactive la config réseau dédiée. - -# Récupère le dossier du script -if [ "${0:0:1}" == "/" ]; then script_dir="$(dirname "$0")"; else script_dir="$(echo $PWD/$(dirname "$0" | cut -d '.' -f2) | sed 's@/$@@')"; fi - -pcheck_config="$script_dir/../config" -PLAGE_IP=$(cat "$pcheck_config" | grep PLAGE_IP= | cut -d '=' -f2) -LXC_NAME=$(cat "$pcheck_config" | grep LXC_NAME= | cut -d '=' -f2) -LXC_BRIDGE=$(cat "$pcheck_config" | grep LXC_BRIDGE= | cut -d '=' -f2) -main_iface=$(cat "$pcheck_config" | grep iface= | cut -d '=' -f2) - -echo "> Arrêt de package_check" -# Kill package_check -# Retrieve the pid of Package check -package_check_pid="$(cat "$script_dir/../pcheck.lock" | cut -d: -f3)" -sudo kill --signal 15 $package_check_pid - -echo "> Arrêt du conteneur" -if [ $(sudo lxc-info --name $LXC_NAME | grep -c "STOPPED") -eq 0 ]; then - echo "Arrêt du conteneur $LXC_NAME" - sudo lxc-stop -n $LXC_NAME -fi - -echo "> Suppression des règles de parefeu" -if sudo iptables -C FORWARD -i $LXC_BRIDGE -o $main_iface -j ACCEPT 2> /dev/null -then - sudo iptables -D FORWARD -i $LXC_BRIDGE -o $main_iface -j ACCEPT -fi -if sudo iptables -C FORWARD -i $main_iface -o $LXC_BRIDGE -j ACCEPT 2> /dev/null -then - sudo iptables -D FORWARD -i $main_iface -o $LXC_BRIDGE -j ACCEPT -fi -if sudo iptables -t nat -C POSTROUTING -s $PLAGE_IP.0/24 -j MASQUERADE 2> /dev/null -then - sudo iptables -t nat -D POSTROUTING -s $PLAGE_IP.0/24 -j MASQUERADE -fi - -echo "Arrêt de l'interface réseau pour le conteneur." 
-# Et arrêt du bridge -if sudo ifquery $LXC_BRIDGE --state > /dev/null -then - sudo ifdown --force $LXC_BRIDGE -fi - -sudo lxc-ls -f - -sudo rm "$script_dir/../pcheck.lock" diff --git a/sub_scripts/lxc_remove.sh b/sub_scripts/lxc_remove.sh index ba30102..3db0703 100755 --- a/sub_scripts/lxc_remove.sh +++ b/sub_scripts/lxc_remove.sh @@ -1,58 +1,30 @@ #!/bin/bash -# Récupère le dossier du script -if [ "${0:0:1}" == "/" ]; then script_dir="$(dirname "$0")"; else script_dir="$(echo $PWD/$(dirname "$0" | cut -d '.' -f2) | sed 's@/$@@')"; fi - -pcheck_config="$script_dir/../config" -LXC_NAME=$(cat "$pcheck_config" | grep LXC_NAME= | cut -d '=' -f2) -LXC_BRIDGE=$(cat "$pcheck_config" | grep LXC_BRIDGE= | cut -d '=' -f2) +cd $(dirname $(realpath $0) | sed 's@/sub_scripts$@@g') +source "./sub_scripts/common.sh" # Check user -if [ "$(whoami)" != "$(cat "$script_dir/setup_user")" ] && test -e "$script_dir/setup_user"; then - echo -e "\e[91mCe script doit être exécuté avec l'utilisateur $(cat "$script_dir/setup_user") !\nL'utilisateur actuel est $(whoami)." - echo -en "\e[0m" - exit 0 -fi +assert_we_are_the_setup_user -echo_bold () { - if [ $quiet_remove -eq 0 ] - then - echo -e "\e[1m> $1\e[0m" - fi -} +touch "$lock_file" -quiet_remove=0 -# Check argument "quiet" -if [ "$1" = "quiet" ] -then - quiet_remove=1 -fi - -touch "$script_dir/../pcheck.lock" # Met en place le lock de Package check - -echo_bold "Retire l'ip forwarding." -sudo rm /etc/sysctl.d/lxc_pchecker.conf +log_title "Retire l'ip forwarding." 
+sudo rm -f /etc/sysctl.d/lxc_pchecker.conf sudo sysctl -p -echo_bold "Désactive le bridge réseau" +log_title "Désactive le bridge réseau" sudo ifdown --force $LXC_BRIDGE -echo_bold "Supprime le brige réseau" -sudo rm /etc/network/interfaces.d/$LXC_BRIDGE +log_title "Supprime le brige réseau" +sudo rm -f /etc/network/interfaces.d/$LXC_BRIDGE -echo_bold "Suppression de la machine et de son snapshots" +log_title "Suppression de la machine et de son snapshots" sudo lxc-snapshot -n $LXC_NAME -d snap0 sudo lxc-snapshot -n $LXC_NAME -d snap1 sudo lxc-snapshot -n $LXC_NAME -d snap2 sudo rm -f /var/lib/lxcsnaps/$LXC_NAME/snap0.tar.gz sudo lxc-destroy -n $LXC_NAME -f -if [ $quiet_remove -eq 0 ] -then - echo_bold "Remove lxc lxctl" - sudo apt-get remove lxc lxctl -fi - -echo_bold "Suppression des lignes de pchecker_lxc dans $HOME/.ssh/config" +log_title "Suppression des lignes de pchecker_lxc dans $HOME/.ssh/config" BEGIN_LINE=$(cat $HOME/.ssh/config | grep -n "^# ssh pchecker_lxc$" | cut -d':' -f 1 | tail -n1) sed -i "$BEGIN_LINE,/^IdentityFile/d" $HOME/.ssh/config diff --git a/sub_scripts/lxc_upgrade.sh b/sub_scripts/lxc_upgrade.sh deleted file mode 100755 index a7025bc..0000000 --- a/sub_scripts/lxc_upgrade.sh +++ /dev/null @@ -1,123 +0,0 @@ -#!/bin/bash - -# Récupère le dossier du script -if [ "${0:0:1}" == "/" ]; then script_dir="$(dirname "$0")"; else script_dir="$(echo $PWD/$(dirname "$0" | cut -d '.' -f2) | sed 's@/$@@')"; fi - -if test -e "$script_dir/../pcheck.lock" -then # L'upgrade est annulé - echo "Le fichier $script_dir/../pcheck.lock est présent. Package check est déjà utilisé. Exécution annulée..." 
- exit 0 -fi -touch "$script_dir/../pcheck.lock" # Met en place le lock de Package check - -pcheck_config="$script_dir/../config" -PLAGE_IP=$(cat "$pcheck_config" | grep PLAGE_IP= | cut -d '=' -f2) -LXC_NAME=$(cat "$pcheck_config" | grep LXC_NAME= | cut -d '=' -f2) -LXC_BRIDGE=$(cat "$pcheck_config" | grep LXC_BRIDGE= | cut -d '=' -f2) -main_iface=$(cat "$pcheck_config" | grep iface= | cut -d '=' -f2) - -if [ -z "$main_iface" ]; then - # Tente de définir l'interface réseau principale - main_iface=$(sudo route | grep default | awk '{print $8;}') # Prend l'interface réseau défini par default - if [ -z $main_iface ]; then - echo -e "\e[91mImpossible de déterminer le nom de l'interface réseau de l'hôte.\e[0m" - exit 1 - fi - # Enregistre le nom de l'interface réseau de l'hôte dans un fichier de config - echo -e "# Interface réseau principale de l'hôte\niface=$main_iface\n" >> "$pcheck_config" -fi - -# Check user -if [ "$(whoami)" != "$(cat "$script_dir/setup_user")" ] && test -e "$script_dir/setup_user"; then - echo -e "\e[91mCe script doit être exécuté avec l'utilisateur $(cat "$script_dir/setup_user") !\nL'utilisateur actuel est $(whoami)." - echo -en "\e[0m" - rm "$script_dir/../pcheck.lock" # Retire le lock - exit 0 -fi - -echo -e "\e[1m> Active le bridge réseau\e[0m" -if ! sudo ifquery $LXC_BRIDGE --state > /dev/null -then - sudo ifup $LXC_BRIDGE --interfaces=/etc/network/interfaces.d/$LXC_BRIDGE -fi - -echo -e "\e[1m> Configure le parefeu\e[0m" -if ! sudo iptables -D FORWARD -i $LXC_BRIDGE -o $main_iface -j ACCEPT 2> /dev/null -then - sudo iptables -A FORWARD -i $LXC_BRIDGE -o $main_iface -j ACCEPT -fi -if ! sudo iptables -C FORWARD -i $main_iface -o $LXC_BRIDGE -j ACCEPT 2> /dev/null -then - sudo iptables -A FORWARD -i $main_iface -o $LXC_BRIDGE -j ACCEPT -fi -if ! 
sudo iptables -t nat -C POSTROUTING -s $PLAGE_IP.0/24 -j MASQUERADE 2> /dev/null -then - sudo iptables -t nat -A POSTROUTING -s $PLAGE_IP.0/24 -j MASQUERADE -fi - -echo -e "\e[1m> Démarrage de la machine\e[0m" -if [ $(sudo lxc-info --name $LXC_NAME | grep -c "STOPPED") -eq 0 ]; then - # Si la machine n'est pas à l'arrêt. - sudo lxc-stop -n $LXC_NAME # Arrête la machine LXC -fi -# Restaure le snapshot -sudo rsync -aEAX --delete -i /var/lib/lxcsnaps/$LXC_NAME/snap0/rootfs/ /var/lib/lxc/$LXC_NAME/rootfs/ > /dev/null # Pour être sûr! - -sudo lxc-start -n $LXC_NAME -d -sleep 3 -sudo lxc-ls -f - -echo -e "\e[1m> Update\e[0m" -update_apt=0 -sudo lxc-attach -n $LXC_NAME -- apt-get update -# Wait for apt to be available before the upgrade. -for try in `seq 1 17` -do - # Check if /var/lib/dpkg/lock is used by another process - if sudo lxc-attach -n $LXC_NAME -- lsof /var/lib/dpkg/lock > /dev/null - then - echo "apt is already in use..." - # Sleep an exponential time at each round - sleep $(( try * try )) - fi -done -sudo lxc-attach -n $LXC_NAME -- apt-get dist-upgrade --dry-run | grep -q "^Inst " # Vérifie si il y aura des mises à jour. - -if [ "$?" 
-eq 0 ]; then - update_apt=1 -fi -echo -e "\e[1m> Upgrade\e[0m" -sudo lxc-attach -n $LXC_NAME -- apt-get dist-upgrade --option Dpkg::Options::=--force-confold -yy - -echo -e "\e[1m> Clean\e[0m" -sudo lxc-attach -n $LXC_NAME -- apt-get autoremove -y -sudo lxc-attach -n $LXC_NAME -- apt-get autoclean -if [ "$update_apt" -eq 1 ] -then # Print les numéros de version de Yunohost, si il y a eu un upgrade - (sudo lxc-attach -n $LXC_NAME -- yunohost -v) | sudo tee "$script_dir/ynh_version" -fi - -# Disable password strength check -ssh $ARG_SSH $LXC_NAME "sudo yunohost settings set security.password.admin.strength -v -1" | tee -a "$LOG_BUILD_LXC" 2>&1 -ssh $ARG_SSH $LXC_NAME "sudo yunohost settings set security.password.user.strength -v -1" | tee -a "$LOG_BUILD_LXC" 2>&1 - -echo -e "\e[1m> Arrêt de la machine virtualisée\e[0m" -sudo lxc-stop -n $LXC_NAME - -echo -e "\e[1m> Suppression des règles de parefeu\e[0m" -sudo iptables -D FORWARD -i $LXC_BRIDGE -o $main_iface -j ACCEPT -sudo iptables -D FORWARD -i $main_iface -o $LXC_BRIDGE -j ACCEPT -sudo iptables -t nat -D POSTROUTING -s $PLAGE_IP.0/24 -j MASQUERADE -sudo ifdown --force $LXC_BRIDGE - - -if [ "$update_apt" -eq 1 ] -then - echo -e "\e[1m> Archivage du snapshot\e[0m" - sudo tar -cz --acls --xattrs -f /var/lib/lxcsnaps/$LXC_NAME/snap0.tar.gz /var/lib/lxcsnaps/$LXC_NAME/snap0 - echo -e "\e[1m> Remplacement du snapshot\e[0m" - sudo lxc-snapshot -n $LXC_NAME -d snap0 - sudo lxc-snapshot -n $LXC_NAME -fi - -sudo rm "$script_dir/../pcheck.lock" # Retire le lock diff --git a/sub_scripts/notifications.sh b/sub_scripts/notifications.sh new file mode 100755 index 0000000..7dbd417 --- /dev/null +++ b/sub_scripts/notifications.sh @@ -0,0 +1,196 @@ +#!/bin/bash + +#================================================= +# Determine if it's a CI environment +#================================================= + +# By default, it's a standalone execution. 
+type_exec_env=0 +# CI environment +[ -e "./../config" ] && type_exec_env=1 +# Official CI environment +[ -e "./../auto_build/auto.conf" ] && type_exec_env=2 + + +# Try to find a optionnal email address to notify the maintainer +# In this case, this email will be used instead of the email from the manifest. +notification_email="$(grep -m1 "^Email=" $TEST_CONTEXT/check_process.options | cut -d '=' -f2)" + +# Try to find a optionnal option for the grade of notification +notification_mode="$(grep -m1 "^Notification=" $TEST_CONTEXT/check_process.options | cut -d '=' -f2)" + + +#================================================= +# Notification grade +#================================================= + +notif_grade () { + # Check the level of notification from the check_process. + # Echo 1 if the grade is reached + + compare_grade () + { + if echo "$notification_mode" | grep -q "$1"; then + echo 1 + else + echo 0 + fi + } + + case "$1" in + all) + # If 'all' is needed, only a grade of notification at 'all' can match + compare_grade "^all$" + ;; + change) + # If 'change' is needed, notification at 'all' or 'change' can match + compare_grade "^all$\|^change$" + ;; + down) + # If 'down' is needed, notification at 'all', 'change' or 'down' match + compare_grade "^all$\|^change$\|^down$" + ;; + *) + echo 0 + ;; + esac +} + +#================================================= +# Inform of the results by XMPP and/or by mail +#================================================= + +send_mail=0 + +# If package check it's in the official CI environment +# Check the level variation +if [ $type_exec_env -eq 2 ] +then + + # Get the job name, stored in the work_list + job=$(head -n1 "./../work_list" | cut -d ';' -f 3) + + # Identify the type of test, stable (0), testing (1) or unstable (2) + # Default stable + test_type=0 + message="" + if echo "$job" | grep -q "(testing)" + then + message="(TESTING) " + test_type=1 + elif echo "$job" | grep -q "(unstable)" + then + message="(UNSTABLE) " 
+ test_type=2 + fi + + # Build the log path (and replace all space by %20 in the job name) + if [ -n "$job" ]; then + if systemctl list-units | grep --quiet jenkins + then + job_log="/job/${job// /%20}/lastBuild/console" + elif systemctl list-units | grep --quiet yunorunner + then + # Get the directory of YunoRunner + ci_dir="$(grep WorkingDirectory= /etc/systemd/system/yunorunner.service | cut -d= -f2)" + # List the jobs from YunoRunner and grep the job (without Community or Official). + job_id="$(cd "$ci_dir"; ve3/bin/python ciclic list | grep ${job%% *} | head -n1)" + # Keep only the id of the job, by removing everything after - + job_id="${job_id%% -*}" + # And remove any space before the id. + job_id="${job_id##* }" + job_log="/job/$job_id" + fi + fi + + # If it's a test on testing or unstable + if [ $test_type -gt 0 ] + then + # Remove unstable or testing of the job name to find its stable version in the level list + job="${job% (*)}" + fi + + # Get the previous level, found in the file list_level_stable + previous_level=$(grep "^$job:" "./../auto_build/list_level_stable" | cut -d: -f2) + + # Print the variation of the level. 
If this level is different than 0 + if [ $global_level -gt 0 ] + then + message="${message}Application $app_id" + # If non previous level was found + if [ -z "$previous_level" ]; then + message="$message just reach the level $global_level" + send_mail=$(notif_grade all) + # If the level stays the same + elif [ $global_level -eq $previous_level ]; then + message="$message stays at level $global_level" + # Need notification at 'all' to notify by email + send_mail=$(notif_grade all) + # If the level go up + elif [ $global_level -gt $previous_level ]; then + message="$message rise from level $previous_level to level $global_level" + # Need notification at 'change' to notify by email + send_mail=$(notif_grade change) + # If the level go down + elif [ $global_level -lt $previous_level ]; then + message="$message go down from level $previous_level to level $global_level" + # Need notification at 'down' to notify by email + send_mail=$(notif_grade down) + fi + fi +fi + +# If the app completely failed and obtained 0 +if [ $global_level -eq 0 ] +then + message="${message}Application $app_id has completely failed the continuous integration tests" + + # Always send an email if the app failed + send_mail=1 +fi + +subject="[YunoHost] $message" + +# If the test was perform in the official CI environment +# Add the log address +# And inform with xmpp +if [ $type_exec_env -eq 2 ] +then + + # Build the address of the server from auto.conf + ci_path=$(grep "DOMAIN=" "./../auto_build/auto.conf" | cut -d= -f2)/$(grep "CI_PATH=" "./../auto_build/auto.conf" | cut -d= -f2) + + # Add the log adress to the message + message="$message on https://$ci_path$job_log" + + # Send a xmpp notification on the chat room "apps" + # Only for a test with the stable version of YunoHost + if [ $test_type -eq 0 ] + then + "./../auto_build/xmpp_bot/xmpp_post.sh" "$message" > /dev/null 2>&1 + fi +fi + +# Send a mail to main maintainer according to notification option in the check_process. 
+# Only if package check is in a CI environment (Official or not) +if [ $type_exec_env -ge 1 ] && [ $send_mail -eq 1 ] +then + + # Add a 'from' header for the official CI only. + # Apparently, this trick is not needed anymore !? + # if [ $type_exec_env -eq 2 ]; then + # from_yuno="-a \"From: yunohost@yunohost.org\"" + # fi + + # Get the maintainer email from the manifest. If it doesn't found if the check_process + if [ -z "$notification_email" ]; then + notification_email=$(grep '\"email\": ' "$package_path/manifest.json" | cut -d '"' -f 4) + fi + + # Send the message by mail, if a address has been find + if [ -n "$notification_email" ]; then + mail $from_yuno -s "$subject" "$notification_email" <<< "$message" + fi +fi + + diff --git a/sub_scripts/testing_process.sh b/sub_scripts/testing_process.sh old mode 100644 new mode 100755 index 50ae185..7cb3e51 --- a/sub_scripts/testing_process.sh +++ b/sub_scripts/testing_process.sh @@ -1,41 +1,35 @@ #!/bin/bash -echo -e "Loads functions from testing_process.sh" - #================================================= break_before_continue () { - # Make a break if auto_remove is set - if [ $auto_remove -eq 0 ] && [ $bash_mode -ne 1 ] + if [ $interactive -eq 1 ] then - LXC_CONNECT_INFO # Print access information + echo "To execute one command:" + echo " sudo lxc-attach -n $LXC_NAME -- command" + echo "To establish a ssh connection:" + echo " ssh -t $LXC_NAME" + read -p "Press a key to delete the application and continue...." 
< /dev/tty fi } +start_test () { -#================================================= + total_number_of_test=$(grep -c "=1$" $test_serie_dir/tests_to_perform) -PRINT_YUNOHOST_VERSION () { - small_title "YunoHost versions" + log_title "$1 [Test $current_test_number/$total_number_of_test]" - # Print the version of YunoHost from the LXC container - LXC_START "sudo yunohost --version" - - # Get the version of YunoHost from the LXC container - ynh_version=$(ssh -q $lxc_name "sudo yunohost --version --output-as json | jq '.yunohost.version' | tr -d '\"'") + # Increment the value of the current test + current_test_number=$((current_test_number+1)) } -#================================================= -# Install and remove an app -#================================================= - -run_yunohost() { +RUN_YUNOHOST_CMD() { # --output-as none is to disable the json-like output for some commands like backup create LXC_START "sudo PACKAGE_CHECK_EXEC=1 yunohost --output-as none --debug $1" \ - | grep -v --extended-regexp '^[0-9]+\s+.{1,15}DEBUG' \ + | grep --line-buffered -v --extended-regexp '^[0-9]+\s+.{1,15}DEBUG' \ | grep -v 'processing action' returncode=${PIPESTATUS[0]} @@ -43,128 +37,130 @@ run_yunohost() { return $returncode } +SET_RESULT() { + sed --in-place "s/RESULT_$1=.*$/RESULT_$1=$2/g" $test_serie_dir/results +} -SETUP_APP () { - # Install an application in a LXC container +SET_RESULT_IF_NONE_YET() { + if [ $(GET_RESULT $1) -eq 0 ] + then + sed --in-place "s/RESULT_$1=.*$/RESULT_$1=$2/g" $test_serie_dir/results + fi +} + +GET_RESULT() { + grep "RESULT_$1=" $test_serie_dir/results | awk -F= '{print $2}' +} + +#================================================= +# Install and remove an app +#================================================= + +INSTALL_APP () { + + local install_args="$(cat "$test_serie_dir/install_args")" + for arg_override in "$@" + do + key="$(echo $arg_override | cut -d '=' -f 1)" + value="$(echo $arg_override | cut -d '=' -f 2-)" + 
install_args=$(echo $install_args | sed "s@$key=[^&]*\&@$key=$value\&@") + done # Uses the default snapshot current_snapshot=snap0 # Exec the pre-install instruction, if there one - preinstall_script_template="$script_dir/tmp_context_for_tests/preinstall.sh.template" - if [ -e "$preinstall_script_template" ] + preinstall_script_template="$test_serie_dir/preinstall.sh.template" + if [ -n "$(cat $preinstall_script_template)" ] then - small_title "Pre installation request" + log_small_title "Pre installation request" # Start the lxc container LXC_START "true" # Copy all the instructions into a script - preinstall_script="$script_dir/tmp_context_for_tests/preinstall.sh" + preinstall_script="$test_serie_dir/preinstall.sh" cp "$preinstall_script_template" "$preinstall_script" chmod +x "$preinstall_script" # Hydrate the template with variables - sed -i "s/\$USER/$test_user/" "$preinstall_script" - sed -i "s/\$DOMAIN/$main_domain/" "$preinstall_script" - sed -i "s/\$SUBDOMAIN/$sub_domain/" "$preinstall_script" - sed -i "s/\$PASSWORD/$yuno_pwd/" "$preinstall_script" + sed -i "s/\$USER/$TEST_USER/" "$preinstall_script" + sed -i "s/\$DOMAIN/$DOMAIN/" "$preinstall_script" + sed -i "s/\$SUBDOMAIN/$SUBDOMAIN/" "$preinstall_script" + sed -i "s/\$PASSWORD/$YUNO_PWD/" "$preinstall_script" # Copy the pre-install script into the container. - scp -rq "$preinstall_script" "$lxc_name": + scp -rq "$preinstall_script" "$LXC_NAME": # Then execute the script to execute the pre-install commands. LXC_START "./preinstall.sh >&2" fi # Install the application in a LXC container - run_yunohost "app install --force '$package_dir' -a '$manifest_args_mod'" + RUN_YUNOHOST_CMD "app install --force ./app_folder/ -a '$install_args'" # yunohost_result gets the return code of the installation yunohost_result=$? # Print the result of the install command if [ $yunohost_result -eq 0 ]; then - debug "Installation successful." + log_debug "Installation successful." else - error "Installation failed. 
($yunohost_result)" + log_error "Installation failed. ($yunohost_result)" fi - # Retrieve the app id in the log. To manage the app after - ynh_app_id=$(sudo tac "$yunohost_log" | grep --only-matching --max-count=1 "YNH_APP_INSTANCE_NAME=[^ ]*" | cut --delimiter='=' --fields=2) + return $yunohost_result } -STANDARD_SETUP_APP () { +LOAD_SNAPSHOT_OR_INSTALL_APP () { # Try to find an existing snapshot for this install, or make an install # If it's a root install if [ "$check_path" = "/" ] then - # Check if a snapshot already exist for this install - if [ -z "$root_snapshot" ] - then - # Make an installation - SETUP_APP - - # Create a snapshot for this installation, to be able to reuse it instead of a new installation. - # But only if this installation has worked fine - if [ $yunohost_result -eq 0 ]; then - # Check if a snapshot already exist for a root install - if [ -z "$root_snapshot" ] - then - debug "Creating a snapshot for root installation." - create_temp_backup 2 - root_snapshot=snap2 - fi - fi - else - # Or uses an existing snapshot - debug "Reusing an existing snapshot for root installation." - use_temp_snapshot $root_snapshot - fi - - # In case of sub path install, use another snapshot + install_type="root" + snapshot=$root_snapshot + snapshot_id=2 else - # Check if a snapshot already exist for this install - if [ -z "$subpath_snapshot" ] - then - # Make an installation - SETUP_APP + install_type="subpath" + snapshot=$subpath_snapshot + snapshot_id=1 + fi - # Create a snapshot for this installation, to be able to reuse it instead of a new installation. - # But only if this installation has worked fine - if [ $yunohost_result -eq 0 ]; then - # Check if a snapshot already exist for a subpath (or no_url) install - if [ -z "$subpath_snapshot" ] - then - debug "Creating a snapshot for sub path installation." 
- create_temp_backup 1 - root_snapshot=snap1 - fi - fi - else - # Or uses an existing snapshot - debug "Reusing an existing snapshot for sub path installation." - use_temp_snapshot $subpath_snapshot + # Create a snapshot if needed + if [ -z "$snapshot" ] + then + # Create a snapshot for this installation, to be able to reuse it instead of a new installation. + # But only if this installation has worked fine + if INSTALL_APP + then + log_debug "Creating a snapshot for $install_type installation." + CREATE_LXC_SNAPSHOT $snapshot_id + root_snapshot=snap$snapshot_id fi + else + # Or uses an existing snapshot + log_debug "Reusing an existing snapshot for $install_type installation." + LOAD_LXC_SNAPSHOT $snapshot fi } REMOVE_APP () { # Remove an application - # Make a break if auto_remove is set break_before_continue - small_title "Removing the app..." + log_small_title "Removing the app..." # Remove the application from the LXC container - run_yunohost "app remove '$ynh_app_id'" + RUN_YUNOHOST_CMD "app remove $app_id" # yunohost_remove gets the return code of the deletion - yunohost_remove=$? + local yunohost_remove=$? # Print the result of the remove command if [ "$yunohost_remove" -eq 0 ]; then - debug "Remove successful." + log_debug "Remove successful." else - error "Remove failed. ($yunohost_remove)" + log_error "Remove failed. ($yunohost_remove)" fi + + return $yunohost_remove } #================================================= @@ -173,6 +169,8 @@ REMOVE_APP () { VALIDATE_THAT_APP_CAN_BE_ACCESSED () { + local app_id_to_check=${1:-$app_id} + # Not checking this if this ain't relevant for the current test / app if [ $enable_validate_that_app_can_be_accessed == "true" ] then @@ -181,35 +179,18 @@ VALIDATE_THAT_APP_CAN_BE_ACCESSED () { return fi - small_title "Validating that the app can (or cannot) be accessed with its url..." + log_small_title "Validating that the app can (or cannot) be accessed with its url..." 
# Force a skipped_uris if public mode is not set - if [ "$install_type" != "private" ] && [ "$install_type" != "public" ] && [ -z "$public_arg" ] + if [ "$install_type" != "private" ] && [ "$install_type" != "public" ] then - warning "Forcing public access using a skipped_uris setting" + log_warning "Forcing public access using a skipped_uris setting" # Add a skipped_uris on / for the app - run_yunohost "app setting '$ynh_app_id' skipped_uris -v \"/\"" + RUN_YUNOHOST_CMD "app setting $app_id_to_check skipped_uris -v \"/\"" # Regen the config of sso - run_yunohost "app ssowatconf" + RUN_YUNOHOST_CMD "app ssowatconf" fi - # Inform /etc/hosts with the IP of LXC to resolve the domain. - # This is set only here and not before to prevent to help the app's scripts - echo -e "$ip_range.2 $main_domain #package_check\n$ip_range.2 $sub_domain #package_check" | sudo tee --append /etc/hosts > /dev/null - - # Try to resolv the domain during 10 seconds maximum. - local i=0 - for i in `seq 1 10`; do - curl --location --insecure $check_domain > /dev/null 2>&1 - # If curl return 6, it's an error "Could not resolve host" - if [ $? -ne 6 ]; then - # If not, curl is ready to work. - break - fi - echo -n . - sleep 1 - done - # curl_error indicate the result of curl test curl_error=0 # 503 Service Unavailable can would have some time to work. 
@@ -254,25 +235,31 @@ VALIDATE_THAT_APP_CAN_BE_ACCESSED () { fi # Remove the previous curl output - rm -f "$script_dir/url_output" + rm -f "./url_output" # Call curl to try to access to the url of the app - curl --location --insecure --silent --show-error --write-out "%{http_code};%{url_effective}\n" $check_domain$curl_check_path --output "$script_dir/url_output" > "$script_dir/curl_print" + curl --location --insecure --silent --show-error \ + --header "Host: $check_domain" \ + --resolve $check_domain:443:$LXC_NETWORK.2 \ + --write-out "%{http_code};%{url_effective}\n" \ + --output "./url_output" \ + $check_domain$curl_check_path \ + > "./curl_print" # Analyze the result of curl command if [ $? -ne 0 ] then - error "Connection error..." + log_error "Connection error..." curl_error=1 fi # Print informations about the connection - local http_code=$(cat "$script_dir/curl_print" | cut -d ';' -f1) + local http_code=$(cat "./curl_print" | cut -d ';' -f1) test_url_details=" Test url: $check_domain$curl_check_path - Real url: $(cat "$script_dir/curl_print" | cut --delimiter=';' --fields=2) + Real url: $(cat "./curl_print" | cut --delimiter=';' --fields=2) HTTP code: $http_code" - debug "$test_url_details" + log_debug "$test_url_details" # Analyze the http code if [ "${http_code:0:1}" = "0" ] || [ "${http_code:0:1}" = "4" ] || [ "${http_code:0:1}" = "5" ] || [ "${http_code:0:1}" = "6" ] @@ -287,7 +274,7 @@ VALIDATE_THAT_APP_CAN_BE_ACCESSED () { if [ "${http_code}" = "503" ] then curl_error=0 - warning "Service temporarily unavailable" + log_warning "Service temporarily unavailable" # 3 successive error are allowed http503=$(( http503 + 1 )) if [ $http503 -ge 3 ]; then @@ -305,21 +292,21 @@ VALIDATE_THAT_APP_CAN_BE_ACCESSED () { fi if [ $curl_error -eq 1 ]; then - error "The HTTP code shows an error." + log_error "The HTTP code shows an error." 
fi fi # Analyze the output of curl - if [ -e "$script_dir/url_output" ] + if [ -e "./url_output" ] then # Print the title of the page - local page_title=$(grep "" "$script_dir/url_output" | cut --delimiter='>' --fields=2 | cut --delimiter='<' --fields=1) - debug "Title of the page: $page_title" + local page_title=$(grep "<title>" "./url_output" | cut --delimiter='>' --fields=2 | cut --delimiter='<' --fields=1) + log_debug "Title of the page: $page_title" # Check if the page title is neither the YunoHost portail or default nginx page if [ "$page_title" = "YunoHost Portal" ] then - debug "The connection attempt fall on the YunoHost portal." + log_debug "The connection attempt fall on the YunoHost portal." yuno_portal=1 else yuno_portal=0 @@ -327,79 +314,22 @@ VALIDATE_THAT_APP_CAN_BE_ACCESSED () { then # Falling on nginx default page is an error. curl_error=1 - error "The connection attempt fall on nginx default page." + log_error "The connection attempt fall on nginx default page." fi # Print the first 20 lines of the page - debug "Extract of the page:" - page_extract=$(lynx -dump -force_html "$script_dir/url_output" | head --lines 20 | tee -a "$complete_log") + log_debug "Extract of the page:" + page_extract=$(lynx -dump -force_html "./url_output" | head --lines 20 | tee -a "$complete_log") - if [ $show_resources -eq 1 ] - then - # Get all the resources for the main page of the app. - local HTTP_return - local moved=0 - local ignored=0 - while read HTTP_return - do - # Ignore robots.txt and ynhpanel.js. They always redirect to the portal. - if echo "$HTTP_return" | grep --quiet "$check_domain/robots.txt\|$check_domain/ynhpanel.js"; then - debug "Ressource ignored: ${HTTP_return##*http*://}" - ignored=1 - fi - - # If it's the line with the resource to get - if echo "$HTTP_return" | grep --quiet "^--.*-- http" - then - # Get only the resource itself. - local resource=${HTTP_return##*http*://} - # Else, if would be the HTTP return code. 
- else - # If the return code is different than 200. - if ! echo "$HTTP_return" | grep --quiet "200 OK$" - then - # Skipped the check of ignored ressources. - if [ $ignored -eq 1 ] - then - ignored=0 - continue - fi - # Isolate the http return code. - http_code="${HTTP_return##*awaiting response... }" - http_code="${http_code:0:3}" - # If the return code is 301 or 302, let's check the redirection. - if echo "$HTTP_return" | grep --quiet "30[12] Moved" - then - debug "Ressource moved: $resource" - moved=1 - else - error "Resource unreachable (Code $http_code) $resource" - #curl_error=1 - moved=0 - fi - else - if [ $moved -eq 1 ] - then - if echo "$resource" | grep --quiet "/yunohost/sso/" - then - error "The previous resource is redirected to the YunoHost portal" - #curl_error=1 - fi - fi - moved=0 - fi - fi - done <<< "$(cd "$package_path"; LC_ALL=C wget --adjust-extension --page-requisites --no-check-certificate $check_domain$curl_check_path 2>&1 | grep "^--.*-- http\|^HTTP request sent")" - fi fi fi done if [[ $curl_error -ne 0 ]] then - warning "$test_url_details" - warning "Page title: $page_title" - warning "Page extract: $page_extract" + log_warning "$test_url_details" + log_warning "Page title: $page_title" + log_warning "Page extract: $page_extract" fi # Detect the issue alias_traversal, https://github.com/yandex/gixy/blob/master/docs/en/plugins/aliastraversal.md @@ -409,71 +339,33 @@ VALIDATE_THAT_APP_CAN_BE_ACCESSED () { <title>alias_traversal test

alias_traversal test

If you see this page, you have failed the test for alias_traversal issue." \ - | sudo tee /var/lib/lxc/$lxc_name/rootfs/var/www/html/alias_traversal.html > /dev/null + | sudo tee $LXC_ROOTFS/var/www/html/alias_traversal.html > /dev/null curl --location --insecure --silent $check_domain$check_path../html/alias_traversal.html \ | grep "title" | grep --quiet "alias_traversal test" \ - && error "Issue alias_traversal detected ! Please see here https://github.com/YunoHost/example_ynh/pull/45 to fix that." && RESULT_alias_traversal=1 - - # Remove the entries in /etc/hosts for the test domain - sudo sed --in-place '/#package_check/d' /etc/hosts + && log_error "Issue alias_traversal detected ! Please see here https://github.com/YunoHost/example_ynh/pull/45 to fix that." \ + && SET_RESULT alias_traversal 1 } #================================================= # Generic functions for unit tests #================================================= -start_test () { - - title "$1 [Test $current_test_number/$total_number_of_test]" - - # Increment the value of the current test - current_test_number=$((current_test_number+1)) -} - -replace_manifest_key () { - # Replace a generic manifest key by another - # $1 = Manifest key - # $2 = Replacement value - - # Build the variable name by concatenate $1 and _arg - local manifest_key=$(eval echo \$${1}_arg) - - if [ -n "$manifest_key" ] - then - manifest_args_mod=$(echo $manifest_args_mod | sed "s@$manifest_key=[^&]*\&@${manifest_key}=${2}\&@") - fi -} - check_test_result () { # Check the result and print SUCCESS or FAIL if [ $yunohost_result -eq 0 ] && [ $curl_error -eq 0 ] && [ $yuno_portal -eq 0 ] then - report_test_success + log_report_test_success return 0 else - report_test_failed + log_report_test_failed return 1 fi } -check_test_result_remove () { - - # Check the result of a remove and print SUCCESS or FAIL - - if [ $yunohost_remove -eq 0 ] - then - report_test_success - return 0 - else - report_test_failed - return 1 - fi -} - 
-is_install_failed () { +validate_that_at_least_one_install_succeeded () { # Check if an install have previously work # If the test for install in sub dir isn't desactivated @@ -482,7 +374,7 @@ is_install_failed () { then # If a test succeed or if force_install_ok is set # Or if $setup_sub_dir isn't set in the check_process - if [ $RESULT_check_sub_dir -eq 1 ] || [ $force_install_ok -eq 1 ] || [ $setup_sub_dir -eq -1 ] + if [ $(GET_RESULT check_sub_dir) -eq 1 ] || [ $force_install_ok -eq 1 ] || [ $setup_sub_dir -eq -1 ] then # Validate installation in sub dir. sub_dir_install=1 @@ -498,7 +390,7 @@ is_install_failed () { then # If a test succeed or if force_install_ok is set # Or if $setup_root isn't set in the check_process - if [ $RESULT_check_root -eq 1 ] || [ $force_install_ok -eq 1 ] || [ $setup_root -eq -1 ] + if [ $(GET_RESULT check_root) -eq 1 ] || [ $force_install_ok -eq 1 ] || [ $setup_root -eq -1 ] then # Validate installation on root. root_install=1 @@ -509,7 +401,7 @@ is_install_failed () { if [ $sub_dir_install -eq 0 ] && [ $root_install -eq 0 ] then - error "All installs failed, therefore this test cannot be performed..." + log_error "All installs failed, therefore this test cannot be performed..." 
return 1 fi } @@ -518,7 +410,7 @@ is_install_failed () { # Unit tests #================================================= -CHECK_SETUP () { +TEST_INSTALL () { # Try to install in a sub path, on root or without url access # $1 = install type @@ -531,38 +423,29 @@ CHECK_SETUP () { start_test "Installation without url access" fi - # Copy original arguments - local manifest_args_mod="$manifest_arguments" - # Replace manifest key for the test - check_domain=$sub_domain - replace_manifest_key "domain" "$check_domain" + check_domain=$SUBDOMAIN if [ "$install_type" = "subdir" ]; then local check_path=$test_path elif [ "$install_type" = "root" ]; then local check_path=/ fi - replace_manifest_key "path" "$check_path" - replace_manifest_key "user" "$test_user" - replace_manifest_key "public" "$public_public_arg" # Install the application in a LXC container - SETUP_APP + INSTALL_APP "domain=$check_domain" "path=$check_path" "user=$TEST_USER" "is_public=1" # Try to access the app by its url VALIDATE_THAT_APP_CAN_BE_ACCESSED # Check the result and print SUCCESS or FAIL if check_test_result - then # Success - RESULT_global_setup=1 # Installation succeed - local check_result_setup=1 # Installation succeed - else # Fail + then # Success + SET_RESULT global_setup 1 # Installation succeed + local check_result_setup=1 # Installation succeed + else # Fail # The global success for a installation can't be failed if another installation succeed - if [ $RESULT_global_setup -ne 1 ]; then - RESULT_global_setup=-1 # Installation failed - fi - local check_result_setup=-1 # Installation failed + SET_RESULT_IF_NONE_YET global_setup -1 + local check_result_setup=-1 # Installation failed fi # Create a snapshot for this installation, to be able to reuse it instead of a new installation. @@ -573,8 +456,8 @@ CHECK_SETUP () { # Check if a snapshot already exist for a root install if [ -z "$root_snapshot" ] then - debug "Create a snapshot for root installation." 
- create_temp_backup 2 + log_debug "Create a snapshot for root installation." + CREATE_LXC_SNAPSHOT 2 root_snapshot=snap2 fi else @@ -582,69 +465,71 @@ CHECK_SETUP () { if [ -z "$subpath_snapshot" ] then # Then create a snapshot - debug "Create a snapshot for sub path installation." - create_temp_backup 1 + log_debug "Create a snapshot for sub path installation." + CREATE_LXC_SNAPSHOT 1 subpath_snapshot=snap1 fi fi fi # Remove the application - REMOVE_APP - - # Check the result and print SUCCESS or FAIL - if check_test_result_remove - then # Success - local check_result_remove=1 # Remove in sub path succeed - RESULT_global_remove=1 # Remove succeed - else # Fail + if REMOVE_APP + then + log_report_test_success + local check_result_remove=1 + SET_RESULT global_remove 1 + else + log_report_test_failed # The global success for a deletion can't be failed if another remove succeed - if [ $RESULT_global_remove -ne 1 ]; - then - RESULT_global_remove=-1 # Remove failed - fi - local check_result_remove=-1 # Remove in sub path failed + SET_RESULT_IF_NONE_YET global_remove -1 + local check_result_remove=-1 fi # Reinstall the application after the removing # Try to resintall only if the first install is a success. if [ $check_result_setup -eq 1 ] then - small_title "Reinstall the application after a removing." + log_small_title "Reinstall the application after a removing." 
- SETUP_APP
+ INSTALL_APP "domain=$check_domain" "path=$check_path" "user=$TEST_USER" "is_public=1"

 # Try to access the app by its url
 VALIDATE_THAT_APP_CAN_BE_ACCESSED

 # Check the result and print SUCCESS or FAIL
 if check_test_result
- then # Success
- local check_result_setup=1 # Installation succeed
- else # Fail
- local check_result_setup=-1 # Installation failed
+ then # Success
+ local check_result_setup=1 # Installation succeed
+ else # Fail
+ local check_result_setup=-1 # Installation failed
 fi
 fi

 # Fill the correct variable depend on the type of test
 if [ "$install_type" = "subdir" ]
 then
- RESULT_check_sub_dir=$check_result_setup
- RESULT_check_remove_sub_dir=$check_result_remove
- else # root and no_url
- RESULT_check_root=$check_result_setup
- RESULT_check_remove_root=$check_result_remove
+ SET_RESULT check_sub_dir $check_result_setup
+ SET_RESULT check_remove_sub_dir $check_result_remove
+ else # root and no_url
+ SET_RESULT check_root $check_result_setup
+ SET_RESULT check_remove_root $check_result_remove
 fi

- # Make a break if auto_remove is set
 break_before_continue
 }

-CHECK_UPGRADE () {
+TEST_UPGRADE () {

 # Try the upgrade script

+ commits=""
+ while read -r LINE
+ do
+ commit=$(echo "$LINE" | grep -o "from_commit=.*" | awk -F= '{print $2}')
+ [ -n "$commit" ] && commits+="$commit " || commits+="current "
+ done < <(grep "^upgrade=1" "$test_serie_dir/check_process.tests_infos")
+
 # Do an upgrade test for each commit in the upgrade list
- while read <&4 commit
+ for commit in $commits
 do
 if [ "$commit" == "current" ]
 then
@@ -664,77 +549,67 @@

 # Check if an install have previously work
 # Abort if none install worked
- is_install_failed || return
-
- # Copy original arguments
- local manifest_args_mod="$manifest_arguments"
+ validate_that_at_least_one_install_succeeded || return

 # Replace manifest key for the test
- check_domain=$sub_domain
- replace_manifest_key "domain" "$check_domain"
+ check_domain=$SUBDOMAIN

 # Use a path according to 
previous succeeded installs if [ $sub_dir_install -eq 1 ]; then local check_path=$test_path else local check_path=/ fi - replace_manifest_key "path" "$check_path" - replace_manifest_key "user" "$test_user" - replace_manifest_key "public" "$public_public_arg" # Install the application in a LXC container - small_title "Preliminary install..." + log_small_title "Preliminary install..." if [ "$commit" == "current" ] then # If no commit is specified, use the current version. - STANDARD_SETUP_APP + LOAD_SNAPSHOT_OR_INSTALL_APP "domain=$check_domain" "path=$check_path" "user=$TEST_USER" "is_public=1" else - # Otherwise, use a specific commit - # Backup the modified arguments - update_manifest_args="$manifest_args_mod" # Get the arguments of the manifest for this upgrade. - manifest_args_mod="$(grep "^manifest_arg=" "$check_process_section" | cut -d'=' -f2-)" - if [ -z "$manifest_args_mod" ]; then - # If there's no specific arguments, use the previous one. - manifest_args_mod="$update_manifest_args" - else - # Otherwise, keep the new arguments, and replace the variables. 
- manifest_args_mod="${manifest_args_mod//DOMAIN/$check_domain}" - manifest_args_mod="${manifest_args_mod//PATH/$check_path}" - manifest_args_mod="${manifest_args_mod//USER/$test_user}" + specific_upgrade_args="$(grep "^manifest_arg=" "$check_process_section" | cut -d'=' -f2-)" + if [ -n "$specific_upgrade_args" ]; then + cp "$test_serie_dir/install_args" "$test_serie_dir/install_args.bkp" + echo $specific_upgrade_args > "$test_serie_dir/install_args" fi + # Make a backup of the directory + # and Change to the specified commit sudo cp -a "$package_path" "${package_path}_back" - # Change to the specified commit (cd "$package_path"; git checkout --force --quiet "$commit") + # Install the application - SETUP_APP + INSTALL_APP "domain=$check_domain" "path=$check_path" "user=$TEST_USER" + + if [ -n "$specific_upgrade_args" ]; then + mv "$test_serie_dir/install_args.bkp" "$test_serie_dir/install_args" + fi + # Then replace the backup sudo rm -r "$package_path" sudo mv "${package_path}_back" "$package_path" - # And restore the arguments for the manifest - manifest_args_mod="$update_manifest_args" fi # Check if the install had work if [ $yunohost_result -ne 0 ] then - error "Installation failed..." - error "Upgrade test ignored..." + log_error "Installation failed..." + log_error "Upgrade test ignored..." else - small_title "Upgrade..." + log_small_title "Upgrade..." # Upgrade the application in a LXC container - run_yunohost "app upgrade $ynh_app_id -f '$package_dir'" + RUN_YUNOHOST_CMD "app upgrade $app_id -f '$package_path'" # yunohost_result gets the return code of the upgrade yunohost_result=$? # Print the result of the upgrade command if [ $yunohost_result -eq 0 ]; then - debug "Upgrade successful." + log_debug "Upgrade successful." else - error "Upgrade failed. ($yunohost_result)" + log_error "Upgrade failed. 
($yunohost_result)" fi # Try to access the app by its url @@ -742,13 +617,11 @@ CHECK_UPGRADE () { # Check the result and print SUCCESS or FAIL if check_test_result - then # Success + then # Success # The global success for an upgrade can't be a success if another upgrade failed - if [ $RESULT_check_upgrade -ne -1 ]; then - RESULT_check_upgrade=1 # Upgrade succeed - fi - else # Fail - RESULT_check_upgrade=-1 # Upgrade failed + SET_RESULT_IF_NONE_YET check_upgrade 1 + else # Fail + SET_RESULT check_upgrade -1 fi # Remove the application @@ -759,10 +632,10 @@ CHECK_UPGRADE () { current_snapshot=snap0 # Stop and restore the LXC container LXC_STOP >> $complete_log - done 4< "$script_dir/tmp_context_for_tests/upgrade_list" + done } -CHECK_PUBLIC_PRIVATE () { +TEST_PUBLIC_PRIVATE () { # Try to install in public or private mode # $1 = install type @@ -775,20 +648,15 @@ CHECK_PUBLIC_PRIVATE () { # Check if an install have previously work - is_install_failed || return - - # Copy original arguments - local manifest_args_mod="$manifest_arguments" + validate_that_at_least_one_install_succeeded || return # Replace manifest key for the test - check_domain=$sub_domain - replace_manifest_key "domain" "$check_domain" - replace_manifest_key "user" "$test_user" + check_domain=$SUBDOMAIN # Set public or private according to type of test requested if [ "$install_type" = "private" ]; then - replace_manifest_key "public" "$public_private_arg" + local is_public="0" elif [ "$install_type" = "public" ]; then - replace_manifest_key "public" "$public_public_arg" + local is_public="1" fi # Initialize the value @@ -806,10 +674,9 @@ CHECK_PUBLIC_PRIVATE () { then # Replace manifest key for path local check_path=/ - replace_manifest_key "path" "$check_path" else # Jump to the second path if this check cannot be do - warning "Root install failed, therefore this test cannot be performed..." + log_warning "Root install failed, therefore this test cannot be performed..." 
continue fi @@ -821,16 +688,15 @@ CHECK_PUBLIC_PRIVATE () { then # Replace manifest key for path local check_path=$test_path - replace_manifest_key "path" "$check_path" else # Jump to the second path if this check cannot be do - warning "Sub path install failed, therefore this test cannot be performed..." + log_warning "Sub path install failed, therefore this test cannot be performed..." return fi fi # Install the application in a LXC container - SETUP_APP + INSTALL_APP "domain=$check_domain" "user=$TEST_USER" "is_public=$is_public" "path=$check_path" # Try to access the app by its url VALIDATE_THAT_APP_CAN_BE_ACCESSED @@ -840,40 +706,39 @@ CHECK_PUBLIC_PRIVATE () { then # In private mode, if curl doesn't fell on the ynh portal, it's a fail. if [ $yuno_portal -eq 0 ]; then - error "App is not private: it should redirect to the Yunohost portal, but is publicly accessible instead" + log_error "App is not private: it should redirect to the Yunohost portal, but is publicly accessible instead" yunohost_result=1 fi elif [ "$install_type" = "public" ] then # In public mode, if curl fell on the ynh portal, it's a fail. 
if [ $yuno_portal -eq 1 ]; then - error "App page is not public: it should be publicly accessible, but redirects to the Yunohost portal instead" + log_error "App page is not public: it should be publicly accessible, but redirects to the Yunohost portal instead" yunohost_result=1 fi fi # Check the result and print SUCCESS or FAIL if [ $yunohost_result -eq 0 ] && [ $curl_error -eq 0 ] - then - report_test_success + then + log_report_test_success # The global success for public/private mode can't be a success if another installation failed if [ $check_result_public_private -ne -1 ]; then - check_result_public_private=1 # Installation succeed + check_result_public_private=1 # Installation succeed fi else - report_test_failed - check_result_public_private=-1 # Installation failed + log_report_test_failed + check_result_public_private=-1 # Installation failed fi # Fill the correct variable depend on the type of test if [ "$install_type" = "private" ] then - RESULT_check_private=$check_result_public_private - else # public - RESULT_check_public=$check_result_public_private + SET_RESULT check_private $check_result_public_private + else # public + SET_RESULT check_public $check_result_public_private fi - # Make a break if auto_remove is set break_before_continue # Stop and restore the LXC container @@ -881,16 +746,13 @@ CHECK_PUBLIC_PRIVATE () { done } -CHECK_MULTI_INSTANCE () { +TEST_MULTI_INSTANCE () { # Try multi-instance installations start_test "Multi-instance installations" # Check if an install have previously work - is_install_failed || return - - # Copy original arguments - local manifest_args_mod="$manifest_arguments" + validate_that_at_least_one_install_succeeded || return # Replace manifest key for the test if [ $sub_dir_install -eq 1 ]; then @@ -898,44 +760,34 @@ CHECK_MULTI_INSTANCE () { else local check_path=/ fi - replace_manifest_key "path" "$check_path" - - replace_manifest_key "user" "$test_user" - replace_manifest_key "public" "$public_public_arg" # 
Install 2 times the same app local i=0 for i in 1 2 do - # First installation if [ $i -eq 1 ] then - check_domain=$main_domain - small_title "First installation: path=$check_domain$check_path" + check_domain=$DOMAIN + log_small_title "First installation: path=$check_domain$check_path" # Second installation elif [ $i -eq 2 ] then - check_domain=$sub_domain - small_title "Second installation: path=$check_domain$check_path" + check_domain=$SUBDOMAIN + log_small_title "Second installation: path=$check_domain$check_path" fi - # Replace path and domain manifest keys for the test - replace_manifest_key "domain" "$check_domain" - # Install the application in a LXC container - SETUP_APP + INSTALL_APP "domain=$check_domain" "path=$check_path" "user=$TEST_USER" "is_public=1" # Store the result in the correct variable # First installation if [ $i -eq 1 ] then local multi_yunohost_result_1=$yunohost_result - local ynh_app_id_1=$ynh_app_id # Second installation elif [ $i -eq 2 ] then local multi_yunohost_result_2=$yunohost_result - local ynh_app_id_2=$ynh_app_id fi done @@ -945,18 +797,14 @@ CHECK_MULTI_INSTANCE () { # First app if [ $i -eq 1 ] then - check_domain=$main_domain - ynh_app_id=$ynh_app_id_1 + check_domain=$DOMAIN # Second app elif [ $i -eq 2 ] then - check_domain=$sub_domain - ynh_app_id=$ynh_app_id_2 + check_domain=$SUBDOMAIN + VALIDATE_THAT_APP_CAN_BE_ACCESSED ${app_id}__2 fi - # Try to access the app by its url - VALIDATE_THAT_APP_CAN_BE_ACCESSED - # Check the result of curl test if [ $curl_error -ne 0 ] || [ $yuno_portal -ne 0 ] then @@ -976,116 +824,83 @@ CHECK_MULTI_INSTANCE () { # Check the result and print SUCCESS or FAIL # Succeed if the 2 installations work; if [ $multi_yunohost_result_1 -eq 0 ] && [ $multi_yunohost_result_2 -eq 0 ] - then # Success - report_test_success - RESULT_check_multi_instance=1 - else # Fail - report_test_failed - RESULT_check_multi_instance=-1 + then # Success + log_report_test_success + SET_RESULT check_multi_instance 1 + else # 
Fail + log_report_test_failed + SET_RESULT check_multi_instance -1 fi - # Make a break if auto_remove is set break_before_continue } -CHECK_COMMON_ERROR () { +TEST_PORT_ALREADY_USED () { # Try to install with specific complications # $1 = install type local install_type=$1 - if [ "$install_type" = "port_already_use" ] - start_test "Port already used" - fi + start_test "Port already used" # Check if an install have previously work - is_install_failed || return - - # Copy original arguments - local manifest_args_mod="$manifest_arguments" + validate_that_at_least_one_install_succeeded || return # Replace manifest key for the test - check_domain=$sub_domain - replace_manifest_key "domain" "$check_domain" - replace_manifest_key "user" "$test_user" - replace_manifest_key "public" "$public_public_arg" - + check_domain=$SUBDOMAIN # Replace path manifest key for the test - if [ "$install_type" = "port_already_use" ] - # Use a path according to previous succeeded installs - if [ $sub_dir_install -eq 1 ]; then - local check_path=$test_path - else - local check_path=/ - fi - replace_manifest_key "path" "$check_path" + # Use a path according to previous succeeded installs + if [ $sub_dir_install -eq 1 ]; then + local check_path=$test_path + else + local check_path=/ fi - # Open the specified port to force the script to find another - if [ "$install_type" = "port_already_use" ] + if grep -q -m1 "port_already_use=1" "$test_serie_dir/check_process.tests_infos" then - - # If the first character is a #, that means it this port number is not in the manifest - if [ "${port_arg:0:1}" = "#" ] - then - # Retrieve the port number - local check_port="${port_arg:1}" - - # Else, the port number is in the manifest. So the port number is set at a fixed value. - else - local check_port=6660 - # Replace port manifest key for the test - replace_manifest_key "port" "$check_port" - fi - - # Build a service with netcat for use this port before the app. 
- echo -e "[Service]\nExecStart=/bin/netcat -l -k -p $check_port\n - [Install]\nWantedBy=multi-user.target" | \ - sudo tee "/var/lib/lxc/$lxc_name/rootfs/etc/systemd/system/netcat.service" \ - > /dev/null - - # Then start this service to block this port. - LXC_START "sudo systemctl enable netcat & sudo systemctl start netcat" + local check_port=$(grep -m1 "port_already_use=1" "$test_serie_dir/check_process.tests_infos" | grep -o -E "\([0-9]+\)" | tr -d '()') + else + local check_port=6660 fi + # Build a service with netcat for use this port before the app. + echo -e "[Service]\nExecStart=/bin/netcat -l -k -p $check_port\n + [Install]\nWantedBy=multi-user.target" | \ + sudo tee "$LXC_ROOTFS/etc/systemd/system/netcat.service" \ + > /dev/null + + # Then start this service to block this port. + LXC_START "sudo systemctl enable netcat & sudo systemctl start netcat" + # Install the application in a LXC container - SETUP_APP + INSTALL_APP "domain=$check_domain" "user=$TEST_USER" "is_public=1" "path=$check_path" "port=$check_port" # Try to access the app by its url VALIDATE_THAT_APP_CAN_BE_ACCESSED # Check the result and print SUCCESS or FAIL if check_test_result - then # Success + then # Success local check_result_setup=1 - else # Fail + else # Fail local check_result_setup=-1 fi # Fill the correct variable depend on the type of test - if [ "$install_type" = "port_already_use" ]; then - RESULT_check_port=$check_result_setup - fi + SET_RESULT check_port $check_result_setup - # Make a break if auto_remove is set break_before_continue } -CHECK_BACKUP_RESTORE () { +TEST_BACKUP_RESTORE () { # Try to backup then restore the app start_test "Backup/Restore" # Check if an install have previously work - is_install_failed || return - - # Copy original arguments - local manifest_args_mod="$manifest_arguments" + validate_that_at_least_one_install_succeeded || return # Replace manifest key for the test - check_domain=$sub_domain - replace_manifest_key "domain" "$check_domain" - 
replace_manifest_key "user" "$test_user" - replace_manifest_key "public" "$public_public_arg" + check_domain=$SUBDOMAIN # Try in 2 times, first in root and second in sub path. local i=0 @@ -1099,11 +914,10 @@ CHECK_BACKUP_RESTORE () { then # Replace manifest key for path local check_path=/ - replace_manifest_key "path" "$check_path" - small_title "Preliminary installation on the root..." + log_small_title "Preliminary installation on the root..." else # Jump to the second path if this check cannot be do - warning "Root install failed, therefore this test cannot be performed..." + log_warning "Root install failed, therefore this test cannot be performed..." continue fi @@ -1115,62 +929,59 @@ CHECK_BACKUP_RESTORE () { then # Replace manifest key for path local check_path=$test_path - replace_manifest_key "path" "$check_path" - small_title "Preliminary installation in a sub path..." "white" "bold" clog + log_small_title "Preliminary installation in a sub path..." "white" "bold" clog else # Jump to the second path if this check cannot be do - warning "Sub path install failed, therefore this test cannot be performed..." + log_warning "Sub path install failed, therefore this test cannot be performed..." return fi fi # Install the application in a LXC container - STANDARD_SETUP_APP + LOAD_SNAPSHOT_OR_INSTALL_APP "domain=$check_domain" "user=$TEST_USER" "is_public=1" "path=$check_path" # Remove the previous residual backups - sudo rm -rf /var/lib/lxc/$lxc_name/rootfs/home/yunohost.backup/archives - sudo rm -rf /var/lib/lxcsnaps/$lxc_name/$current_snapshot/rootfs/home/yunohost.backup/archives + sudo rm -rf $LXC_ROOTFS/home/yunohost.backup/archives + sudo rm -rf $LXC_SNAPSHOTS/$current_snapshot/rootfs/home/yunohost.backup/archives # BACKUP # Made a backup if the installation succeed if [ $yunohost_result -ne 0 ] then - error "Installation failed..." + log_error "Installation failed..." else - small_title "Backup of the application..." 
+ log_small_title "Backup of the application..." # A complete list of backup hooks is available at /usr/share/yunohost/hooks/backup/ backup_hooks="conf_ssowat data_home conf_ynh_firewall conf_cron" # Made a backup of the application - run_yunohost "backup create -n Backup_test --apps $ynh_app_id --system $backup_hooks" + RUN_YUNOHOST_CMD "backup create -n Backup_test --apps $app_id --system $backup_hooks" # yunohost_result gets the return code of the backup yunohost_result=$? # Print the result of the backup command if [ $yunohost_result -eq 0 ]; then - debug "Backup successful" + log_debug "Backup successful" else - error "Backup failed. ($yunohost_result)" + log_error "Backup failed. ($yunohost_result)" fi fi # Check the result and print SUCCESS or FAIL if [ $yunohost_result -eq 0 ] - then # Success - report_test_success + then # Success + log_report_test_success # The global success for a backup can't be a success if another backup failed - if [ $RESULT_check_backup -ne -1 ]; then - RESULT_check_backup=1 # Backup succeed - fi - else # Fail - report_test_failed - RESULT_check_backup=-1 # Backup failed + SET_RESULT_IF_NONE_YET check_backup 1 + else + log_report_test_failed + SET_RESULT check_backup -1 fi # Grab the backup archive into the LXC container, and keep a copy - sudo cp -a /var/lib/lxc/$lxc_name/rootfs/home/yunohost.backup/archives ./ + sudo cp -a $LXC_ROOTFS/home/yunohost.backup/archives ./ # RESTORE # Try the restore process in 2 times, first after removing the app, second after a restore of the container. @@ -1183,7 +994,7 @@ CHECK_BACKUP_RESTORE () { # Remove the application REMOVE_APP - small_title "Restore after removing the application..." + log_small_title "Restore after removing the application..." 
# Second, restore the whole container to remove completely the application elif [ $j -eq 1 ] @@ -1192,28 +1003,28 @@ CHECK_BACKUP_RESTORE () { current_snapshot=snap0 # Remove the previous residual backups - sudo rm -rf /var/lib/lxcsnaps/$lxc_name/$current_snapshot/rootfs/home/yunohost.backup/archives + sudo rm -rf $LXC_SNAPSHOTS/$current_snapshot/rootfs/home/yunohost.backup/archives # Place the copy of the backup archive in the container. - sudo mv -f ./archives /var/lib/lxcsnaps/$lxc_name/$current_snapshot/rootfs/home/yunohost.backup/ + sudo mv -f ./archives $LXC_SNAPSHOTS/$current_snapshot/rootfs/home/yunohost.backup/ # Stop and restore the LXC container LXC_STOP >> $complete_log - small_title "Restore on a clean YunoHost system..." + log_small_title "Restore on a clean YunoHost system..." fi # Restore the application from the previous backup - run_yunohost "backup restore Backup_test --force --apps $ynh_app_id" + RUN_YUNOHOST_CMD "backup restore Backup_test --force --apps $app_id" # yunohost_result gets the return code of the restore yunohost_result=$? # Print the result of the backup command if [ $yunohost_result -eq 0 ]; then - debug "Restore successful." + log_debug "Restore successful." else - error "Restore failed. ($yunohost_result)" + log_error "Restore failed. 
($yunohost_result)" fi # Try to access the app by its url @@ -1221,16 +1032,13 @@ CHECK_BACKUP_RESTORE () { # Check the result and print SUCCESS or FAIL if check_test_result - then # Success + then # The global success for a restore can't be a success if another restore failed - if [ $RESULT_check_restore -ne -1 ]; then - RESULT_check_restore=1 # Restore succeed - fi - else # Fail - RESULT_check_restore=-1 # Restore failed + SET_RESULT_IF_NONE_YET check_restore 1 + else + SET_RESULT check_restore -1 fi - # Make a break if auto_remove is set break_before_continue # Stop and restore the LXC container @@ -1239,23 +1047,16 @@ CHECK_BACKUP_RESTORE () { done } -CHECK_CHANGE_URL () { +TEST_CHANGE_URL () { # Try the change_url script start_test "Change URL" # Check if an install have previously work - is_install_failed || return - - # Copy original arguments - local manifest_args_mod="$manifest_arguments" + validate_that_at_least_one_install_succeeded || return # Replace manifest key for the test - check_domain=$sub_domain - replace_manifest_key "domain" "$check_domain" - replace_manifest_key "user" "$test_user" - replace_manifest_key "public" "$public_public_arg" - + check_domain=$SUBDOMAIN # Try in 6 times ! # Without modify the domain, root to path, path to path and path to root. 
# And then, same with a domain change @@ -1266,43 +1067,42 @@ CHECK_CHANGE_URL () { # Same domain, root to path check_path=/ local new_path=$test_path - local new_domain=$sub_domain + local new_domain=$SUBDOMAIN elif [ $i -eq 2 ]; then # Same domain, path to path check_path=$test_path local new_path=${test_path}_2 - local new_domain=$sub_domain + local new_domain=$SUBDOMAIN elif [ $i -eq 3 ]; then # Same domain, path to root check_path=$test_path local new_path=/ - local new_domain=$sub_domain + local new_domain=$SUBDOMAIN elif [ $i -eq 4 ]; then # Other domain, root to path check_path=/ local new_path=$test_path - local new_domain=$main_domain + local new_domain=$DOMAIN elif [ $i -eq 5 ]; then # Other domain, path to path check_path=$test_path local new_path=${test_path}_2 - local new_domain=$main_domain + local new_domain=$DOMAIN elif [ $i -eq 6 ]; then # Other domain, path to root check_path=$test_path local new_path=/ - local new_domain=$main_domain + local new_domain=$DOMAIN elif [ $i -eq 7 ]; then # Other domain, root to root check_path=/ local new_path=/ - local new_domain=$main_domain + local new_domain=$DOMAIN fi - replace_manifest_key "path" "$check_path" # Ignore the test if it tries to move to the same address - if [ "$check_path" == "$new_path" ] && [ "$new_domain" == "$sub_domain" ]; then + if [ "$check_path" == "$new_path" ] && [ "$new_domain" == "$SUBDOMAIN" ]; then continue fi @@ -1313,12 +1113,12 @@ CHECK_CHANGE_URL () { if [ $root_install -eq 0 ] then # Skip this test - warning "Root install failed, therefore this test cannot be performed..." + log_warning "Root install failed, therefore this test cannot be performed..." continue elif [ "$new_path" != "/" ] && [ $sub_dir_install -eq 0 ] then # Skip this test - warning "Sub path install failed, therefore this test cannot be performed..." + log_warning "Sub path install failed, therefore this test cannot be performed..." 
continue fi # And with a sub path install @@ -1326,38 +1126,38 @@ CHECK_CHANGE_URL () { if [ $sub_dir_install -eq 0 ] then # Skip this test - warning "Sub path install failed, therefore this test cannot be performed..." + log_warning "Sub path install failed, therefore this test cannot be performed..." continue elif [ "$new_path" = "/" ] && [ $root_install -eq 0 ] then # Skip this test - warning "Root install failed, therefore this test cannot be performed..." + log_warning "Root install failed, therefore this test cannot be performed..." continue fi fi # Install the application in a LXC container - small_title "Preliminary install..." - STANDARD_SETUP_APP + log_small_title "Preliminary install..." + LOAD_SNAPSHOT_OR_INSTALL_APP "domain=$check_domain" "user=$TEST_USER" "is_public=1" "path=$check_path" # Check if the install had work if [ $yunohost_result -ne 0 ] then - error "Installation failed..." + log_error "Installation failed..." else - small_title "Change the url from $sub_domain$check_path to $new_domain$new_path..." + log_small_title "Change the url from $SUBDOMAIN$check_path to $new_domain$new_path..." # Change the url - run_yunohost "app change-url $ynh_app_id -d '$new_domain' -p '$new_path'" + RUN_YUNOHOST_CMD "app change-url $app_id -d '$new_domain' -p '$new_path'" # yunohost_result gets the return code of the change-url script yunohost_result=$? # Print the result of the change_url command if [ $yunohost_result -eq 0 ]; then - debug "Change_url script successful" + log_debug "Change_url script successful" else - error "Change_url script failed. ($yunohost_result)" + log_error "Change_url script failed. 
($yunohost_result)" fi # Try to access the app by its url @@ -1368,16 +1168,13 @@ CHECK_CHANGE_URL () { # Check the result and print SUCCESS or FAIL if check_test_result - then # Success + then # Success # The global success for a change_url can't be a success if another change_url failed - if [ $RESULT_change_url -ne -1 ]; then - RESULT_change_url=1 # Change_url succeed - fi - else # Fail - RESULT_change_url=-1 # Change_url failed + SET_RESULT_IF_NONE_YET change_url 1 + else # Fail + SET_RESULT change_url -1 # Change_url failed fi - # Make a break if auto_remove is set break_before_continue # Uses the default snapshot @@ -1434,7 +1231,7 @@ ACTIONS_CONFIG_PANEL () { toml_file="$package_path/actions.toml" if [ ! -e "$toml_file" ] then - error "No actions.toml found !" + log_error "No actions.toml found !" return 1 fi @@ -1445,33 +1242,26 @@ ACTIONS_CONFIG_PANEL () { toml_file="$package_path/config_panel.toml" if [ ! -e "$toml_file" ] then - error "No config_panel.toml found !" + log_error "No config_panel.toml found !" return 1 fi fi # Check if an install have previously work - is_install_failed || return - - # Copy original arguments - local manifest_args_mod="$manifest_arguments" + validate_that_at_least_one_install_succeeded || return # Replace manifest key for the test - check_domain=$sub_domain - replace_manifest_key "domain" "$check_domain" - replace_manifest_key "user" "$test_user" - replace_manifest_key "public" "$public_public_arg" + check_domain=$SUBDOMAIN # Use a path according to previous succeeded installs if [ $sub_dir_install -eq 1 ]; then local check_path=$test_path else local check_path=/ fi - replace_manifest_key "path" "$check_path" # Install the application in a LXC container - small_title "Preliminary install..." - STANDARD_SETUP_APP + log_small_title "Preliminary install..." 
+ LOAD_SNAPSHOT_OR_INSTALL_APP "domain=$check_domain" "user=$TEST_USER" "is_public=1" "path=$check_path" validate_action_config_panel() { @@ -1482,23 +1272,20 @@ ACTIONS_CONFIG_PANEL () { # Print the result of the command if [ $yunohost_result -eq 0 ]; then - debug "$message succeed." + log_debug "$message succeed." else - error "$message failed. ($yunohost_result)" + log_error "$message failed. ($yunohost_result)" fi # Check the result and print SUCCESS or FAIL if check_test_result - then # Success + then # Success # The global success for a actions can't be a success if another iteration failed - if [ $RESULT_action_config_panel -ne -1 ]; then - RESULT_action_config_panel=1 # Actions succeed - fi - else # Fail - RESULT_action_config_panel=-1 # Actions failed + SET_RESULT_IF_NONE_YET action_config_panel 1 # Actions succeed + else # Fail + SET_RESULT action_config_panel -1 # Actions failed fi - # Make a break if auto_remove is set break_before_continue } @@ -1509,7 +1296,7 @@ ACTIONS_CONFIG_PANEL () { # Do a test if the installation succeed if [ $yunohost_result -ne 0 ] then - error "The previous test has failed..." + log_error "The previous test has failed..." continue fi @@ -1517,18 +1304,18 @@ ACTIONS_CONFIG_PANEL () { then if [ "$test_type" == "actions" ] then - info "> List the available actions..." + log_info "> List the available actions..." # List the actions - run_yunohost "app action list $ynh_app_id" + RUN_YUNOHOST_CMD "app action list $app_id" validate_action_config_panel "yunohost app action list" elif [ "$test_type" == "config_panel" ] then - info "> Show the config panel..." + log_info "> Show the config panel..." # Show the config-panel - run_yunohost "app config show-panel $ynh_app_id" + RUN_YUNOHOST_CMD "app config show-panel $app_id" validate_action_config_panel "yunohost app config show-panel" fi elif [ $i -eq 2 ] @@ -1536,13 +1323,13 @@ ACTIONS_CONFIG_PANEL () { local parts if [ "$test_type" == "actions" ] then - info "> Execute the actions..." 
+ log_info "> Execute the actions..." # Split the actions.toml file to separate each actions splitterAA "^[[:blank:]]*\[[^.]*\]" "$toml_file" elif [ "$test_type" == "config_panel" ] then - info "> Apply configurations..." + log_info "> Apply configurations..." # Split the config_panel.toml file to separate each configurations splitterAA "^[[:blank:]]*\[.*\]" "$toml_file" @@ -1613,10 +1400,24 @@ ACTIONS_CONFIG_PANEL () { if [ "$test_type" == "config_panel" ] then - local check_process_arguments="$config_panel_arguments" + check_process_arguments="" + while read line + do + # Remove all double quotes + add_arg="${line//\"/}" + # Then add this argument and follow it by : + check_process_arguments="${check_process_arguments}${add_arg}:" + done < $test_serie_dir/check_process.configpanel_infos elif [ "$test_type" == "actions" ] then - local check_process_arguments="$actions_arguments" + local check_process_arguments="" + while read line + do + # Remove all double quotes + add_arg="${line//\"/}" + # Then add this argument and follow it by : + check_process_arguments="${check_process_arguments}${add_arg}:" + done < $test_serie_dir/check_process.actions_infos fi # Look for arguments into the check_process if echo "$check_process_arguments" | grep --quiet "$action_config_argument_name" @@ -1660,29 +1461,29 @@ ACTIONS_CONFIG_PANEL () { local action_config_actual_argument="$action_config_argument_default" action_config_argument_built="--args $action_config_argument_name=\"$action_config_actual_argument\"" else - warning "> No argument into the check_process to use or default argument for \"$action_config_name\"..." + log_warning "> No argument into the check_process to use or default argument for \"$action_config_name\"..." action_config_actual_argument="" fi if [ "$test_type" == "config_panel" ] then - info "> Apply the configuration for \"$action_config_name\" with the argument \"$action_config_actual_argument\"..." 
+ log_info "> Apply the configuration for \"$action_config_name\" with the argument \"$action_config_actual_argument\"..." elif [ "$test_type" == "actions" ] then - info "> Execute the action \"$action_config_name\" with the argument \"$action_config_actual_argument\"..." + log_info "> Execute the action \"$action_config_name\" with the argument \"$action_config_actual_argument\"..." fi else - info "> Execute the action \"$action_config_name\"..." + log_info "> Execute the action \"$action_config_name\"..." fi if [ "$test_type" == "config_panel" ] then # Aply a configuration - run_yunohost "app config apply $ynh_app_id $action_config_action $action_config_argument_built" + RUN_YUNOHOST_CMD "app config apply $app_id $action_config_action $action_config_argument_built" elif [ "$test_type" == "actions" ] then # Execute an action - run_yunohost "app action run $ynh_app_id $action_config_action $action_config_argument_built" + RUN_YUNOHOST_CMD "app action run $app_id $action_config_action $action_config_argument_built" fi validate_action_config_panel "yunohost action $action_config_action" done @@ -1702,126 +1503,70 @@ PACKAGE_LINTER () { start_test "Package linter" # Execute package linter and linter_result gets the return code of the package linter - "$script_dir/package_linter/package_linter.py" "$package_path" > "$script_dir/temp_linter_result.log" - "$script_dir/package_linter/package_linter.py" "$package_path" --json > "$script_dir/temp_linter_result.json" + "./package_linter/package_linter.py" "$package_path" > "./temp_linter_result.log" + "./package_linter/package_linter.py" "$package_path" --json > "./temp_linter_result.json" # Print the results of package linter and copy these result in the complete log - cat "$script_dir/temp_linter_result.log" | tee --append "$complete_log" - cat "$script_dir/temp_linter_result.json" >> "$complete_log" + cat "./temp_linter_result.log" | tee --append "$complete_log" + cat "./temp_linter_result.json" >> "$complete_log" - 
RESULT_linter_broken=0 - RESULT_linter_level_6=0 - RESULT_linter_level_7=0 - RESULT_linter_level_8=0 + SET_RESULT linter_broken 0 + SET_RESULT linter_level_6 0 + SET_RESULT linter_level_7 0 + SET_RESULT linter_level_8 0 # Check we qualify for level 6, 7, 8 # Linter will have a warning called "app_in_github_org" if app ain't in the # yunohost-apps org... - if ! cat "$script_dir/temp_linter_result.json" | jq ".warning" | grep -q "app_in_github_org" + if ! cat "./temp_linter_result.json" | jq ".warning" | grep -q "app_in_github_org" then - RESULT_linter_level_6=1 + SET_RESULT linter_level_6 1 fi - if cat "$script_dir/temp_linter_result.json" | jq ".success" | grep -q "qualify_for_level_7" + if cat "./temp_linter_result.json" | jq ".success" | grep -q "qualify_for_level_7" then - RESULT_linter_level_7=1 + SET_RESULT linter_level_7 1 fi - if cat "$script_dir/temp_linter_result.json" | jq ".success" | grep -q "qualify_for_level_8" + if cat "./temp_linter_result.json" | jq ".success" | grep -q "qualify_for_level_8" then - RESULT_linter_level_8=1 + SET_RESULT linter_level_8 1 fi # If there are any critical errors, we'll force level 0 - if [[ -n "$(cat "$script_dir/temp_linter_result.json" | jq ".critical" | grep -v '\[\]')" ]] + if [[ -n "$(cat "./temp_linter_result.json" | jq ".critical" | grep -v '\[\]')" ]] then - report_test_failed - RESULT_linter_broken=1 - RESULT_linter=-1 + log_report_test_failed + SET_RESULT linter_broken 1 + SET_RESULT linter -1 # If there are any regular errors, we'll cap to 4 - elif [[ -n "$(cat "$script_dir/temp_linter_result.json" | jq ".error" | grep -v '\[\]')" ]] + elif [[ -n "$(cat "./temp_linter_result.json" | jq ".error" | grep -v '\[\]')" ]] then - report_test_failed - RESULT_linter=-1 + log_report_test_failed + SET_RESULT linter -1 # Otherwise, test pass (we'll display a warning depending on if there are # any remaning warnings or not) else - if [[ -n "$(cat "$script_dir/temp_linter_result.json" | jq ".warning" | grep -v '\[\]')" ]] + 
if [[ -n "$(cat "./temp_linter_result.json" | jq ".warning" | grep -v '\[\]')" ]] then - report_test_warning + log_report_test_warning else - report_test_success + log_report_test_success fi - RESULT_linter=1 + SET_RESULT linter 1 fi } -TEST_LAUNCHER () { - # Abstract for test execution. - # $1 = Name of the function to execute - # $2 = Argument for the function - - # Intialize values - yunohost_result=-1 - yunohost_remove=-1 - false_positive_error=0 - max_false_positive_error_loop=3 - - for false_positive_error_loop in $( seq 1 $max_false_positive_error_loop ) - do - # Start the timer for this test - start_timer - # And keep this value separately - local global_start_timer=$starttime - - # Execute the test - $1 $2 - - if [ $false_positive_error -eq 1 ] - then - error "This test was aborted because of a $false_positive_error_cond error." - if [ $false_positive_error_loop -lt $max_false_positive_error_loop ] - then - warning "The test will restart." - current_test_number=$((current_test_number-1)) - fi - fi - - # Uses the default snapshot - current_snapshot=snap0 - - # Stop and restore the LXC container - LXC_STOP >> $complete_log - - # Restore the started time for the timer - starttime=$global_start_timer - # End the timer for the test - stop_timer 2 - - # Update the lock file with the date of the last finished test. - # $$ is the PID of package_check itself. - echo "$1 $2:$(date +%s):$$" > "$lock_file" - - # Exit the loop if there's no temporary errors detected. - if [ $false_positive_error -eq 0 ] - then - break - fi - done -} - set_witness_files () { # Create files to check if the remove script does not remove them accidentally echo "Create witness files..." 
>> "$complete_log" - lxc_dir="/var/lib/lxc/$lxc_name/rootfs" - create_witness_file () { [ "$2" = "file" ] && local action="touch" || local action="mkdir -p" - sudo $action "${lxc_dir}${1}" + sudo $action "${LXC_ROOTFS}${1}" } # Nginx conf - create_witness_file "/etc/nginx/conf.d/$main_domain.d/witnessfile.conf" file - create_witness_file "/etc/nginx/conf.d/$sub_domain.d/witnessfile.conf" file + create_witness_file "/etc/nginx/conf.d/$DOMAIN.d/witnessfile.conf" file + create_witness_file "/etc/nginx/conf.d/$SUBDOMAIN.d/witnessfile.conf" file # /etc create_witness_file "/etc/witnessfile" file @@ -1839,13 +1584,13 @@ set_witness_files () { create_witness_file "/var/log/witnessfile" file # Config fpm - if [ -d "${lxc_dir}/etc/php5/fpm" ]; then + if [ -d "${LXC_ROOTFS}/etc/php5/fpm" ]; then create_witness_file "/etc/php5/fpm/pool.d/witnessfile.conf" file fi - if [ -d "${lxc_dir}/etc/php/7.0/fpm" ]; then + if [ -d "${LXC_ROOTFS}/etc/php/7.0/fpm" ]; then create_witness_file "/etc/php/7.0/fpm/pool.d/witnessfile.conf" file fi - if [ -d "${lxc_dir}/etc/php/7.3/fpm" ]; then + if [ -d "${LXC_ROOTFS}/etc/php/7.3/fpm" ]; then create_witness_file "/etc/php/7.3/fpm/pool.d/witnessfile.conf" file fi @@ -1856,28 +1601,26 @@ set_witness_files () { create_witness_file "/etc/systemd/system/witnessfile.service" file # Database - sudo lxc-attach --name=$lxc_name -- mysqladmin --user=root --password=$(sudo cat "$lxc_dir/etc/yunohost/mysql") --wait status > /dev/null 2>&1 - sudo lxc-attach --name=$lxc_name -- mysql --user=root --password=$(sudo cat "$lxc_dir/etc/yunohost/mysql") --wait --execute="CREATE DATABASE witnessdb" > /dev/null 2>&1 + RUN_INSIDE_LXC mysqladmin --user=root --password=$(sudo cat "$LXC_ROOTFS/etc/yunohost/mysql") --wait status > /dev/null 2>&1 + RUN_INSIDE_LXC mysql --user=root --password=$(sudo cat "$LXC_ROOTFS/etc/yunohost/mysql") --wait --execute="CREATE DATABASE witnessdb" > /dev/null 2>&1 } check_witness_files () { # Check all the witness files, to verify if them 
still here - lxc_dir="/var/lib/lxc/$lxc_name/rootfs" - check_file_exist () { - if sudo test ! -e "${lxc_dir}${1}" + if sudo test ! -e "${LXC_ROOTFS}${1}" then - error "The file $1 is missing ! Something gone wrong !" - RESULT_witness=1 + log_error "The file $1 is missing ! Something gone wrong !" + SET_RESULT witness 1 fi } # Nginx conf - check_file_exist "/etc/nginx/conf.d/$main_domain.d/witnessfile.conf" - check_file_exist "/etc/nginx/conf.d/$sub_domain.d/witnessfile.conf" + check_file_exist "/etc/nginx/conf.d/$DOMAIN.d/witnessfile.conf" + check_file_exist "/etc/nginx/conf.d/$SUBDOMAIN.d/witnessfile.conf" - # /etc + # /etc check_file_exist "/etc/witnessfile" # /opt directory @@ -1893,13 +1636,13 @@ check_witness_files () { check_file_exist "/var/log/witnessfile" # Config fpm - if [ -d "${lxc_dir}/etc/php5/fpm" ]; then + if [ -d "${LXC_ROOTFS}/etc/php5/fpm" ]; then check_file_exist "/etc/php5/fpm/pool.d/witnessfile.conf" file fi - if [ -d "${lxc_dir}/etc/php/7.0/fpm" ]; then + if [ -d "${LXC_ROOTFS}/etc/php/7.0/fpm" ]; then check_file_exist "/etc/php/7.0/fpm/pool.d/witnessfile.conf" file fi - if [ -d "${lxc_dir}/etc/php/7.3/fpm" ]; then + if [ -d "${LXC_ROOTFS}/etc/php/7.3/fpm" ]; then check_file_exist "/etc/php/7.3/fpm/pool.d/witnessfile.conf" file fi @@ -1910,28 +1653,38 @@ check_witness_files () { check_file_exist "/etc/systemd/system/witnessfile.service" # Database - if ! sudo lxc-attach --name=$lxc_name -- mysqlshow --user=root --password=$(sudo cat "$lxc_dir/etc/yunohost/mysql") | grep --quiet '^| witnessdb' > /dev/null 2>&1 + if ! RUN_INSIDE_LXC mysqlshow --user=root --password=$(sudo cat "$LXC_ROOTFS/etc/yunohost/mysql") witnessdb > /dev/null 2>&1 then - error "The database witnessdb is missing ! Something gone wrong !" - RESULT_witness=1 + log_error "The database witnessdb is missing ! Something gone wrong !" 
+ SET_RESULT witness 1 fi - if [ $RESULT_witness -eq 1 ] + if [ $(GET_RESULT witness) -eq 1 ] then yunohost_result=1 - yunohost_remove=1 fi } -TESTING_PROCESS () { +RUN_TEST_SERIE() { # Launch all tests successively + test_serie_dir=$1 - title "Tests serie: ${tests_serie#;; }" + curl_error=0 + yuno_portal=0 + + _path_arg=$(grep -m1 "(PATH)" $test_serie_dir/check_process.manifest_infos | grep -o "\S+=\S+" | awk -F= '{print $2}' | tr -d '"') + [ -n "$_path_arg" ] && test_path="$_path_arg" || test_path="/" + + log_title "Tests serie: $(cat $test_serie_dir/test_serie_name)" # Be sure that the container is running LXC_START "true" - PRINT_YUNOHOST_VERSION + log_small_title "YunoHost versions" + # Print the version of YunoHost from the LXC container + LXC_START "sudo yunohost --version" + + source $test_serie_dir/tests_to_perform # Init the value for the current test current_test_number=1 @@ -1945,34 +1698,34 @@ TESTING_PROCESS () { [ $pkg_linter -eq 1 ] && PACKAGE_LINTER # Try to install in a sub path - [ $setup_sub_dir -eq 1 ] && TEST_LAUNCHER CHECK_SETUP subdir + [ $setup_sub_dir -eq 1 ] && TEST_LAUNCHER TEST_INSTALL subdir # Try to install on root - [ $setup_root -eq 1 ] && TEST_LAUNCHER CHECK_SETUP root + [ $setup_root -eq 1 ] && TEST_LAUNCHER TEST_INSTALL root # Try to install without url access - [ $setup_nourl -eq 1 ] && TEST_LAUNCHER CHECK_SETUP no_url + [ $setup_nourl -eq 1 ] && TEST_LAUNCHER TEST_INSTALL no_url # Try the upgrade script - [ $upgrade -eq 1 ] && TEST_LAUNCHER CHECK_UPGRADE + [ $upgrade -eq 1 ] && TEST_LAUNCHER TEST_UPGRADE # Try to install in private mode - [ $setup_private -eq 1 ] && TEST_LAUNCHER CHECK_PUBLIC_PRIVATE private + [ $setup_private -eq 1 ] && TEST_LAUNCHER TEST_PUBLIC_PRIVATE private # Try to install in public mode - [ $setup_public -eq 1 ] && TEST_LAUNCHER CHECK_PUBLIC_PRIVATE public + [ $setup_public -eq 1 ] && TEST_LAUNCHER TEST_PUBLIC_PRIVATE public # Try multi-instance installations - [ $multi_instance -eq 1 ] && TEST_LAUNCHER 
CHECK_MULTI_INSTANCE + [ $multi_instance -eq 1 ] && TEST_LAUNCHER TEST_MULTI_INSTANCE # Try to install with a port already used - [ $port_already_use -eq 1 ] && TEST_LAUNCHER CHECK_COMMON_ERROR port_already_use + [ $port_already_use -eq 1 ] && TEST_LAUNCHER TEST_PORT_ALREADY_USED # Try to backup then restore the app - [ $backup_restore -eq 1 ] && TEST_LAUNCHER CHECK_BACKUP_RESTORE + [ $backup_restore -eq 1 ] && TEST_LAUNCHER TEST_BACKUP_RESTORE # Try the change_url script - [ $change_url -eq 1 ] && TEST_LAUNCHER CHECK_CHANGE_URL + [ $change_url -eq 1 ] && TEST_LAUNCHER TEST_CHANGE_URL # Try the actions [ $actions -eq 1 ] && TEST_LAUNCHER ACTIONS_CONFIG_PANEL actions @@ -1980,3 +1733,38 @@ TESTING_PROCESS () { # Try the config-panel [ $config_panel -eq 1 ] && TEST_LAUNCHER ACTIONS_CONFIG_PANEL config_panel } + +TEST_LAUNCHER () { + # Abstract for test execution. + # $1 = Name of the function to execute + # $2 = Argument for the function + + # Intialize values + yunohost_result=-1 + + # Start the timer for this test + start_timer + # And keep this value separately + local global_start_timer=$starttime + + # Execute the test + $1 $2 + + # Uses the default snapshot + current_snapshot=snap0 + + # Stop and restore the LXC container + LXC_STOP >> $complete_log + + # Restore the started time for the timer + starttime=$global_start_timer + # End the timer for the test + stop_timer 2 + + # Update the lock file with the date of the last finished test. + # $$ is the PID of package_check itself. + echo "$1 $2:$(date +%s):$$" > "$lock_file" + +} + +