Merge CI_package_check and simplify overly complex stuff

Alexandre Aubin 2023-01-17 14:36:20 +01:00
parent f08af652ee
commit a2969820b5
13 changed files with 686 additions and 46 deletions

analyze_yunohost_app.sh Executable file (+240 lines)

@@ -0,0 +1,240 @@
#!/bin/bash
if [ "${0:0:1}" == "/" ]; then script_dir="$(dirname "$0")"; else script_dir="$(echo $PWD/$(dirname "$0" | cut -d '.' -f2) | sed 's@/$@@')"; fi
cd "$script_dir"
if [ $# -ne 4 ]
then
echo "This script takes 4 arguments: the package to be tested, the name of the test, the ID of the job, and the ID of the worker."
exit 1
fi
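# Example invocation (hypothetical values):
#   ./analyze_yunohost_app.sh "https://github.com/YunoHost-Apps/example_ynh;master" "example (Apps)" 1234 2
# i.e.: <repository[;branch]> "<test name, whose first word is the app id>" <job id> <worker id>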
mkdir -p /var/run/yunorunner/locks/
worker_id="$4"
lock_yunorunner="/var/run/yunorunner/locks/${worker_id}.lock"
lock_package_check="./package_check/pcheck-${worker_id}.lock"
# 10800 sec / 60 = 180 min = 3 hours
TIMEOUT="10800"
BASE_URL="$(cat "./config.py" | tr -d ' "' | grep "^BASE_URL=" | cut --delimiter="=" --fields=2)"
ynh_branch="$(cat "./config.py" | tr -d ' "' | grep "^YNH_BRANCH=" | cut --delimiter="=" --fields=2)"
arch="$(dpkg --print-architecture)"
dist="$(cat "./config.py" | tr -d ' "' | grep "^DIST=" | cut --delimiter="=" --fields=2)"
# Enable chat notifications only on main CI
if [[ "$ynh_branch" == "stable" ]] && [[ "$arch" == "amd64" ]] && [[ -e "./maintenance/chat_notify.sh" ]]
then
chat_notify="./maintenance/chat_notify.sh"
else
chat_notify="true" # 'true' is a dummy program that won't do anything
fi
#=================================================
# Delay the beginning of this script, to prevent concurrent executions
#=================================================
# Get 3 random digits, to build a value between 000 and 999
milli_sleep=$(head --lines=20 /dev/urandom | tr --complement --delete '0-9' | head --bytes=3)
# ... and sleep that many extra milliseconds on top of 5 seconds (i.e. between 5.000 and 5.999 seconds)
sleep "5.$milli_sleep"
#============================
# Check / take the lock
#=============================
if [ -e $lock_yunorunner ]
then
lock_yunorunner_PID="$(cat $lock_yunorunner)"
if [ -n "$lock_yunorunner_PID" ]
then
# We check that the corresponding PID is still running AND that the PPid is not 1 ..
# If the PPid is 1, it tends to indicate that a previous analyseCI is still running and was not killed, and therefore got adopted by init.
# This typically happens when the job is cancelled / restarted .. though we should have a better way of handling cancellation from yunorunner directly :/
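# (An orphaned process gets reparented to PID 1, so `grep PPid /proc/$pid/status` shows "PPid: 1" for it.)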
if ps --pid $lock_yunorunner_PID | grep --quiet $lock_yunorunner_PID && [[ $(grep PPid /proc/${lock_yunorunner_PID}/status | awk '{print $2}') != "1" ]]
then
echo -e "\e[91m\e[1m!!! Another analyseCI process is currently using the lock $lock_yunorunner !!!\e[0m"
"$chat_notify" "CI miserably crashed because another process is using the lock"
sleep 10
exit 1
fi
fi
[[ $(grep PPid /proc/${lock_yunorunner_PID}/status 2>/dev/null | awk '{print $2}') == "1" ]] && { echo "Killing stale analyseCI process ..."; kill -s SIGTERM $lock_yunorunner_PID; sleep 30; }
echo "Removing stale lock"
rm -f $lock_yunorunner
fi
echo "$$" > $lock_yunorunner
#============================
# Cleanup after exit/kill
#=============================
function cleanup()
{
rm $lock_yunorunner
if [ -n "$package_check_pid" ]
then
kill -s SIGTERM $package_check_pid
WORKER_ID="$worker_id" ARCH="$arch" DIST="$dist" YNH_BRANCH="$ynh_branch" "./package_check/package_check.sh" --force-stop
fi
}
trap cleanup EXIT
trap 'exit 2' TERM
#============================
# Test parameters
#=============================
repo="$1"
test_name="$2"
job_id="$3"
# Keep only the repository (strip anything after a ';')
repo=$(echo $repo | cut --delimiter=';' --fields=1)
app="$(echo $test_name | awk '{print $1}')"
test_full_log=${app}_${arch}_${ynh_branch}_complete.log
test_json_results=${app}_${arch}_${ynh_branch}_results.json
test_url="$BASE_URL/job/$job_id"
# Make sure /usr/local/bin is in the path, because that's where the lxc/lxd bin lives
export PATH=$PATH:/usr/local/bin
#=================================================
# Timeout handling utils
#=================================================
function watchdog() {
local package_check_pid=$1
# Start a loop while package check is working
while ps --pid $package_check_pid | grep --quiet $package_check_pid
do
sleep 10
if [ -e $lock_package_check ]
then
lock_timestamp="$(stat -c %Y $lock_package_check)"
current_timestamp="$(date +%s)"
if [[ "$(($current_timestamp - $lock_timestamp))" -gt "$TIMEOUT" ]]
then
kill -s SIGTERM $package_check_pid
rm -f $lock_package_check
force_stop "Package check aborted, timeout reached ($(( $TIMEOUT / 60 )) min)."
return 1
fi
fi
done
if [ ! -e "./package_check/results-$worker_id.json" ]
then
force_stop "It looks like package_check did not finish properly ... on $test_url"
return 1
fi
}
function force_stop() {
local message="$1"
echo -e "\e[91m\e[1m!!! $message !!!\e[0m"
"$chat_notify" "While testing $app: $message"
WORKER_ID="$worker_id" ARCH="$arch" DIST="$dist" YNH_BRANCH="$ynh_branch" "./package_check/package_check.sh" --force-stop
}
#=================================================
# The actual testing ...
#=================================================
# Exec package check according to the architecture
echo "$(date) - Starting a test for $app on architecture $arch distribution $dist with yunohost $ynh_branch"
rm -f "./package_check/Complete-$worker_id.log"
rm -f "./package_check/results-$worker_id.json"
# Here we use a weird trick with 'script -qefc'
# The reason is that:
# if the command is run in the background (with &), it won't be attached to a tty,
# therefore later the 'lxc exec -t' command won't be in a tty either (despite the -t), and command outputs will appear empty...
# Instead, with the magic of 'script -qefc', we can pretend to be in a tty.
# Adapted from https://stackoverflow.com/questions/32910661/pretend-to-be-a-tty-in-bash-for-any-command
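# For instance (illustrative):
#   tty </dev/null                  # -> "not a tty"
#   script -qefc "tty" /dev/null    # -> prints a pseudo-terminal, e.g. /dev/pts/0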
cmd="WORKER_ID=$worker_id ARCH=$arch DIST=$dist YNH_BRANCH=$ynh_branch nice --adjustment=10 './package_check/package_check.sh' '$repo' 2>&1"
script -qefc "$cmd" &
watchdog $! || exit 1
# Copy the complete log
cp "./package_check/Complete-$worker_id.log" "./results/logs/$test_full_log"
cp "./package_check/results-$worker_id.json" "./results/logs/$test_json_results"
rm -f "./package_check/Complete-$worker_id.log"
rm -f "./package_check/results-$worker_id.json"
mkdir -p "./summary/"
[ ! -e "./package_check/summary.png" ] || cp "./package_check/summary.png" "./summary/${job_id}.png"
if [ -n "$BASE_URL" ]
then
full_log_path="$BASE_URL/logs/$test_full_log"
else
full_log_path="$(pwd)/results/logs/$test_full_log"
fi
echo "The complete log for this application was duplicated and is accessible at $full_log_path"
echo ""
echo "-------------------------------------------"
echo ""
#=================================================
# Check / update level of the app
#=================================================
public_result_list="./results/logs/list_level_${ynh_branch}_$arch.json"
[ -s "$public_result_list" ] || echo "{}" > "$public_result_list"
# Check that we have a valid json...
jq -e '' "./results/logs/$test_json_results" >/dev/null 2>/dev/null && bad_json="false" || bad_json="true"
# Get new level and previous level
app_level="$(jq -r ".level" "./results/logs/$test_json_results")"
previous_level="$(jq -r ".$app" "$public_result_list")"
# We post a message on the chat if we're running tests on stable/amd64
if [ "$bad_json" == "true" ] || [ "$app_level" -eq 0 ]; then
message="Application $app completely failed the continuous integration tests"
elif [ -z "$previous_level" ]; then
message="Application $app rises from level (unknown) to level $app_level"
elif [ $app_level -gt $previous_level ]; then
message="Application $app rises from level $previous_level to level $app_level"
elif [ $app_level -lt $previous_level ]; then
message="Application $app goes down from level $previous_level to level $app_level"
elif [ $app_level -ge 6 ]; then
# Don't notify anything, to reduce CI flood on the apps chatroom
message=""
else
message="Application $app stays at level $app_level"
fi
# Send chat notification
if [[ -n "$message" ]]
then
message+=" on $test_url"
echo $message
"$chat_notify" "$message"
fi
# Update/add the results from package_check in the public result list
if [ "$bad_json" == "false" ]
then
jq --argfile results "./results/logs/$test_json_results" ".\"$app\"=\$results" $public_result_list > $public_result_list.new
mv $public_result_list.new $public_result_list
fi
# Annnd we're done !
echo "$(date) - Test completed"
[ "$app_level" -gt 5 ] && exit 0 || exit 1

ciclic (20 lines changed)

@@ -14,23 +14,6 @@ except ImportError:
 DOMAIN = "localhost:" + str(PORT)
 
-def require_token():
-    if os.path.exists("token") and open("token").read().strip():
-        return
-
-    print("You need a token to be able to uses this command tool for security reasons, please refer to the README on how to add one https://github.com/YunoHost/yunorunner")
-    try:
-        token = input("Token: ").strip()
-    except KeyboardInterrupt:
-        print()
-        token = None
-
-    if not token:
-        print("Error: you need to provide a valid token")
-        sys.exit(1)
-
-    open("token", "w").write(token)
-
 def request_api(path, domain, verb, data={}, check_return_code=True):
     assert verb in ("get", "post", "put", "delete")
@@ -41,7 +24,7 @@ def request_api(path, domain, verb, data={}, check_return_code=True):
     response = getattr(requests, verb)(
         "http%s://%s/api/%s" % ("s" if https else "", domain, path),
-        headers={"X-Token": open("token", "r").read().strip()},
+        headers={"X-Token": open(".admin_token", "r").read().strip()},
         json=data,
     )
@@ -145,5 +128,4 @@ def restart(job_id, domain=DOMAIN):
 if __name__ == '__main__':
-    require_token()
     argh.dispatch_commands([add, list_, delete, stop, restart, app_list, shell])

(deleted file)

@@ -1,4 +0,0 @@
-bearnaise https://ci-apps.yunohost.org/
-samourai https://ci-apps-arm.yunohost.org/
-samourai https://ci-apps-unstable.yunohost.org/
-root@ci-stretch.nohost.me https://ci-stretch.nohost.me/

maintenance/chat_notify.sh Executable file (+24 lines)

@@ -0,0 +1,24 @@
#!/bin/bash
#
# To install before using:
#
# MCHOME="/opt/matrix-commander"
# MCARGS="-c $MCHOME/credentials.json --store $MCHOME/store"
# mkdir -p "$MCHOME/venv"
# python3 -m venv "$MCHOME/venv"
# source "$MCHOME/venv/bin/activate"
# pip3 install matrix-commander
# chmod 700 "$MCHOME"
# matrix-commander $MCARGS --login password # < NB here this is literally 'password' as authentication method, the actual password will be asked by a prompt
# matrix-commander $MCARGS --room-join '#yunohost-apps:matrix.org'
#
# groupadd matrixcommander
# usermod -a -G matrixcommander yunorunner
# chgrp -R matrixcommander $MCHOME
# chmod -R 770 $MCHOME
#
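# Usage: ./chat_notify.sh "Some message"
# (each argument is passed to matrix-commander through -m, i.e. sent as its own message to the room)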
MCHOME="/opt/matrix-commander/"
MCARGS="-c $MCHOME/credentials.json --store $MCHOME/store"
timeout 10 "$MCHOME/venv/bin/matrix-commander" $MCARGS -m "$@" --room 'yunohost-apps'

maintenance/finish_install.sh Executable file (+243 lines)

@@ -0,0 +1,243 @@
#!/bin/bash
cd "$(dirname $(realpath $0))"
if (( $# < 1 ))
then
cat << EOF
Usage: ./finish_install.sh [auto|manual] [cluster]
1st argument is the CI type (scheduling strategy):
  - auto: jobs are automatically scheduled by yunorunner from apps.json etc.
  - manual: jobs are scheduled manually (e.g. via webhooks or yunorunner's ciclic)
2nd argument is to build the first node of an LXD cluster:
  - an LXD cluster will be created with the current server
  - some.domain.tld will be the cluster hostname, and SecretAdminPasswurzd! the trust password to join the cluster
EOF
exit 1
fi
YUNORUNNER_HOME="/var/www/yunorunner"
if [ "$(pwd)" != "$YUNORUNNER_HOME" ]
then
echo "This script should be run from $YUNORUNNER_HOME"
exit 1
fi
ci_type=$1
lxd_cluster=$2
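# e.g.: ./finish_install.sh auto cluster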
# User which executes the CI software.
ci_user=yunorunner
echo_bold () {
echo -e "\e[1m$1\e[0m"
}
# -----------------------------------------------------------------
function tweak_yunohost() {
#echo_bold "> Setting up Yunohost..."
#local DIST="bullseye"
#local INSTALL_SCRIPT="https://install.yunohost.org/$DIST"
#curl $INSTALL_SCRIPT | bash -s -- -a
#echo_bold "> Running yunohost postinstall"
#yunohost tools postinstall --domain $domain --password $yuno_pwd
# What is it used for :| ...
#echo_bold "> Create Yunohost CI user"
#local ynh_ci_user=ynhci
#yunohost user create --firstname "$ynh_ci_user" --domain "$domain" --lastname "$ynh_ci_user" "$ynh_ci_user" --password $yuno_pwd
# Idk why this is needed but wokay I guess >_>
domain=$(yunohost app setting yunorunner domain)
echo -e "\n127.0.0.1 $domain #CI_APP" >> /etc/hosts
echo_bold "> Disabling unnecessary services to save RAM"
for SERVICE in mysql php7.4-fpm metronome rspamd dovecot postfix redis-server postsrsd yunohost-api avahi-daemon
do
systemctl stop $SERVICE
systemctl disable $SERVICE --quiet
done
}
function tweak_yunorunner() {
echo_bold "> Tweaking YunoRunner..."
#if ! yunohost app list --output-as json --quiet | jq -e '.apps[] | select(.id == "yunorunner")' >/dev/null
#then
# yunohost app install --force https://github.com/YunoHost-Apps/yunorunner_ynh -a "domain=$domain&path=/$ci_path"
#fi
domain=$(yunohost app setting yunorunner domain)
ci_path=$(yunohost app setting yunorunner path)
port=$(yunohost app setting yunorunner port)
# Stop YunoRunner; it will be started manually by the admin
# once the install finishes
systemctl stop $ci_user
# Remove the original database, in order to rebuild it with the new config.
rm -f $YUNORUNNER_HOME/db.sqlite
# For automatic / "main" CI we want to auto schedule jobs using the app list
if [ $ci_type == "auto" ]
then
cat >/var/www/yunorunner/config.py <<EOF
BASE_URL = "https://$domain/$ci_path"
PORT = $port
PATH_TO_ANALYZER = "$YUNORUNNER_HOME/analyze_yunohost_app.sh"
MONITOR_APPS_LIST = True
MONITOR_GIT = True
MONITOR_ONLY_GOOD_QUALITY_APPS = False
MONTHLY_JOBS = True
WORKER_COUNT = 1
YNH_BRANCH = "stable"
DIST = "$DIST"
EOF
# For Dev CI, we want to control the job scheduling entirely
# (c.f. the github webhooks)
else
cat >/var/www/yunorunner/config.py <<EOF
BASE_URL = "https://$domain/$ci_path"
PORT = $port
PATH_TO_ANALYZER = "$YUNORUNNER_HOME/analyze_yunohost_app.sh"
MONITOR_APPS_LIST = False
MONITOR_GIT = False
MONITOR_ONLY_GOOD_QUALITY_APPS = False
MONTHLY_JOBS = False
WORKER_COUNT = 1
YNH_BRANCH = "stable"
DIST = "$DIST"
EOF
fi
# Add permission to the user for the entire yunorunner home because it'll be the one running the tests (as a non-root user)
chown -R $ci_user $YUNORUNNER_HOME
# Put YunoRunner as the default app on the root of the domain
yunohost app makedefault -d "$domain" yunorunner
}
function setup_lxd() {
if ! yunohost app list --output-as json --quiet | jq -e '.apps[] | select(.id == "lxd")' >/dev/null
then
yunohost app install --force https://github.com/YunoHost-Apps/lxd_ynh
fi
echo_bold "> Configuring lxd..."
if [ "$lxd_cluster" == "cluster" ]
then
local free_space=$(df --output=avail / | sed 1d)
local btrfs_size=$(( $free_space * 90 / 100 / 1024 / 1024 ))
local lxc_network=$((1 + $RANDOM % 254))
yunohost firewall allow TCP 8443
cat >./preseed.conf <<EOF
config:
cluster.https_address: $domain:8443
core.https_address: ${domain}:8443
core.trust_password: ${yuno_pwd}
networks:
- config:
ipv4.address: 192.168.${lxc_network}.1/24
ipv4.nat: "true"
ipv6.address: none
description: ""
name: lxdbr0
type: bridge
project: default
storage_pools:
- config:
size: ${btrfs_size}GB
source: /var/lib/lxd/disks/local.img
description: ""
name: local
driver: btrfs
profiles:
- config: {}
description: Default LXD profile
devices:
lxdbr0:
nictype: bridged
parent: lxdbr0
type: nic
root:
path: /
pool: local
type: disk
name: default
projects:
- config:
features.images: "true"
features.networks: "true"
features.profiles: "true"
features.storage.volumes: "true"
description: Default LXD project
name: default
cluster:
server_name: ${domain}
enabled: true
EOF
cat ./preseed.conf | lxd init --preseed
rm ./preseed.conf
lxc config set core.https_address [::]
else
lxd init --auto --storage-backend=dir
fi
# ci_user will be the one launching jobs, so give it permission to run lxd commands
usermod -a -G lxd $ci_user
# We need a home for the "su" command later ?
mkdir -p /home/$ci_user
chown -R $ci_user /home/$ci_user
su $ci_user -s /bin/bash -c "lxc remote add yunohost https://devbaseimgs.yunohost.org --public --accept-certificate"
}
function add_cron_jobs() {
echo_bold "> Configuring the CI..."
# Cron tasks
cat >> "/etc/cron.d/yunorunner" << EOF
# self-upgrade every night
0 3 * * * root "$YUNORUNNER_HOME/maintenance/self_upgrade.sh" >> "$YUNORUNNER_HOME/maintenance/self_upgrade.log" 2>&1
# Update app list
0 20 * * 5 root "$YUNORUNNER_HOME/maintenance/update_level_apps.sh" >> "$YUNORUNNER_HOME/maintenance/update_level_apps.log" 2>&1
# Update badges
0 1 * * * root "$YUNORUNNER_HOME/maintenance/update_badges.sh" >> "$YUNORUNNER_HOME/maintenance/update_badges.log" 2>&1
EOF
}
# =========================
# Main stuff
# =========================
#git clone https://github.com/YunoHost/package_check "./package_check"
#install_dependencies
[ -e /usr/bin/yunohost ] || { echo "YunoHost is not installed"; exit 1; }
[ -e /etc/yunohost/apps/yunorunner ] || { echo "Yunorunner is not installed on YunoHost"; exit 1; }
tweak_yunohost
tweak_yunorunner
setup_lxd
add_cron_jobs
echo "Done!"
echo " "
echo "N.B. : If you want to enable Matrix notification, you should look at "
echo "the instructions inside lib/chat_notify.sh to deploy matrix-commander"
echo ""
echo "You may also want to tweak the 'config' file to run test with a different branch / arch"
echo ""
echo "When you're ready to start the CI, run: systemctl restart $ci_user"

maintenance/self_upgrade.sh Executable file (+24 lines)

@@ -0,0 +1,24 @@
#!/bin/bash
# This script is designed to be used in a cron file
#=================================================
# Grab the script directory
#=================================================
if [ "${0:0:1}" == "/" ]; then script_dir="$(dirname "$0")"; else script_dir="$(echo $PWD/$(dirname "$0" | cut -d '.' -f2) | sed 's@/$@@')"; fi
cd "$script_dir/.."
# We only self-upgrade if we're in a git repo on master branch
# (which should correspond to production contexts)
[[ -d ".git" ]] || exit
[[ $(git rev-parse --abbrev-ref HEAD) == "master" ]] || exit
git fetch origin --quiet
# If already up to date, don't do anything else
[[ $(git rev-parse HEAD) == $(git rev-parse origin/master) ]] && exit
git reset --hard origin/master --quiet

maintenance/update_badges.sh Executable file (+59 lines)

@@ -0,0 +1,59 @@
#!/bin/bash
#=================================================
# Grab the script directory
#=================================================
if [ "${0:0:1}" == "/" ]; then script_dir="$(dirname "$0")"; else script_dir="$(echo $PWD/$(dirname "$0" | cut -d '.' -f2) | sed 's@/$@@')"; fi
#=================================================
# Get the list and check for any modifications
#=================================================
# Get the apps list from app.yunohost.org
wget -nv https://app.yunohost.org/default/v3/apps.json -O "$script_dir/apps.json"
do_update=1
if [ -e "$script_dir/apps.json.md5" ]
then
if md5sum --check --status "$script_dir/apps.json.md5"
then
echo "No changes into the app list since the last execution."
do_update=0
fi
fi
if [ $do_update -eq 1 ]
then
md5sum "$script_dir/apps.json" > "$script_dir/apps.json.md5"
#=================================================
# Update badges for all apps
#=================================================
# Parse each app in the list
while read app
do
# Get the status for this app
state=$(jq --raw-output ".apps[\"$app\"] | .state" "$script_dir/apps.json")
level=$(jq --raw-output ".apps[\"$app\"] | .level" "$script_dir/apps.json")
if [[ "$state" == "working" ]]
then
if [[ "$level" == "null" ]] || [[ "$level" == "?" ]]
then
state="just-got-added-to-catalog"
elif [[ "$level" == "0" ]] || [[ "$level" == "-1" ]]
then
state="broken"
fi
fi
# Get the maintained status for this app
maintained=$(jq --raw-output ".apps[\"$app\"] | .antifeatures | .[]" "$script_dir/apps.json" | grep -q 'package-not-maintained' && echo unmaintained || echo maintained)
cp "$script_dir/../badges/$state.svg" "$script_dir/../badges/${app}.status.svg"
cp "$script_dir/../badges/$maintained.svg" "$script_dir/../badges/${app}.maintain.svg"
# The loop is fed with every app from the list, via the manifest IDs.
done <<< "$(jq --raw-output ".apps[] | .manifest.id" "$script_dir/apps.json")"
fi

maintenance/update_level_apps.sh Executable file (+69 lines)

@@ -0,0 +1,69 @@
#!/bin/bash
# Get the script's directory
if [ "${0:0:1}" == "/" ]; then script_dir="$(dirname "$0")"; else script_dir="$(echo $PWD/$(dirname "$0" | cut -d '.' -f2) | sed 's@/$@@')"; fi
# Only run this on the true main, official CI, to prevent packagers' instances from pushing weird stuff
grep -q "BASE_URL.*=.*ci-apps.yunohost.org" "$script_dir/../config.py" || exit 0
# Remove the previous clone of YunoHost/apps
rm -r "$script_dir/../../apps"
# Move into a fresh temporary clone of the git repository, for the python script
tmp_dir=$(mktemp -d)
git clone -q git@github.com:YunoHost/apps.git "$tmp_dir"
cd $tmp_dir
# Create a new branch to commit the changes
git checkout -b modify_level
public_result_list="$script_dir/results/logs/list_level_stable_amd64.json"
majorregressions=""
minorregressions=""
improvements=""
# For each app in the result file
for APP in $(jq -r 'keys[]' "$public_result_list")
do
# Check if the app is still in the list
if ! jq -r 'keys[]' "apps.json" | grep -qw $APP; then
continue
fi
current_level="$(jq -r ".\"$APP\".level" apps.json)"
# Get the level from the stable+amd64 tests
new_level="$(jq -r ".\"$APP\".level" "$public_result_list")"
if [[ "$current_level" != "null" ]] && [[ "$new_level" -lt "$current_level" ]]
then
if [[ "$new_level" -le 4 ]] && [[ "$current_level" -gt 4 ]]
then
majorregressions+=" - $APP $current_level -> $new_level | https://ci-apps.yunohost.org/ci/apps/$APP/latestjob\n"
else
minorregressions+=" - $APP $current_level -> $new_level | https://ci-apps.yunohost.org/ci/apps/$APP/latestjob\n"
fi
elif [[ "$new_level" != "$current_level" ]]
then
improvements+=" - $APP $current_level -> $new_level | https://ci-apps.yunohost.org/ci/apps/$APP/latestjob\n"
fi
# Inject the new level value into apps.json
jq --sort-keys --indent 4 --arg APP $APP --argjson new_level $new_level '.[$APP].level=$new_level' apps.json > apps.json.new
mv apps.json.new apps.json
done
# Show the changes (2 lines of context are enough to identify the app)
# git diff -U2 --raw
# Stage the list changes for the next commit
git add --all *.json
git commit -q -m "Update app levels according to CI results$(echo -e "\n\n### Major Regressions\n$majorregressions\n\n### Minor Regressions\n$minorregressions\n\n### Improvements\n$improvements")"
# Git must be configured on the machine:
# git config --global user.email "MAIL..."
# git config --global user.name "yunohost-bot"
# ssh-keygen -t rsa -f $HOME/.ssh/github -P ''    # to create an ssh key without a passphrase
# And in the ssh config:
# Host github.com
#     IdentityFile ~/.ssh/github
# The key must also be registered in yunohost-bot's github account
git push -q -u origin modify_level

(deleted file)

@@ -1,4 +0,0 @@
-ALTER TABLE {tableName} ADD COLUMN COLNew {type};
-ALTER TABLE Repo ADD COLUMN state string;
-ALTER TABLE Repo ADD COLUMN random_job_day int;

results/badges/get_badges.sh Executable file (+13 lines)

@@ -0,0 +1,13 @@
#!/bin/bash
# Get the path of this script
script_dir="$(dirname "$(realpath "$0")")"
wget -q https://upload.wikimedia.org/wikipedia/commons/1/1d/No_image.svg -O "$script_dir/maintained.svg"
wget -q https://img.shields.io/badge/Status-Package%20not%20maintained-red.svg -O "$script_dir/unmaintained.svg"
wget -q https://img.shields.io/badge/Status-working-brightgreen.svg -O "$script_dir/working.svg"
wget -q https://img.shields.io/badge/Status-Just%20got%20added%20to%20catalog-yellowgreen.svg -O "$script_dir/just-got-added-to-catalog.svg"
wget -q https://img.shields.io/badge/Status-In%20progress-orange.svg -O "$script_dir/inprogress.svg"
wget -q https://img.shields.io/badge/Status-Not%20working-red.svg -O "$script_dir/notworking.svg"
wget -q https://img.shields.io/badge/Status-Broken-red.svg -O "$script_dir/broken.svg"

results/logs/.gitkeep Normal file (empty)

results/summary/empty.png Normal file (binary, 272 B, not shown)

run.py (32 lines changed)

@@ -10,6 +10,7 @@ import asyncio
 import traceback
 import itertools
 import tracemalloc
+import string
 import hmac
 import hashlib
@@ -40,6 +41,10 @@ from playhouse.shortcuts import model_to_dict
 from models import Repo, Job, db, Worker
 from schedule import always_relaunch, once_per_day
 
+# This is used by ciclic
+admin_token = ''.join(random.choices(string.ascii_lowercase + string.digits, k=32))
+open(".admin_token", "w").write(admin_token)
+
 try:
     asyncio_all_tasks = asyncio.all_tasks
 except AttributeError as e:
@@ -425,7 +430,7 @@ async def jobs_dispatcher():
 async def run_job(worker, job):
-    path_to_analyseCI = app.config.PATH_TO_ANALYZER
+    path_to_analyzer = app.config.PATH_TO_ANALYZER
 
     await broadcast({
         "action": "update_job",
@@ -435,11 +440,11 @@ async def run_job(worker, job):
     # fake stupid command, whould run CI instead
     task_logger.info(f"Starting job '{job.name}' #{job.id}...")
 
-    cwd = os.path.split(path_to_analyseCI)[0]
+    cwd = os.path.split(path_to_analyzer)[0]
     arguments = f' {job.url_or_path} "{job.name}" {job.id} {worker.id}'
-    task_logger.info(f"Launch command: /bin/bash " + path_to_analyseCI + arguments)
+    task_logger.info(f"Launch command: /bin/bash " + path_to_analyzer + arguments)
     try:
-        command = await asyncio.create_subprocess_shell("/bin/bash " + path_to_analyseCI + arguments,
+        command = await asyncio.create_subprocess_shell("/bin/bash " + path_to_analyzer + arguments,
             cwd=cwd,
             # default limit is not enough in some situations
             limit=(2 ** 16) ** 10,
@@ -777,22 +782,11 @@ def require_token():
             'to access the API, please '
             'refer to the README'}, 403)
 
-        if not os.path.exists("tokens"):
-            api_logger.warning("No tokens available and a user is trying "
-                               "to access the API")
-            return response.json({'status': 'invalide token'}, 403)
-
-        async with aiofiles.open('tokens', mode='r') as file:
-            tokens = await file.read()
-            tokens = {x.strip() for x in tokens.split("\n") if x.strip()}
-
         token = request.headers["X-Token"].strip()
 
-        if token not in tokens:
-            api_logger.warning(f"someone tried to access the API using "
-                               "the {token} but it's not a valid token in "
-                               "the 'tokens' file")
-            return response.json({'status': 'invalide token'}, 403)
+        if not hmac.compare_digest(token, admin_token):
+            api_logger.warning("someone tried to access the API using an invalid admin token")
+            return response.json({'status': 'invalid token'}, 403)
 
         result = await f(request, *args, **kwargs)
         return result
@@ -1265,7 +1259,7 @@ def main(config="./config.py"):
         "BASE_URL": "",
         "PORT": 4242,
         "DEBUG": False,
-        "PATH_TO_ANALYZER": "/home/CI_package_check/analyseCI.sh",
+        "PATH_TO_ANALYZER": "./analyze_yunohost_app.sh",
         "MONITOR_APPS_LIST": False,
         "MONITOR_GIT": False,
         "MONITOR_ONLY_GOOD_QUALITY_APPS": False,