Mirror of https://github.com/YunoHost/yunorunner.git, synced 2024-09-03 20:05:52 +02:00

Commit a435d2973f ("Moar refactoring x_x"), parent 912dfda4d7
4 changed files with 197 additions and 279 deletions

@@ -1,239 +0,0 @@
#!/bin/bash

if [ "${0:0:1}" == "/" ]; then script_dir="$(dirname "$0")"; else script_dir="$(echo $PWD/$(dirname "$0" | cut -d '.' -f2) | sed 's@/$@@')"; fi

cd $script_dir

if [ $# -ne 4 ]
then
    echo "This script needs four arguments: the package to be tested, the name of the test, the job ID, and the ID of the worker."
    exit 1
fi

mkdir -p /var/run/yunorunner/locks/

worker_id="$4"
lock_yunorunner="/var/run/yunorunner/locks/${worker_id}.lock"
lock_package_check="./package_check/pcheck-${worker_id}.lock"

# 10800 sec / 60 = 180 min = 3 hours
TIMEOUT="10800"

BASE_URL="$(cat "./config.py" | tr -d ' "' | grep "^BASE_URL=" | cut --delimiter="=" --fields=2)"
ynh_branch="$(cat "./config.py" | tr -d ' "' | grep "^YNH_BRANCH=" | cut --delimiter="=" --fields=2)"
arch="$(dpkg --print-architecture)"
dist="$(cat "./config.py" | tr -d ' "' | grep "^DIST=" | cut --delimiter="=" --fields=2)"

# Enable chat notifications only on main CI
if [[ "$ynh_branch" == "stable" ]] && [[ "$arch" == "amd64" ]] && [[ -e "./maintenance/chat_notify.sh" ]]
then
    chat_notify="./maintenance/chat_notify.sh"
else
    chat_notify="true" # 'true' is a dummy program that won't do anything
fi

#=================================================
# Delay the beginning of this script, to prevent concurrent executions
#=================================================

# Get 3 random digits, to build a value between 001 and 999
milli_sleep=$(head --lines=20 /dev/urandom | tr --complement --delete '0-9' | head --bytes=3)
# And wait for this value in milliseconds
sleep "5.$milli_sleep"

#=================================================
# Check / take the lock
#=================================================

if [ -e $lock_yunorunner ]
then
    lock_yunorunner_PID="$(cat $lock_yunorunner)"
    if [ -n "$lock_yunorunner_PID" ]
    then
        # We check that the corresponding PID is still running AND that the PPid is not 1 ..
        # If the PPid is 1, it tends to indicate that a previous analyseCI is still running and was not killed, and therefore got adopted by init.
        # This typically happens when the job is cancelled / restarted .. though we should have a better way of handling cancellation from yunorunner directly :/
        if ps --pid $lock_yunorunner_PID | grep --quiet $lock_yunorunner_PID && [[ $(grep PPid /proc/${lock_yunorunner_PID}/status | awk '{print $2}') != "1" ]]
        then
            echo -e "\e[91m\e[1m!!! Another analyseCI process is currently using the lock $lock_yunorunner !!!\e[0m"
            "$chat_notify" "CI miserably crashed because another process is using the lock"
            sleep 10
            exit 1
        fi
    fi
    [[ $(grep PPid /proc/${lock_yunorunner_PID}/status | awk '{print $2}') != "1" ]] && { echo "Killing stale analyseCI process ..."; kill -s SIGTERM $lock_yunorunner_PID; sleep 30; }
    echo "Removing stale lock"
    rm -f $lock_yunorunner
fi

echo "$$" > $lock_yunorunner

#=================================================
# Cleanup after exit/kill
#=================================================

function cleanup()
{
    rm $lock_yunorunner

    if [ -n "$package_check_pid" ]
    then
        kill -s SIGTERM $package_check_pid
        WORKER_ID="$worker_id" ARCH="$arch" DIST="$dist" YNH_BRANCH="$ynh_branch" "./package_check/package_check.sh" --force-stop
    fi
}

trap cleanup EXIT
trap 'exit 2' TERM

#=================================================
# Test parameters
#=================================================

repo="$1"
test_name="$2"
job_id="$3"

# Keep only the repository
repo=$(echo $repo | cut --delimiter=';' --fields=1)
app="$(echo $test_name | awk '{print $1}')"

test_full_log=${app}_${arch}_${ynh_branch}_complete.log
test_json_results="./results/logs/${app}_${arch}_${ynh_branch}_results.json"
test_url="$BASE_URL/job/$job_id"

# Make sure /usr/local/bin is in the path, because that's where the lxc/lxd bin lives
export PATH=$PATH:/usr/local/bin

#=================================================
# Timeout handling utils
#=================================================

function watchdog() {
    local package_check_pid=$1
    # Start a loop while package check is working
    while ps --pid $package_check_pid | grep --quiet $package_check_pid
    do
        sleep 10

        if [ -e $lock_package_check ]
        then
            lock_timestamp="$(stat -c %Y $lock_package_check)"
            current_timestamp="$(date +%s)"
            if [[ "$(($current_timestamp - $lock_timestamp))" -gt "$TIMEOUT" ]]
            then
                kill -s SIGTERM $package_check_pid
                rm -f $lock_package_check
                force_stop "Package check aborted, timeout reached ($(( $TIMEOUT / 60 )) min)."
                return 1
            fi
        fi
    done

    if [ ! -e "./package_check/results-$worker_id.json" ]
    then
        force_stop "It looks like package_check did not finish properly ... on $test_url"
        return 1
    fi
}
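
One detail worth noting in the watchdog above: the timeout is not counted from the moment package_check was launched, but from the last modification time of the per-worker lock file, so the job is only killed once that lock has gone untouched for more than TIMEOUT seconds. The age computation reduces to this (illustrative, hypothetical lock name):

    lock="./package_check/pcheck-1.lock"
    age=$(( $(date +%s) - $(stat -c %Y "$lock") ))
    [ "$age" -gt 10800 ] && echo "lock untouched for more than 3 hours, aborting"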

function force_stop() {
    local message="$1"

    echo -e "\e[91m\e[1m!!! $message !!!\e[0m"

    "$chat_notify" "While testing $app: $message"

    WORKER_ID="$worker_id" ARCH="$arch" DIST="$dist" YNH_BRANCH="$ynh_branch" "./package_check/package_check.sh" --force-stop
}

#=================================================
# The actual testing ...
#=================================================

# Exec package check according to the architecture
echo "$(date) - Starting a test for $app on architecture $arch, distribution $dist, with YunoHost $ynh_branch"

rm -f "./package_check/Complete-$worker_id.log"
rm -f "./package_check/results-$worker_id.json"

# Here we use a weird trick with 'script -qefc'
# The reason is that:
# if running the command in background (with &), the corresponding command *won't be in a tty* (not sure exactly why),
# therefore later the command `lxc exec -t` *won't be in a tty* (despite the -t) and command outputs will appear empty...
# Instead, with the magic of script -qefc, we can pretend to be in a tty somehow...
# Adapted from https://stackoverflow.com/questions/32910661/pretend-to-be-a-tty-in-bash-for-any-command
cmd="WORKER_ID=$worker_id ARCH=$arch DIST=$dist YNH_BRANCH=$ynh_branch nice --adjustment=10 './package_check/package_check.sh' '$repo' 2>&1"
script -qefc "$cmd" &

watchdog $! || exit 1
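
The comment block above is the crux of the tty trick: when yunorunner captures this script's output through a pipe, package_check and `lxc exec -t` see no terminal and their output comes back empty, while util-linux `script -qefc` runs the command inside a pseudo-terminal. A standalone way to observe the difference (illustrative only, not part of the CI):

    sh -c 'test -t 1 && echo "stdout is a tty" || echo "stdout is a pipe"' | cat
    # -> "stdout is a pipe"
    script -qefc 'test -t 1 && echo "stdout is a tty" || echo "stdout is a pipe"' /dev/null | cat
    # -> "stdout is a tty", because script allocated a pseudo-terminal for the command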

# Copy the complete log
cp "./package_check/Complete-$worker_id.log" "./results/logs/$test_full_log"
cp "./package_check/results-$worker_id.json" "$test_json_results"
rm -f "./package_check/Complete-$worker_id.log"
rm -f "./package_check/results-$worker_id.json"
[ ! -e "./package_check/summary.png" ] || cp "./package_check/summary.png" "./results/summary/${job_id}.png"

if [ -n "$BASE_URL" ]
then
    full_log_path="$BASE_URL/logs/$test_full_log"
else
    full_log_path="$(pwd)/logs/$test_full_log"
fi

echo "The complete log for this application was copied and is accessible at $full_log_path"

echo ""
echo "-------------------------------------------"
echo ""

#=================================================
# Check / update level of the app
#=================================================

public_result_list="./results/logs/list_level_${ynh_branch}_$arch.json"
[ -s "$public_result_list" ] || echo "{}" > "$public_result_list"

# Check that we have a valid json...
jq -e '' "$test_json_results" >/dev/null 2>/dev/null && bad_json="false" || bad_json="true"

# Get new level and previous level
app_level="$(jq -r ".level" "$test_json_results")"
previous_level="$(jq -r ".$app" "$public_result_list")"

# We post a message on the chat if we're running tests on stable/amd64
if [ "$bad_json" == "true" ] || [ "$app_level" -eq 0 ]; then
    message="Application $app completely failed the continuous integration tests"
elif [ -z "$previous_level" ]; then
    message="Application $app rises from level (unknown) to level $app_level"
elif [ $app_level -gt $previous_level ]; then
    message="Application $app rises from level $previous_level to level $app_level"
elif [ $app_level -lt $previous_level ]; then
    message="Application $app goes down from level $previous_level to level $app_level"
elif [ $app_level -ge 6 ]; then
    # Don't notify anything, to reduce CI flood on the apps chatroom
    message=""
else
    message="Application $app stays at level $app_level"
fi

# Send chat notification
if [[ -n "$message" ]]
then
    message+=" on $test_url"
    echo $message
    "$chat_notify" "$message"
fi

# Update/add the results from package_check in the public result list
if [ "$bad_json" == "false" ]
then
    jq --argfile results "$test_json_results" ".\"$app\"=\$results" $public_result_list > $public_result_list.new
    mv $public_result_list.new $public_result_list
fi
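
The jq invocation just above injects the freshly produced per-app results object into the shared level list under the app's key. A toy run of the same filter (hypothetical file contents; note that --argfile is a jq extension which newer jq releases flag as deprecated):

    echo '{"level": 7, "tests": []}' > results.json
    echo '{"otherapp": {"level": 3}}' > list.json
    jq --argfile results results.json '."myapp" = $results' list.json
    # -> { "otherapp": { "level": 3 }, "myapp": { "level": 7, "tests": [] } }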

# Annnd we're done !
echo "$(date) - Test completed"

[ "$app_level" -gt 5 ] && exit 0 || exit 1

@@ -21,4 +21,4 @@
MCHOME="/opt/matrix-commander/"
MCARGS="-c $MCHOME/credentials.json --store $MCHOME/store"
-timeout 10 "$MCHOME/venv/bin/matrix-commander" $MCARGS -m "$@" --room 'yunohost-apps'
+timeout 10 "$MCHOME/venv/bin/matrix-commander" $MCARGS -m "$@" --room 'yunohost-apps' --markdown
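
For context on the --markdown flag added above: the notification text this helper forwards can now contain markdown, since run.py (below) starts formatting job references as links such as [#42](.../job/42), and matrix-commander is asked to render that instead of sending it as plain text. A hypothetical invocation:

    ./maintenance/chat_notify.sh "App foo rises from level 6 to 7 in job [#42](https://ci.example.org/job/42) !"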

@@ -83,35 +83,33 @@ function tweak_yunorunner() {
    # Remove the original database, in order to rebuild it with the new config.
    rm -f $YUNORUNNER_HOME/db.sqlite

+    cat >$YUNORUNNER_HOME/config.py <<EOF
+BASE_URL = "https://$domain/$ci_path"
+PORT = $port
+WORKER_COUNT = 1
+YNH_BRANCH = "stable"
+DIST = "$(grep "VERSION_CODENAME=" /etc/os-release | cut -d '=' -f 2)"
+ARCH = "$(dpkg --print-architecture)"
+PACKAGE_CHECK_DIR = "$YUNORUNNER_HOME/package_check/"
+EOF

    # For automatic / "main" CI we want to auto schedule jobs using the app list
    if [ $ci_type == "auto" ]
    then
        cat >$YUNORUNNER_HOME/config.py <<EOF
-BASE_URL = "https://$domain/$ci_path"
-PORT = $port
-PATH_TO_ANALYZER = "$YUNORUNNER_HOME/analyze_yunohost_app.sh"
MONITOR_APPS_LIST = True
MONITOR_GIT = True
MONITOR_ONLY_GOOD_QUALITY_APPS = False
MONTHLY_JOBS = True
-WORKER_COUNT = 1
-YNH_BRANCH = "stable"
-DIST = "$(grep "VERSION_CODENAME=" /etc/os-release | cut -d '=' -f 2)"
EOF
    # For Dev CI, we want to control the job scheduling entirely
    # (c.f. the github webhooks)
    else
        cat >$YUNORUNNER_HOME/config.py <<EOF
-BASE_URL = "https://$domain/$ci_path"
-PORT = $port
-PATH_TO_ANALYZER = "$YUNORUNNER_HOME/analyze_yunohost_app.sh"
MONITOR_APPS_LIST = False
MONITOR_GIT = False
MONITOR_ONLY_GOOD_QUALITY_APPS = False
MONTHLY_JOBS = False
-WORKER_COUNT = 1
-YNH_BRANCH = "stable"
-DIST = "$(grep "VERSION_CODENAME=" /etc/os-release | cut -d '=' -f 2)"
EOF
    fi

run.py (213 changed lines)

@@ -103,7 +103,8 @@ api_logger = logging.getLogger("api")
app = Sanic(__name__, dumps=my_json_dumps)
app.static('/static', './static/')

-loader = FileSystemLoader(os.path.abspath(os.path.dirname(__file__)) + '/templates', encoding='utf8')
+yunorunner_dir = os.path.abspath(os.path.dirname(__file__))
+loader = FileSystemLoader(yunorunner_dir + '/templates', encoding='utf8')
jinja = SanicJinja2(app, loader=loader)

# to avoid conflict with vue.js

@@ -181,7 +182,7 @@ def set_random_day_for_monthy_job():


async def create_job(app_id, repo_url, job_comment=""):
-    job_name = f"{app_id}"
+    job_name = app_id
    if job_comment:
        job_name += f" ({job_comment})"

@@ -429,23 +430,113 @@ async def jobs_dispatcher():
    }


+async def cleanup_old_package_check_if_lock_exists(worker, job, ignore_error=False):
+
+    await asyncio.sleep(3)
+
+    if not os.path.exists(app.config.PACKAGE_CHECK_LOCK_PER_WORKER.format(worker_id=worker.id)):
+        return
+
+    job.log += f"Lock for worker {worker.id} still exists ... trying to clean up the old check process ...\n"
+    job.save()
+    task_logger.info(f"Lock for worker {worker.id} still exists ... trying to clean up the old check process ...")
+
+    cwd = os.path.split(app.config.PACKAGE_CHECK_PATH)[0]
+    env = {
+        "WORKER_ID": worker.id,
+        "ARCH": app.config.ARCH,
+        "DIST": app.config.DIST,
+        "YNH_BRANCH": app.config.YNH_BRANCH,
+        "PATH": os.environ["PATH"] + ":/usr/local/bin",  # This is because lxc/lxd is in /usr/local/bin
+    }
+
+    cmd = f"{app.config.PACKAGE_CHECK_PATH} --force-stop"
+    try:
+        command = await asyncio.create_subprocess_shell(cmd,
+                                                        cwd=cwd,
+                                                        env=env,
+                                                        stdout=asyncio.subprocess.PIPE,
+                                                        stderr=asyncio.subprocess.PIPE)
+        while not command.stdout.at_eof():
+            await asyncio.sleep(1)
+    except Exception:
+        traceback.print_exc()
+        task_logger.exception(f"ERROR in job '{job.name} #{job.id}'")
+
+        job.log += "\n"
+        job.log += "Exception:\n"
+        job.log += traceback.format_exc()
+
+        if not ignore_error:
+            job.state = "error"
+
+        return False
+    except CancelledError:
+        command.terminate()
+
+        if not ignore_error:
+            job.log += "\nFailed to kill the old check process?"
+            job.state = "canceled"
+
+        task_logger.info(f"Job '{job.name} #{job.id}' has been canceled")
+
+        return False
+    else:
+        job.log += "Cleaning done\n"
+        return True
+    finally:
+        job.save()


async def run_job(worker, job):
-    path_to_analyzer = app.config.PATH_TO_ANALYZER

    await broadcast({
        "action": "update_job",
        "data": model_to_dict(job),
    }, ["jobs", f"job-{job.id}", f"app-jobs-{job.url_or_path}"])

-    # fake stupid command, whould run CI instead
+    cleanup_ret = await cleanup_old_package_check_if_lock_exists(worker, job)
+    if cleanup_ret is False:
+        return
+
+    job_app = job.name.split()[0]

    task_logger.info(f"Starting job '{job.name}' #{job.id}...")

-    cwd = os.path.split(path_to_analyzer)[0]
-    arguments = f' {job.url_or_path} "{job.name}" {job.id} {worker.id}'
-    task_logger.info(f"Launch command: /bin/bash " + path_to_analyzer + arguments)
+    cwd = os.path.split(app.config.PACKAGE_CHECK_PATH)[0]
+    env = {
+        "WORKER_ID": worker.id,
+        "ARCH": app.config.ARCH,
+        "DIST": app.config.DIST,
+        "YNH_BRANCH": app.config.YNH_BRANCH,
+        "PATH": os.environ["PATH"] + ":/usr/local/bin",  # This is because lxc/lxd is in /usr/local/bin
+    }
+
+    now = datetime.now().strftime("%d/%m/%Y - %H:%M:%S")
+    msg = now + f" - Starting test for {job.name} on arch {app.config.ARCH}, distrib {app.config.DIST}, with YunoHost {app.config.YNH_BRANCH}"
+    job.log += "=" * len(msg) + "\n"
+    job.log += msg + "\n"
+    job.log += "=" * len(msg) + "\n"
+    job.save()
+
+    result_json = app.config.PACKAGE_CHECK_RESULT_JSON_PER_WORKER.format(worker_id=worker.id)
+    full_log = app.config.PACKAGE_CHECK_FULL_LOG_PER_WORKER.format(worker_id=worker.id)
+    summary_png = app.config.PACKAGE_CHECK_SUMMARY_PNG_PER_WORKER.format(worker_id=worker.id)
+
+    if os.path.exists(result_json):
+        os.remove(result_json)
+    if os.path.exists(full_log):
+        os.remove(full_log)
+    if os.path.exists(summary_png):
+        os.remove(summary_png)
+
+    cmd = f"timeout {app.config.TIMEOUT} --signal=TERM nice --adjustment=10 /bin/bash {app.config.PACKAGE_CHECK_PATH} {job.url_or_path}"
+    task_logger.info(f"Launching command: {cmd}")

    try:
-        command = await asyncio.create_subprocess_shell("/bin/bash " + path_to_analyzer + arguments,
+        command = await asyncio.create_subprocess_shell(cmd,
                                                         cwd=cwd,
+                                                        env=env,
                                                         # default limit is not enough in some situations
                                                         limit=(2 ** 16) ** 10,
                                                         stdout=asyncio.subprocess.PIPE,

@@ -460,9 +551,6 @@ async def run_job(worker, job):
                job.log += "Uhoh ?! UnicodeDecodeError in yunorunner !?"
                job.log += str(e)

-        # XXX seems to be okay performance wise but that's probably going to be
-        # a bottleneck at some point :/
-        # theoretically jobs are going to have slow output
        job.save()

        await broadcast({

@@ -474,9 +562,7 @@ async def run_job(worker, job):
    except CancelledError:
        command.terminate()
        job.log += "\n"
-        job.end_time = datetime.now()
        job.state = "canceled"
-        job.save()

        task_logger.info(f"Job '{job.name} #{job.id}' has been canceled")
    except Exception:

@@ -487,20 +573,84 @@ async def run_job(worker, job):
        job.log += "Job error on:\n"
        job.log += traceback.format_exc()

-        job.end_time = datetime.now()
        job.state = "error"
-        job.save()
-
-        # XXX add mechanism to reschedule error jobs

    else:
        task_logger.info(f"Finished job '{job.name}'")

-        await command.wait()
+        if command.returncode == 124:
+            job.log += f"\nJob timed out ({app.config.TIMEOUT / 60} min.)\n"
+            job.state = "error"
+        else:
+            if command.returncode != 0 or not os.path.exists(result_json):
+                job.log += f"\nJob failed ? Return code is {command.returncode} / Or maybe the json result doesn't exist...\n"
+                job.state = "error"
+            else:
+                job.log += f"\nPackage check completed"
+                results = json.load(open(result_json))
+                level = results["level"]
+                job.state = "done" if level > 4 else "failure"
+
+                job.log += f"\nThe full log is available at {app.config.BASE_URL}/logs/{job.id}.log\n"
+
+                shutil.copy(full_log, yunorunner_dir + f"/results/logs/{job.id}.log")
+                shutil.copy(result_json, yunorunner_dir + f"/results/logs/{job_app}_{app.config.ARCH}_{app.config.YNH_BRANCH}_results.json")
+                shutil.copy(summary_png, yunorunner_dir + f"/results/summary/{job.id}.png")
+
+                public_result_json_path = yunorunner_dir + f"/results/logs/list_level_{app.config.YNH_BRANCH}_{app.config.ARCH}.json"
+                if not os.path.exists(public_result_json_path) or not open(public_result_json_path).read().strip():
+                    public_result = {}
+                else:
+                    public_result = json.load(open(public_result_json_path))
+
+                public_result[job_app] = results
+                open(public_result_json_path, "w").write(json.dumps(public_result))
+    finally:
        job.end_time = datetime.now()
-        job.state = "done" if command.returncode == 0 else "failure"
+
+        now = datetime.now().strftime("%d/%m/%Y - %H:%M:%S")
+        msg = now + f" - Finished job for {job.name}"
+        job.log += "=" * len(msg) + "\n"
+        job.log += msg + "\n"
+        job.log += "=" * len(msg) + "\n"

        job.save()

+        if "ci-apps.yunohost.org" in app.config.BASE_URL:
+            async with aiohttp.ClientSession() as session:
+                async with session.get(APPS_LIST) as resp:
+                    data = await resp.json()
+                    data = data["apps"]
+                    public_level = data.get(job_app, {}).get("level")
+
+            job_url = app.config.BASE_URL + "/job/" + job.id
+            job_id_with_url = f"[#{job.id}]({job_url})"
+            if job.state == "error":
+                msg = f"Job {job_id_with_url} for {job_app} failed miserably :("
+            elif level == 0:
+                msg = f"App {job_app} failed all tests in job {job_id_with_url} :("
+            elif public_level is None:
+                msg = f"App {job_app} rises from level (unknown) to {level} in job {job_id_with_url} !"
+            elif level > public_level:
+                msg = f"App {job_app} rises from level {public_level} to {level} in job {job_id_with_url} !"
+            elif level < public_level:
+                msg = f"App {job_app} goes down from level {public_level} to {level} in job {job_id_with_url}"
+            elif level < 6:
+                msg = f"App {job_app} stays at level {level} in job {job_id_with_url}"
+            else:
+                # Don't notify anything, to reduce CI flood on the apps chatroom if the app is already level 6+
+                msg = ""
+
+            if msg:
+                cmd = f"{yunorunner_dir}/maintenance/chat_notify.sh '{msg}'"
+                try:
+                    command = await asyncio.create_subprocess_shell(cmd)
+                    while not command.stdout.at_eof():
+                        await asyncio.sleep(1)
+                except:
+                    pass
+
+        await cleanup_old_package_check_if_lock_exists(worker, job, ignore_error=True)

    # remove ourself from the state
    del jobs_in_memory_state[job.id]
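
The `command.returncode == 124` branch above matches the behaviour of GNU coreutils timeout, which run_job now uses to wrap package_check (the "timeout {TIMEOUT} --signal=TERM nice ..." command built earlier): when the time limit expires, timeout kills the child and itself exits with status 124, and that status is what the asyncio subprocess reports. A quick way to observe it (illustrative):

    timeout --signal=TERM 1 sleep 5; echo "exit code: $?"
    # -> exit code: 124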

@@ -1107,8 +1257,7 @@ async def github(request):

    # Check the comment contains proper keyword trigger
    body = hook_infos["comment"]["body"].strip()[:100].lower()
-    triggers = ["!testme", "!gogogadgetoci", "By the power of systemd, I invoke The Great App CI to test this Pull Request!"]
-    if not any(trigger.lower() in body for trigger in triggers):
+    if not any(trigger.lower() in body for trigger in app.config.WEBHOOK_TRIGGERS):
        # Nothing to do but success anyway (204 = No content)
        return response.json({'msg': "Nothing to do"}, 204)

@@ -1182,8 +1331,7 @@ async def github(request):
    respjson = await resp.json()
    api_logger.info("Added comment %s" % respjson["html_url"])

-    catchphrases = ["Alrighty!", "Fingers crossed!", "May the CI gods be with you!", ":carousel_horse:", ":rocket:", ":sunflower:", "Meow :cat2:", ":v:", ":stuck_out_tongue_winking_eye:" ]
-    catchphrase = random.choice(catchphrases)
+    catchphrase = random.choice(app.config.WEBHOOK_CATCHPHRASES)

    # Dirty hack with BASE_URL passed from cmd argument because we can't use request.url_for because Sanic < 20.x
    job_url = app.config.BASE_URL + app.url_for("html_job", job_id=job.id)
    badge_url = app.config.BASE_URL + app.url_for("api_badge_job", job_id=job.id)

@@ -1258,21 +1406,32 @@ def main(config="./config.py"):
    default_config = {
        "BASE_URL": "",
        "PORT": 4242,
+        "TIMEOUT": 10800,
        "DEBUG": False,
-        "PATH_TO_ANALYZER": "./analyze_yunohost_app.sh",
        "MONITOR_APPS_LIST": False,
        "MONITOR_GIT": False,
        "MONITOR_ONLY_GOOD_QUALITY_APPS": False,
        "MONTHLY_JOBS": False,
        "ANSWER_TO_AUTO_UPDATER": True,
        "WORKER_COUNT": 1,
+        "ARCH": "amd64",
+        "DIST": "bullseye",
+        "PACKAGE_CHECK_DIR": "./package_check/",
+        "WEBHOOK_TRIGGERS": ["!testme", "!gogogadgetoci", "By the power of systemd, I invoke The Great App CI to test this Pull Request!"],
+        "WEBHOOK_CATCHPHRASES": ["Alrighty!", "Fingers crossed!", "May the CI gods be with you!", ":carousel_horse:", ":rocket:", ":sunflower:", "Meow :cat2:", ":v:", ":stuck_out_tongue_winking_eye:"],
    }

    app.config.update_config(default_config)
    app.config.update_config(config)

-    if not os.path.exists(app.config.PATH_TO_ANALYZER):
-        print(f"Error: analyzer script doesn't exist at '{app.config.PATH_TO_ANALYZER}'. Please fix the configuration in {config}")
+    app.config.PACKAGE_CHECK_PATH = app.config.PACKAGE_CHECK_DIR + "package_check.sh"
+    app.config.PACKAGE_CHECK_LOCK_PER_WORKER = app.config.PACKAGE_CHECK_DIR + "pcheck-{worker_id}.lock"
+    app.config.PACKAGE_CHECK_FULL_LOG_PER_WORKER = app.config.PACKAGE_CHECK_DIR + "full_log_{worker_id}.log"
+    app.config.PACKAGE_CHECK_RESULT_JSON_PER_WORKER = app.config.PACKAGE_CHECK_DIR + "results_{worker_id}.json"
+    app.config.PACKAGE_CHECK_SUMMARY_PNG_PER_WORKER = app.config.PACKAGE_CHECK_DIR + "summary_{worker_id}.png"
+
+    if not os.path.exists(app.config.PACKAGE_CHECK_PATH):
+        print(f"Error: analyzer script doesn't exist at '{app.config.PACKAGE_CHECK_PATH}'. Please fix the configuration in {config}")
        sys.exit(1)

    if app.config.MONITOR_APPS_LIST: