mirror of https://github.com/YunoHost-Apps/nomad_ynh.git synced 2024-09-03 19:55:53 +02:00

Packaging v2

This commit is contained in:
Félix Piédallu 2024-02-14 14:13:22 +01:00 committed by Félix Piédallu
parent 9efc56b426
commit a49bcf19ae
15 changed files with 314 additions and 943 deletions

View file

@ -1,47 +0,0 @@
;; Test complet client
; Manifest
domain="domain.tld"
is_public=1
node_type="client"
bootstrap_expect="1"
retry_join="192.168.1.100"
server_ip="192.168.1.100"
driver_lxc=1
; Checks
pkg_linter=1
setup_sub_dir=0
setup_root=1
setup_nourl=0
setup_private=1
setup_public=1
upgrade=1
#upgrade=1 from_commit=CommitHash
backup_restore=1
multi_instance=0
port_already_use=0
change_url=1
;; Test complet server
; Manifest
domain="domain.tld"
is_public=1
node_type="server"
bootstrap_expect="1"
retry_join="192.168.1.100"
server_ip="none..."
driver_lxc=1
; Checks
pkg_linter=1
setup_sub_dir=0
setup_root=1
setup_nourl=0
setup_private=1
setup_public=1
upgrade=1
#upgrade=1 from_commit=CommitHash
backup_restore=1
multi_instance=0
port_already_use=0
change_url=1
;;; Options
Email=
Notification=none

View file

@ -1,41 +1,41 @@
#----------------------- client-specific options ---------------------
client {
# A boolean indicating if client mode is enabled. All other client configuration options depend on this value.
# Defaults to false.
enabled = true
# This is the state dir used to store client state. By default, it lives inside of the data_dir, in the
# "client" sub-path.
# "client" sub-path.
# state_dir = "/tmp/client"
# A directory used to store allocation data. Depending on the workload, the size of this directory can grow
# arbitrarily large as it is used to store downloaded artifacts for drivers (QEMU images, JAR files, etc.). It is therefore
# important to ensure this directory is placed some place on the filesystem with adequate storage capacity. By default, this
# directory lives under the data_dir at the "alloc" sub-path.
# alloc_dir = "/tmp/alloc"
# An array of server addresses. This list is used to register the client with the server nodes and advertise
# the available resources so that the agent can receive work.
servers = ["__SERVER_IP__:__RPC_PORT__"]
servers = ["__SERVER_IP__:__PORT_RPC__"]
# This is the value used to uniquely identify the local agent's node registration with the servers. This can be any arbitrary
# string but must be unique to the cluster. By default, if not specified, a randomly-generated UUID will be used.
# node_id = "foo"
# A string used to logically group client nodes by class. This can be used during job placement as a filter.
# This option is not required and has no default.
# node_class = "experimentation"
# This is a key/value mapping of metadata pairs. This is a free-form map and can contain any string values.
meta {}
# This is a key/value mapping of internal configuration for clients, such as for driver configuration.
options {}
# This is a string to force network fingerprinting to use a specific network interface
# network_interface = "eth0"
# This is an int that sets the default link speed of network interfaces, in megabits, if their speed can not be
# determined dynamically.
network_speed = 100
}
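For reference, when the install script renders this template with ynh_add_config, the placeholders are replaced by the app's settings; with the example values used elsewhere in this package (server IP 192.168.1.100, RPC port 4647), the servers line above would come out roughly as:

servers = ["192.168.1.100:4647"]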

View file

@ -1,7 +0,0 @@
SOURCE_URL=https://github.com/hashicorp/nomad-driver-lxc/archive/68239f4f639bde68e80616b7e931b8cc368969b0.tar.gz
SOURCE_SUM=50ddae947a189fefe0f6a5419d8f5ae749daa124f100b3ce900d83eab073c2ad
SOURCE_SUM_PRG=sha256sum
SOURCE_FORMAT=tar.gz
SOURCE_IN_SUBDIR=true
SOURCE_FILENAME=
SOURCE_EXTRACT=true

View file

@ -1,7 +1,7 @@
#sub_path_only rewrite ^__PATH__$ __PATH__/ permanent;
location __PATH__/ {
proxy_pass http://127.0.0.1:__HTTP_PORT__;
proxy_pass http://127.0.0.1:__PORT__;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# Nomad blocking queries will remain open for a default of 5 minutes.
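The packaged configuration presumably follows that comment with timeout and buffering directives sized for those long-lived requests. A minimal sketch of the directives usually paired with such a comment (the values here are illustrative assumptions, not necessarily what the package ships):

proxy_read_timeout 310s;        # stay open past Nomad's 5-minute blocking query window
proxy_buffering off;            # stream responses for blocking queries and log streaming
proxy_http_version 1.1;         # needed for WebSocket upgrades used by the web UI
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";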

View file

@ -1,11 +1,11 @@
# -------------- General options ---------------
# Specifies the region the Nomad agent is a member of. A region typically maps to a
# geographic region, for example USA, with potentially multiple zones, which map to
# datacenters such as us-west and us-east. Defaults to global.
#region = "USA"
# Datacenter of the local agent. All members of a datacenter should share a local
# LAN connection. Defaults to dc1.
#datacenter = "data-center-one"
@ -17,14 +17,14 @@
# A local directory used to store agent state. Client nodes use this directory by
# default to store temporary allocation data as well as cluster information. Server
# nodes use this directory to store cluster state, including the replicated log and
# snapshot data. This option is required to start the Nomad agent.
data_dir = "__DATADIR__"
data_dir = "__DATA_DIR__"
# Controls the verbosity of logs the Nomad agent will output. Valid log levels include
# WARN, INFO, or DEBUG in increasing order of verbosity. Defaults to INFO.
#log_level = "DEBUG"
# Used to indicate which address the Nomad agent should bind to for network services,
# including the HTTP interface as well as the internal gossip protocol and RPC mechanism.
# This should be specified in IP format, and can be used to easily bind all network services
# to the same address. It is also possible to bind the individual services to different
@ -40,15 +40,15 @@ enable_debug = false
# Controls the network ports used for different services required by the Nomad agent.
ports {
# The port used to run the HTTP server. Applies to both client and server nodes. Defaults to __HTTP_PORT__.
http = __HTTP_PORT__
http = __PORT__
# The port used for internal RPC communication between agents and servers, and for inter-server
# traffic for the consensus algorithm (raft). Defaults to __RPC_PORT__. Only used on server nodes.
rpc = __RPC_PORT__
# traffic for the consensus algorithm (raft). Defaults to __PORT_RPC__. Only used on server nodes.
rpc = __PORT_RPC__
# The port used for the gossip protocol for cluster membership. Both TCP and UDP should be routable
# between the server nodes on this port. Defaults to __SERF_PORT__. Only used on server nodes.
serf = __SERF_PORT__
# between the server nodes on this port. Defaults to __PORT_SERF__. Only used on server nodes.
serf = __PORT_SERF__
}
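Given the port defaults declared in manifest.toml further down (main 4646, rpc 4647 fixed, serf 4648 fixed), the rendered block on a typical install would look roughly like this sketch:

ports {
    http = 4646
    rpc  = 4647
    serf = 4648
}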
# Controls the bind address for individual network services. Any values configured in this block
@ -56,16 +56,16 @@ ports {
addresses {
# The address the HTTP server is bound to. This is the most common bind address to change.
# Applies to both clients and servers.
# http = "0.0.0.0"
# The address to bind the internal RPC interfaces to. Should be exposed only to other cluster
# members if possible. Used only on server nodes, but must be accessible from all agents.
# rpc = "0.0.0.0"
# The address used to bind the gossip layer to. Both a TCP and UDP listener will be exposed on this
# address. Should be restricted to only server nodes from the same datacenter if possible.
# Used only on server nodes.
# serf = "0.0.0.0"
}
@ -76,12 +76,12 @@ addresses {
#advertise {
# The address to advertise for the RPC interface. This address should be reachable by all of
# the agents in the cluster.
# rpc = "1.2.3.4:__RPC_PORT__"
# rpc = "1.2.3.4:__PORT_RPC__"
# The address advertised for the gossip layer. This address must be reachable from all server nodes.
# It is not required that clients can reach this address.
# serf = "1.2.3.4:__SERF_PORT__"
# serf = "1.2.3.4:__PORT_SERF__"
#}
# Used to control how the Nomad agent exposes telemetry data to external metrics collection servers.
@ -93,7 +93,7 @@ telemetry {
# statsd_address = "1.2.3.4:5678"
# A boolean indicating if gauge values should not be prefixed with the local hostname.
# disable_hostname = false
}
# Enables gracefully leaving when receiving the interrupt signal. By default, the agent will
@ -101,7 +101,7 @@ telemetry {
leave_on_interrupt = false
# Enables gracefully leaving when receiving the terminate signal. By default, the agent will
# exit forcefully on any signal.
leave_on_terminate = false
# Enables logging to syslog. This option only works on Unix based systems.

View file

@ -1,109 +0,0 @@
{
"name": "Nomad",
"id": "nomad",
"packaging_format": 1,
"description": {
"en": "Simple and flexible workload orchestrator"
},
"version": "1.7.7~ynh1",
"url": "https://www.nomadproject.io/",
"upstream": {
"license": "MPL-2.0",
"website": "https://www.nomadproject.io/",
"admindoc": "https://www.nomadproject.io/docs",
"code": "https://github.com/hashicorp/nomad"
},
"license": "MPL-2.0",
"maintainer": {
"name": "",
"email": ""
},
"requirements": {
"yunohost": ">= 11.2"
},
"multi_instance": false,
"services": [
"nginx"
],
"arguments": {
"install": [
{
"name": "domain",
"type": "domain"
},
{
"name": "is_public",
"type": "boolean",
"default": true
},
{
"name": "node_type",
"type": "select",
"ask": {
"en": "What kind of Nomad node you want to install ?"
},
"choices": [
"server",
"client"
],
"default": "server"
},
{
"name": "bootstrap_expect",
"type": "select",
"ask": {
"en": "[Server only] How many server nodes to wait for before bootstrapping ?"
},
"choices": [
"1",
"3",
"5",
"7",
"9"
],
"default": "1",
"help": {
"en": "For production, it's recommanded to have 3 to 5 server nodes."
}
},
{
"name": "retry_join",
"type": "string",
"ask": {
"en": "[Server only] What is the IP of another server to join ?"
},
"example": "192.168.1.100",
"optional": true
},
{
"name": "server_ip",
"type": "string",
"ask": {
"en": "[Client only] What is the IP of the Nomad server node ?"
},
"example": "192.168.1.100",
"optional": true
},
{
"name": "server_ip",
"type": "string",
"ask": {
"en": "[Client only] What is the IP of the Nomad server node ?"
},
"example": "192.168.1.100",
"optional": true
},
{
"name": "driver_lxc",
"type": "boolean",
"ask": {
"en": "[Client only] Do you want to install LXC driver ?"
},
"default": true,
"help": {
"en": "It will also install lxc."
}
}
]
}
}

manifest.toml: new file, 107 lines added
View file

@ -0,0 +1,107 @@
#:schema https://raw.githubusercontent.com/YunoHost/apps/master/schemas/manifest.v2.schema.json
packaging_format = 2
id = "nomad"
name = "Nomad"
description.en = "Simple and flexible workload orchestrator"
version = "1.7.7~ynh2"
maintainers = []
[upstream]
license = "MPL-2.0"
website = "https://www.nomadproject.io"
admindoc = "https://www.nomadproject.io/docs"
code = "https://github.com/hashicorp/nomad"
cpe = "cpe:2.3:a:hashicorp:nomad"
[integration]
yunohost = ">= 11.2"
architectures = "all"
multi_instance = false
ldap = "not_relevant"
sso = "not_relevant"
disk = "50M" # FIXME: replace with an **estimate** minimum disk requirement. e.g. 20M, 400M, 1G, ...
ram.build = "50M" # FIXME: replace with an **estimate** minimum ram requirement. e.g. 50M, 400M, 1G, ...
ram.runtime = "50M" # FIXME: replace with an **estimate** minimum ram requirement. e.g. 50M, 400M, 1G, ...
[install]
[install.domain]
type = "domain"
[install.init_main_permission]
type = "group"
default = "visitors"
[install.node_type]
ask.en = "What kind of Nomad node you want to install ?"
type = "select"
choices = ["server", "client"]
default = "server"
[install.bootstrap_expect]
ask.en = "[Server only] How many server nodes to wait for before bootstrapping ?"
help.en = "For production, it's recommanded to have 3 to 5 server nodes."
type = "select"
choices = ["1", "3", "5", "7", "9"]
default = "1"
[install.retry_join]
ask.en = "[Server only] What is the IP of another server to join ?"
type = "string"
example = "192.168.1.100"
optional = true
[install.server_ip]
ask.en = "[Client only] What is the IP of the Nomad server node ?"
type = "string"
example = "192.168.1.100"
optional = true
[install.driver_lxc]
ask.en = "[Client only] Do you want to install LXC driver ?"
help.en = "It will also install lxc."
type = "boolean"
default = true
[resources]
[resources.sources.driver_lxc]
url = "https://github.com/hashicorp/nomad-driver-lxc/archive/68239f4f639bde68e80616b7e931b8cc368969b0.tar.gz"
sha256 = "50ddae947a189fefe0f6a5419d8f5ae749daa124f100b3ce900d83eab073c2ad"
[resources.system_user]
[resources.install_dir]
[resources.data_dir]
subdirs = ["plugins"]
[resources.permissions]
main.url = "/"
[resources.ports]
main.default = 4646
rpc.default = 4647
rpc.fixed = true
rpc.exposed = "TCP"
serf.default = 4648
serf.fixed = true
serf.exposed = "TCP"
[resources.apt]
packages = []
packages_from_raw_bash = """
if [ "$node_type" == "client" ]; then
if [ "$driver_lxc" -eq 1 ]; then
echo pkg-config lxc-dev lxc lxc-templates
fi
fi
"""
[resources.apt.extras.nomad]
repo = "deb https://apt.releases.hashicorp.com bullseye main"
key = "https://apt.releases.hashicorp.com/gpg"
packages = ["nomad"]
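The packages_from_raw_bash snippet above replaces the conditional dependency logic the v1 scripts carried: the snippet is run when the apt resource is provisioned and whatever it prints on stdout is added to the package list. A quick sketch of how it evaluates, assuming a client node with the LXC driver enabled:

# answers given at install time (assumed for this example)
node_type="client"
driver_lxc=1
# the snippet then prints: pkg-config lxc-dev lxc lxc-templates
# a server node, or driver_lxc=0, prints nothing, so no extra packages are pulled in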

View file

@ -4,15 +4,6 @@
# COMMON VARIABLES
#=================================================
# dependencies used by the app (must be on a single line)
pkg_dependencies=""
extra_pkg_dependencies="nomad"
server_pkg_dependencies=""
client_pkg_dependencies=""
client_lxc_pkg_dependencies="pkg-config lxc-dev lxc lxc-templates"
go_version=1.20
#=================================================

View file

@ -10,64 +10,41 @@
source ../settings/scripts/_common.sh
source /usr/share/yunohost/helpers
#=================================================
# MANAGE SCRIPT FAILURE
#=================================================
ynh_clean_setup () {
true
}
# Exit if an error occurs during the execution of the script
ynh_abort_if_errors
#=================================================
# LOAD SETTINGS
#=================================================
ynh_print_info --message="Loading installation settings..."
app=$YNH_APP_INSTANCE_NAME
config_path=$(ynh_app_setting_get --app=$app --key=config_path)
domain=$(ynh_app_setting_get --app=$app --key=domain)
datadir=$(ynh_app_setting_get --app=$app --key=datadir)
#=================================================
# DECLARE DATA AND CONF FILES TO BACKUP
#=================================================
ynh_print_info --message="Declaring files to be backed up..."
#=================================================
# BACKUP THE APP MAIN DIR
#=================================================
ynh_backup --src_path="$install_dir"
#=================================================
# BACKUP THE DATA DIR
#=================================================
ynh_backup --src_path="$datadir" --is_big
ynh_backup --src_path="$data_dir" --is_big
#=================================================
# BACKUP THE NGINX CONFIGURATION
# SYSTEM CONFIGURATION
#=================================================
ynh_backup --src_path="/etc/nginx/conf.d/$domain.d/$app.conf"
#=================================================
# SPECIFIC BACKUP
#=================================================
# BACKUP LOGROTATE
#=================================================
ynh_backup --src_path="/etc/systemd/system/$app.service"
ynh_backup --src_path="/etc/logrotate.d/$app"
#=================================================
# BACKUP SYSTEMD
#=================================================
ynh_backup --src_path="/etc/systemd/system/$app.service"
#=================================================
# BACKUP VARIOUS FILES
#=================================================
ynh_backup --src_path="$config_path"
ynh_backup --src_path="/var/log/$app/"
#=================================================
# END OF SCRIPT
#=================================================

View file

@ -9,62 +9,6 @@
source _common.sh
source /usr/share/yunohost/helpers
#=================================================
# RETRIEVE ARGUMENTS
#=================================================
old_domain=$YNH_APP_OLD_DOMAIN
old_path=$YNH_APP_OLD_PATH
new_domain=$YNH_APP_NEW_DOMAIN
new_path=$YNH_APP_NEW_PATH
app=$YNH_APP_INSTANCE_NAME
#=================================================
# LOAD SETTINGS
#=================================================
ynh_script_progression --message="Loading installation settings..." --weight=1
# Needed for helper "ynh_add_nginx_config"
config_path=$(ynh_app_setting_get --app=$app --key=config_path)
# Add settings here as needed by your application
http_port=$(ynh_app_setting_get --app=$app --key=http_port)
#=================================================
# BACKUP BEFORE CHANGE URL THEN ACTIVE TRAP
#=================================================
ynh_script_progression --message="Backing up the app before changing its URL (may take a while)..." --weight=1
# Backup the current version of the app
ynh_backup_before_upgrade
ynh_clean_setup () {
# Remove the new domain config file, the remove script won't do it as it doesn't know yet its location.
ynh_secure_remove --file="/etc/nginx/conf.d/$new_domain.d/$app.conf"
# Restore it if the upgrade fails
ynh_restore_upgradebackup
}
# Exit if an error occurs during the execution of the script
ynh_abort_if_errors
#=================================================
# CHECK WHICH PARTS SHOULD BE CHANGED
#=================================================
change_domain=0
if [ "$old_domain" != "$new_domain" ]
then
change_domain=1
fi
change_path=0
if [ "$old_path" != "$new_path" ]
then
change_path=1
fi
#=================================================
# STANDARD MODIFICATIONS
#=================================================
@ -72,42 +16,14 @@ fi
#=================================================
ynh_script_progression --message="Stopping a systemd service..." --weight=1
ynh_systemd_action --service_name=$app --action="stop" --log_path="/var/log/$app/$app.log"
ynh_systemd_action --service_name="$app" --action="stop" --log_path="/var/log/$app/$app.log"
#=================================================
# MODIFY URL IN NGINX CONF
#=================================================
ynh_script_progression --message="Updating NGINX web server configuration..." --weight=1
nginx_conf_path=/etc/nginx/conf.d/$old_domain.d/$app.conf
# Change the path in the NGINX config file
if [ $change_path -eq 1 ]
then
# Make a backup of the original NGINX config file if modified
ynh_backup_if_checksum_is_different --file="$nginx_conf_path"
# Set global variables for NGINX helper
domain="$old_domain"
path_url="$new_path"
# Create a dedicated NGINX config
ynh_add_nginx_config
fi
# Change the domain for NGINX
if [ $change_domain -eq 1 ]
then
# Delete file checksum for the old conf file location
ynh_delete_file_checksum --file="$nginx_conf_path"
mv $nginx_conf_path /etc/nginx/conf.d/$new_domain.d/$app.conf
# Store file checksum for the new config file location
ynh_store_file_checksum --file="/etc/nginx/conf.d/$new_domain.d/$app.conf"
fi
#=================================================
# SPECIFIC MODIFICATIONS
#=================================================
# ...
#=================================================
ynh_change_url_nginx_config
#=================================================
# GENERIC FINALISATION
@ -117,14 +33,7 @@ fi
ynh_script_progression --message="Starting a systemd service..." --weight=1
# Start a systemd service
ynh_systemd_action --service_name=$app --action="start" --log_path="/var/log/$app/$app.log" --line_match="Nomad agent started"
#=================================================
# RELOAD NGINX
#=================================================
ynh_script_progression --message="Reloading NGINX web server..." --weight=1
ynh_systemd_action --service_name=nginx --action=reload
ynh_systemd_action --service_name="$app" --action="start" --log_path="/var/log/$app/$app.log" --line_match="Nomad agent started"
#=================================================
# END OF SCRIPT

View file

@ -11,168 +11,41 @@ source ynh_install_go
source /usr/share/yunohost/helpers
#=================================================
# MANAGE SCRIPT FAILURE
# INITIALIZE AND STORE SETTINGS
#=================================================
ynh_clean_setup () {
true
}
# Exit if an error occurs during the execution of the script
ynh_abort_if_errors
#=================================================
# RETRIEVE ARGUMENTS FROM THE MANIFEST
#=================================================
domain=$YNH_APP_ARG_DOMAIN
path_url="/"
is_public=$YNH_APP_ARG_IS_PUBLIC
node_type=$YNH_APP_ARG_NODE_TYPE
bootstrap_expect=$YNH_APP_ARG_BOOTSTRAP_EXPECT
retry_join=$YNH_APP_ARG_RETRY_JOIN
server_ip=$YNH_APP_ARG_SERVER_IP
driver_lxc=$YNH_APP_ARG_DRIVER_LXC
app=$YNH_APP_INSTANCE_NAME
client_lxc_bridge="lxcbr0"
client_lxc_plage_ip="10.1.44"
client_lxc_main_iface=$(ip route | grep default | awk '{print $5;}')
ynh_app_setting_set --app="$app" --key=client_lxc_bridge --value="$client_lxc_bridge"
ynh_app_setting_set --app="$app" --key=client_lxc_plage_ip --value="$client_lxc_plage_ip"
ynh_app_setting_set --app="$app" --key=client_lxc_main_iface --value="$client_lxc_main_iface"
#=================================================
# CHECK IF THE APP CAN BE INSTALLED WITH THESE ARGS
#=================================================
ynh_script_progression --message="Validating installation parameters..." --weight=1
# Register (book) web path
ynh_webpath_register --app=$app --domain=$domain --path_url=$path_url
#=================================================
# STORE SETTINGS FROM MANIFEST
#=================================================
ynh_script_progression --message="Storing installation settings..." --weight=1
ynh_app_setting_set --app=$app --key=domain --value=$domain
ynh_app_setting_set --app=$app --key=path --value=$path_url
ynh_app_setting_set --app=$app --key=node_type --value=$node_type
ynh_app_setting_set --app=$app --key=bootstrap_expect --value=$bootstrap_expect
ynh_app_setting_set --app=$app --key=retry_join --value=$retry_join
ynh_app_setting_set --app=$app --key=server_ip --value=$server_ip
ynh_app_setting_set --app=$app --key=driver_lxc --value=$driver_lxc
ynh_app_setting_set --app=$app --key=client_lxc_bridge --value=$client_lxc_bridge
ynh_app_setting_set --app=$app --key=client_lxc_plage_ip --value=$client_lxc_plage_ip
ynh_app_setting_set --app=$app --key=client_lxc_main_iface --value=$client_lxc_main_iface
#=================================================
# STANDARD MODIFICATIONS
#=================================================
# FIND AND OPEN A PORT
#=================================================
ynh_script_progression --message="Finding an available port..." --weight=1
# Find an available port
http_port=4646
ynh_port_available --port=$http_port || ynh_die --message="Port $http_port is needs to be available for this app"
ynh_app_setting_set --app=$app --key=http_port --value=$http_port
rpc_port=4647
ynh_port_available --port=$rpc_port || ynh_die --message="Port $rpc_port is needs to be available for this app"
ynh_app_setting_set --app=$app --key=rpc_port --value=$rpc_port
serf_port=4648
ynh_port_available --port=$serf_port || ynh_die --message="Port $serf_port is needs to be available for this app"
ynh_app_setting_set --app=$app --key=serf_port --value=$serf_port
# Open the port
ynh_script_progression --message="Configuring firewall..." --weight=1
ynh_exec_warn_less yunohost firewall allow --no-upnp TCP $rpc_port
needs_exposed_ports="$rpc_port"
if [ "$node_type" == "server" ]
then
ynh_exec_warn_less yunohost firewall allow --no-upnp TCP $serf_port
needs_exposed_ports="$serf_port $needs_exposed_ports"
fi
#=================================================
# INSTALL DEPENDENCIES
#=================================================
ynh_script_progression --message="Installing dependencies..." --weight=1
if [ "$node_type" == "server" ]
then
pkg_dependencies="$pkg_dependencies $server_pkg_dependencies"
fi
if [ "$node_type" == "client" ]
then
if [ $driver_lxc -eq 1 ]
then
client_pkg_dependencies="$client_pkg_dependencies $client_lxc_pkg_dependencies"
ynh_exec_warn_less ynh_install_go --go_version=$go_version
fi
pkg_dependencies="$pkg_dependencies $client_pkg_dependencies"
fi
ynh_install_app_dependencies $pkg_dependencies
ynh_install_extra_app_dependencies --repo="deb https://apt.releases.hashicorp.com $(lsb_release -cs) main" --package="$extra_pkg_dependencies" --key="https://apt.releases.hashicorp.com/gpg"
#=================================================
# CREATE DEDICATED USER
#=================================================
ynh_script_progression --message="Configuring system user..." --weight=1
# Create a system user
ynh_system_user_create --username=$app
#=================================================
# NGINX CONFIGURATION
#=================================================
ynh_script_progression --message="Configuring NGINX web server..." --weight=1
# Create a dedicated NGINX config
ynh_add_nginx_config
#=================================================
# SPECIFIC SETUP
#=================================================
# CREATE DATA DIRECTORY
#=================================================
ynh_script_progression --message="Creating a data directory..." --weight=1
ynh_script_progression --message="Configuring the data directory..." --weight=1
datadir=/home/yunohost.app/$app
ynh_app_setting_set --app=$app --key=datadir --value=$datadir
mkdir -p $datadir
mkdir -p $datadir/plugins
chmod 750 "$datadir"
chmod -R o-rwx "$datadir"
chown -R $app:$app "$datadir"
chmod -R o-rwx "$data_dir"
chown -R "$app:$app" "$data_dir"
#=================================================
# BUILD DRIVERS
#=================================================
if [ "$node_type" == "client" ] && [ "$driver_lxc" -eq 1 ]; then
ynh_script_progression --message="Building LXC driver..."
if [ "$node_type" == "client" ]
then
if [ $driver_lxc -eq 1 ]
then
ynh_script_progression --message="Building LXC driver..."
ynh_exec_warn_less ynh_install_go --go_version="$go_version"
tempdir="$(mktemp -d)"
ynh_setup_source --dest_dir="$tempdir" --source_id="driver-lxc"
pushd $tempdir
final_path=$tempdir
ynh_use_go
export GOPATH="$tempdir/go"
export GOCACHE="$tempdir/.cache"
ynh_exec_warn_less $ynh_go build
popd
mv -f $tempdir/nomad-driver-lxc $datadir/plugins/nomad-driver-lxc
ynh_secure_remove --file="$tempdir"
fi
ynh_setup_source --dest_dir="$install_dir/driver_lxc" --source_id="driver_lxc"
pushd "$install_dir/driver_lxc"
ynh_use_go
export GOPATH="$install_dir/driver_lxc/go"
export GOCACHE="$install_dir/driver_lxc/.cache"
ynh_exec_warn_less "$ynh_go" build
popd
mv -f "$install_dir/driver_lxc/nomad-driver-lxc" "$data_dir/plugins/nomad-driver-lxc"
ynh_secure_remove --file="$install_dir/driver_lxc"
fi
#=================================================
@ -181,105 +54,62 @@ fi
ynh_script_progression --message="Adding a configuration file..." --weight=1
config_path=/etc/$app.d
ynh_app_setting_set --app=$app --key=config_path --value=$config_path
ynh_app_setting_set --app="$app" --key=config_path --value="$config_path"
mkdir -p $config_path
chmod 750 "$config_path"
chmod -R o-rwx "$config_path"
chown -R $app:$app "$config_path"
mkdir -p "$config_path"
ynh_add_config --template="../conf/nomad.hcl" --destination="$config_path/nomad.hcl"
chmod 400 "$config_path/nomad.hcl"
chown $app:$app "$config_path/nomad.hcl"
ynh_add_config --template="nomad.hcl" --destination="$config_path/nomad.hcl"
if [ "$node_type" == "server" ]
then
ynh_add_config --template="../conf/server.hcl" --destination="$config_path/server.hcl"
chmod 400 "$config_path/server.hcl"
chown $app:$app "$config_path/server.hcl"
if [ "$node_type" == "server" ]; then
ynh_add_config --template="server.hcl" --destination="$config_path/server.hcl"
fi
if [ "$node_type" == "client" ]
then
ynh_add_config --template="../conf/client.hcl" --destination="$config_path/client.hcl"
chmod 400 "$config_path/client.hcl"
chown $app:$app "$config_path/client.hcl"
if [ $driver_lxc -eq 1 ]
then
ynh_add_config --template="../conf/driver-lxc.hcl" --destination="$config_path/driver-lxc.hcl"
chmod 400 "$config_path/driver-lxc.hcl"
chown $app:$app "$config_path/driver-lxc.hcl"
if [ "$node_type" == "client" ]; then
ynh_add_config --template="client.hcl" --destination="$config_path/client.hcl"
ynh_add_config --template="../conf/dnsmasq-lxd" --destination="/etc/dnsmasq.d/lxd"
systemctl restart dnsmasq
if [ "$driver_lxc" -eq 1 ]; then
ynh_add_config --template="driver-lxc.hcl" --destination="$config_path/driver-lxc.hcl"
ynh_add_config --template="dnsmasq-lxd" --destination="/etc/dnsmasq.d/lxd"
systemctl restart dnsmasq
if [ ! ${PACKAGE_CHECK_EXEC:-0} -eq 1 ]; then
ynh_add_config --template="../conf/lxc-net" --destination="/etc/default/lxc-net"
fi
ynh_add_config --template="../conf/default.conf" --destination="/etc/lxc/default.conf"
systemctl enable lxc-net --quiet
ynh_systemd_action --service_name=lxc-net --action="restart" --line_match="Finished LXC network bridge setup" --log_path="systemd"
fi
if [ ! "${PACKAGE_CHECK_EXEC:-0}" -eq 1 ]; then
ynh_add_config --template="lxc-net" --destination="/etc/default/lxc-net"
fi
ynh_add_config --template="default.conf" --destination="/etc/lxc/default.conf"
systemctl enable lxc-net --quiet
ynh_systemd_action --service_name=lxc-net --action="restart" --line_match="Finished LXC network bridge setup" --log_path="systemd"
fi
fi
chmod -R go-rwx,u-w "$config_path"
chown -R "$app:$app" "$config_path"
#=================================================
# SETUP SYSTEMD
# SYSTEM CONFIGURATION
#=================================================
ynh_script_progression --message="Configuring a systemd service..." --weight=1
ynh_script_progression --message="Adding system configurations related to $app..." --weight=1
# Create a dedicated NGINX config
ynh_add_nginx_config
systemd_user=$app
if [ "$node_type" == "client" ]
then
systemd_user="root"
fi
# Create a dedicated systemd config
case "$node_type" in
client) systemd_user="root" ;;
server) systemd_user="$app" ;;
esac
ynh_add_systemd_config
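The systemd_user value chosen above is consumed when the service template is rendered; presumably the packaged systemd unit carries a matching placeholder on its User= line, along the lines of this assumed snippet:

# in the packaged systemd service template (assumed placeholder name)
User=__SYSTEMD_USER__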
#=================================================
# GENERIC FINALIZATION
#=================================================
# SETUP LOGROTATE
#=================================================
ynh_script_progression --message="Configuring log rotation..." --weight=1
yunohost service add "$app" --log="/var/log/$app/$app.log" --needs_exposed_ports="$port_rpc $port_serf"
# Use logrotate to manage application logfile(s)
ynh_use_logrotate
#=================================================
# INTEGRATE SERVICE IN YUNOHOST
#=================================================
ynh_script_progression --message="Integrating service in YunoHost..." --weight=1
yunohost service add $app --log="/var/log/$app/$app.log" --needs_exposed_ports $needs_exposed_ports
#=================================================
# START SYSTEMD SERVICE
#=================================================
ynh_script_progression --message="Starting a systemd service..." --weight=1
# Start a systemd service
ynh_systemd_action --service_name=$app --action="start" --log_path="/var/log/$app/$app.log" --line_match="Nomad agent started"
#=================================================
# SETUP SSOWAT
#=================================================
ynh_script_progression --message="Configuring permissions..." --weight=1
# Make app public if necessary
if [ $is_public -eq 1 ]
then
# Everyone can access the app.
# The "main" permission is automatically created before the install script.
ynh_permission_update --permission="main" --add="visitors"
fi
#=================================================
# RELOAD NGINX
#=================================================
ynh_script_progression --message="Reloading NGINX web server..." --weight=1
ynh_systemd_action --service_name=nginx --action=reload
ynh_systemd_action --service_name="$app" --action="start" --log_path="/var/log/$app/$app.log" --line_match="Nomad agent started"
#=================================================
# END OF SCRIPT

View file

@ -11,114 +11,47 @@ source ynh_install_go
source /usr/share/yunohost/helpers
#=================================================
# LOAD SETTINGS
# Stopping Nomad
#=================================================
ynh_script_progression --message="Loading installation settings..." --weight=1
app=$YNH_APP_INSTANCE_NAME
domain=$(ynh_app_setting_get --app=$app --key=domain)
config_path=$(ynh_app_setting_get --app=$app --key=config_path)
datadir=$(ynh_app_setting_get --app=$app --key=datadir)
node_type=$(ynh_app_setting_get --app=$app --key=node_type)
driver_lxc=$(ynh_app_setting_get --app=$app --key=driver_lxc)
rpc_port=$(ynh_app_setting_get --app=$app --key=rpc_port)
serf_port=$(ynh_app_setting_get --app=$app --key=serf_port)
#=================================================
# STANDARD REMOVE
#=================================================
# REMOVE SERVICE INTEGRATION IN YUNOHOST
#=================================================
# Remove the service from the list of services known by YunoHost (added from `yunohost service add`)
if ynh_exec_warn_less yunohost service status $app >/dev/null
then
ynh_script_progression --message="Removing $app service integration..." --weight=1
yunohost service remove $app
fi
#=================================================
# STOP AND REMOVE SERVICE
#=================================================
ynh_script_progression --message="Stopping and removing the systemd service..." --weight=1
ynh_script_progression --message="Trying to stop gracefully $app..." --weight=1
ynh_exec_warn_less timeout 25 nomad node drain -self -enable -yes -deadline 20s
# Remove the dedicated systemd config
ynh_remove_systemd_config
#=================================================
# REMOVE LOGROTATE CONFIGURATION
# REMOVE SYSTEM CONFIGURATIONS
#=================================================
ynh_script_progression --message="Removing logrotate configuration..." --weight=1
ynh_script_progression --message="Removing system configurations related to $app..." --weight=1
# Remove the service from the list of services known by YunoHost (added from `yunohost service add`)
if ynh_exec_warn_less yunohost service status "$app" >/dev/null; then
yunohost service remove "$app"
fi
# Remove the dedicated systemd config
ynh_remove_systemd_config
# Remove the app-specific logrotate config
ynh_remove_logrotate
#=================================================
# REMOVE NGINX CONFIGURATION
#=================================================
ynh_script_progression --message="Removing NGINX web server configuration..." --weight=1
# Remove the dedicated NGINX config
ynh_remove_nginx_config
#=================================================
# CLOSE A PORT
#=================================================
if [ "$node_type" == "client" ] && [ "$driver_lxc" -eq 1 ]; then
client_lxc_bridge=$(ynh_app_setting_get --app="$app" --key=client_lxc_bridge)
if yunohost firewall list | grep -q "\- $rpc_port$"
then
ynh_script_progression --message="Closing port $rpc_port..." --weight=1
ynh_exec_warn_less yunohost firewall disallow TCP $rpc_port
fi
if yunohost firewall list | grep -q "\- $serf_port$"
then
ynh_script_progression --message="Closing port $serf_port..." --weight=1
ynh_exec_warn_less yunohost firewall disallow TCP $serf_port
fi
if [ "$node_type" == "client" ]
then
if [ $driver_lxc -eq 1 ]
then
client_lxc_bridge=$(ynh_app_setting_get --app=$app --key=client_lxc_bridge)
ynh_systemd_action --service_name=lxc-net --action="stop"
systemctl disable lxc-net --quiet
ynh_secure_remove --file="/etc/default/lxc-net"
ynh_secure_remove --file="/etc/lxc/default.conf"
ynh_secure_remove --file="/etc/dnsmasq.d/lxd"
systemctl restart dnsmasq
fi
ynh_systemd_action --service_name=lxc-net --action="stop"
systemctl disable lxc-net --quiet
ynh_secure_remove --file="/etc/default/lxc-net"
ynh_secure_remove --file="/etc/lxc/default.conf"
ynh_secure_remove --file="/etc/dnsmasq.d/lxd"
systemctl restart dnsmasq
fi
#=================================================
# REMOVE DEPENDENCIES
#=================================================
ynh_script_progression --message="Removing dependencies..." --weight=1
# Remove metapackage and its dependencies
ynh_remove_app_dependencies
if [ "$node_type" == "client" ]
then
if [ $driver_lxc -eq 1 ]
then
ynh_remove_go
fi
fi
#=================================================
# REMOVE DATA DIR
#=================================================
# Remove the data directory if --purge option is used
if [ "${YNH_APP_PURGE:-0}" -eq 1 ]
then
ynh_script_progression --message="Removing app data directory..." --weight=1
ynh_secure_remove --file="$datadir"
if [ "$node_type" == "client" ] && [ "$driver_lxc" -eq 1 ]; then
ynh_script_progression --message="Removing Go..." --weight=1
ynh_remove_go
fi
#=================================================
@ -134,16 +67,6 @@ ynh_secure_remove --file="$config_path"
# Remove the log files
ynh_secure_remove --file="/var/log/$app"
#=================================================
# GENERIC FINALIZATION
#=================================================
# REMOVE DEDICATED USER
#=================================================
ynh_script_progression --message="Removing the dedicated system user..." --weight=1
# Delete a system user
ynh_system_user_delete --username=$app
#=================================================
# END OF SCRIPT
#=================================================

View file

@ -11,89 +11,22 @@ source ../settings/scripts/_common.sh
source /usr/share/yunohost/helpers
#=================================================
# MANAGE SCRIPT FAILURE
# RESTORE THE APP MAIN DIR
#=================================================
ynh_script_progression --message="Restoring the app main directory..." --weight=1
ynh_clean_setup () {
true
}
# Exit if an error occurs during the execution of the script
ynh_abort_if_errors
ynh_restore_file --origin_path="$install_dir"
#=================================================
# LOAD SETTINGS
#=================================================
ynh_script_progression --message="Loading installation settings..." --weight=1
app=$YNH_APP_INSTANCE_NAME
domain=$(ynh_app_setting_get --app=$app --key=domain)
path_url=$(ynh_app_setting_get --app=$app --key=path)
config_path=$(ynh_app_setting_get --app=$app --key=config_path)
datadir=$(ynh_app_setting_get --app=$app --key=datadir)
node_type=$(ynh_app_setting_get --app=$app --key=node_type)
driver_lxc=$(ynh_app_setting_get --app=$app --key=driver_lxc)
http_port=$(ynh_app_setting_get --app=$app --key=http_port)
rpc_port=$(ynh_app_setting_get --app=$app --key=rpc_port)
serf_port=$(ynh_app_setting_get --app=$app --key=serf_port)
#=================================================
# CHECK IF THE APP CAN BE RESTORED
#=================================================
ynh_script_progression --message="Validating restoration parameters..." --weight=1
#=================================================
# STANDARD RESTORATION STEPS
#=================================================
# RECREATE THE DEDICATED USER
#=================================================
ynh_script_progression --message="Recreating the dedicated system user..." --weight=1
# Create the dedicated user (if not existing)
ynh_system_user_create --username=$app
chown -R "$app:$app" "$install_dir"
#=================================================
# RESTORE THE DATA DIRECTORY
#=================================================
ynh_script_progression --message="Restoring the data directory..." --weight=1
ynh_restore_file --origin_path="$datadir" --not_mandatory
ynh_restore_file --origin_path="$data_dir" --not_mandatory
mkdir -p $datadir
chmod 750 "$datadir"
chmod -R o-rwx "$datadir"
chown -R $app:$app "$datadir"
#=================================================
# SPECIFIC RESTORATION
#=================================================
# REINSTALL DEPENDENCIES
#=================================================
ynh_script_progression --message="Reinstalling dependencies..." --weight=1
if [ "$node_type" == "server" ]
then
pkg_dependencies="$pkg_dependencies $server_pkg_dependencies"
fi
if [ "$node_type" == "client" ]
then
if [ $driver_lxc -eq 1 ]
then
client_pkg_dependencies="$client_pkg_dependencies $client_lxc_pkg_dependencies"
fi
pkg_dependencies="$pkg_dependencies $client_pkg_dependencies"
fi
ynh_install_app_dependencies $pkg_dependencies
ynh_install_extra_app_dependencies --repo="deb https://apt.releases.hashicorp.com $(lsb_release -cs) main" --package="$extra_pkg_dependencies" --key="https://apt.releases.hashicorp.com/gpg"
#=================================================
# RESTORE THE NGINX CONFIGURATION
#=================================================
ynh_script_progression --message="Restoring the NGINX web server configuration..." --weight=1
ynh_restore_file --origin_path="/etc/nginx/conf.d/$domain.d/$app.conf"
chown -R "$app:$app" "$data_dir"
#=================================================
# RESTORE VARIOUS FILES
@ -102,72 +35,48 @@ ynh_script_progression --message="Restoring various files..." --weight=1
ynh_restore_file --origin_path="$config_path"
chmod 750 "$config_path"
chmod -R o-rwx "$config_path"
chown -R $app:$app "$config_path"
chmod -R go-rwx,u-w "$config_path"
chown -R "$app:$app" "$config_path"
# Open the port
ynh_script_progression --message="Configuring firewall..."
ynh_exec_warn_less yunohost firewall allow --no-upnp TCP $rpc_port
needs_exposed_ports="$rpc_port"
if [ "$node_type" == "server" ]
then
ynh_exec_warn_less yunohost firewall allow --no-upnp TCP $serf_port
needs_exposed_ports="$serf_port $needs_exposed_ports"
fi
if [ "$node_type" == "client" ]; then
if [ "$driver_lxc" -eq 1 ]; then
client_lxc_main_iface=$(ip route | grep default | awk '{print $5;}')
ynh_app_setting_set --app="$app" --key=client_lxc_main_iface --value="$client_lxc_main_iface"
if [ "$node_type" == "client" ]
then
if [ $driver_lxc -eq 1 ]
then
client_lxc_bridge=$(ynh_app_setting_get --app=$app --key=client_lxc_bridge)
client_lxc_plage_ip=$(ynh_app_setting_get --app=$app --key=client_lxc_plage_ip)
client_lxc_main_iface=$(ip route | grep default | awk '{print $5;}')
ynh_app_setting_set --app=$app --key=client_lxc_main_iface --value=$client_lxc_main_iface
ynh_add_config --template="dnsmasq-lxd" --destination="/etc/dnsmasq.d/lxd"
systemctl restart dnsmasq
ynh_add_config --template="../conf/dnsmasq-lxd" --destination="/etc/dnsmasq.d/lxd"
systemctl restart dnsmasq
if [ ! ${PACKAGE_CHECK_EXEC:-0} -eq 1 ]; then
ynh_add_config --template="../conf/lxc-net" --destination="/etc/default/lxc-net"
fi
ynh_secure_remove --file="/etc/lxc/default.conf"
ynh_add_config --template="../conf/default.conf" --destination="/etc/lxc/default.conf"
systemctl enable lxc-net --quiet
ynh_systemd_action --service_name=lxc-net --action="restart" --line_match="Finished LXC network bridge setup" --log_path="systemd"
fi
if [ ! "${PACKAGE_CHECK_EXEC:-0}" -eq 1 ]; then
ynh_add_config --template="lxc-net" --destination="/etc/default/lxc-net"
fi
ynh_secure_remove --file="/etc/lxc/default.conf"
ynh_add_config --template="default.conf" --destination="/etc/lxc/default.conf"
systemctl enable lxc-net --quiet
ynh_systemd_action --service_name=lxc-net --action="restart" --line_match="Finished LXC network bridge setup" --log_path="systemd"
fi
fi
#=================================================
# RESTORE SYSTEMD
# RESTORE SYSTEM CONFIGURATIONS
#=================================================
ynh_script_progression --message="Restoring the systemd configuration..." --weight=1
ynh_script_progression --message="Restoring system configurations related to $app..." --weight=1
ynh_restore_file --origin_path="/etc/nginx/conf.d/$domain.d/$app.conf"
ynh_restore_file --origin_path="/etc/systemd/system/$app.service"
systemctl enable $app.service --quiet
systemctl enable "$app.service" --quiet
yunohost service add "$app" --log="/var/log/$app/$app.log" --needs_exposed_ports="$port_rpc $port_serf"
#=================================================
# RESTORE THE LOGROTATE CONFIGURATION
#=================================================
ynh_script_progression --message="Restoring the logrotate configuration..." --weight=1
mkdir -p /var/log/$app
chown -R $app:$app "/var/log/$app"
ynh_restore_file --origin_path="/etc/logrotate.d/$app"
#=================================================
# INTEGRATE SERVICE IN YUNOHOST
#=================================================
ynh_script_progression --message="Integrating service in YunoHost..." --weight=1
yunohost service add $app --log="/var/log/$app/$app.log" --needs_exposed_ports $needs_exposed_ports
ynh_restore_file --origin_path="/var/log/$app/"
#=================================================
# START SYSTEMD SERVICE
#=================================================
ynh_script_progression --message="Starting a systemd service..." --weight=1
ynh_systemd_action --service_name=$app --action="start" --log_path="/var/log/$app/$app.log" --line_match="Nomad agent started"
ynh_systemd_action --service_name="$app" --action="start" --log_path="/var/log/$app/$app.log" --line_match="Nomad agent started"
#=================================================
# GENERIC FINALIZATION

View file

@ -10,47 +10,6 @@ source _common.sh
source ynh_install_go
source /usr/share/yunohost/helpers
#=================================================
# LOAD SETTINGS
#=================================================
ynh_script_progression --message="Loading installation settings..." --weight=1
app=$YNH_APP_INSTANCE_NAME
domain=$(ynh_app_setting_get --app=$app --key=domain)
path_url=$(ynh_app_setting_get --app=$app --key=path)
config_path=$(ynh_app_setting_get --app=$app --key=config_path)
datadir=$(ynh_app_setting_get --app=$app --key=datadir)
node_type=$(ynh_app_setting_get --app=$app --key=node_type)
bootstrap_expect=$(ynh_app_setting_get --app=$app --key=bootstrap_expect)
retry_join=$(ynh_app_setting_get --app=$app --key=retry_join)
server_ip=$(ynh_app_setting_get --app=$app --key=server_ip)
driver_lxc=$(ynh_app_setting_get --app=$app --key=driver_lxc)
http_port=$(ynh_app_setting_get --app=$app --key=http_port)
rpc_port=$(ynh_app_setting_get --app=$app --key=rpc_port)
serf_port=$(ynh_app_setting_get --app=$app --key=serf_port)
#=================================================
# CHECK VERSION
#=================================================
ynh_script_progression --message="Checking version..." --weight=1
upgrade_type=$(ynh_check_app_version_changed)
#=================================================
# BACKUP BEFORE UPGRADE THEN ACTIVE TRAP
#=================================================
ynh_script_progression --message="Backing up the app before upgrading (may take a while)..." --weight=1
# Backup the current version of the app
ynh_backup_before_upgrade
ynh_clean_setup () {
# Restore it if the upgrade fails
ynh_restore_upgradebackup
}
# Exit if an error occurs during the execution of the script
ynh_abort_if_errors
#=================================================
# STANDARD UPGRADE STEPS
#=================================================
@ -58,85 +17,30 @@ ynh_abort_if_errors
#=================================================
ynh_script_progression --message="Stopping a systemd service..." --weight=1
ynh_systemd_action --service_name=$app --action="stop" --log_path="/var/log/$app/$app.log"
ynh_systemd_action --service_name="$app" --action="stop" --log_path="/var/log/$app/$app.log"
#=================================================
# ENSURE DOWNWARD COMPATIBILITY
#=================================================
ynh_script_progression --message="Ensuring downward compatibility..." --weight=1
# ynh_script_progression --message="Ensuring downward compatibility..." --weight=1
# Cleaning legacy permissions
if ynh_legacy_permissions_exists; then
ynh_legacy_permissions_delete_all
ynh_app_setting_delete --app=$app --key=is_public
fi
#=================================================
# CREATE DEDICATED USER
#=================================================
ynh_script_progression --message="Making sure dedicated system user exists..." --weight=1
# Create a dedicated user (if not existing)
ynh_system_user_create --username=$app
#=================================================
# UPGRADE DEPENDENCIES
#=================================================
ynh_script_progression --message="Upgrading dependencies..." --weight=1
if [ "$node_type" == "server" ]
then
pkg_dependencies="$pkg_dependencies $server_pkg_dependencies"
fi
if [ "$node_type" == "client" ]
then
if [ $driver_lxc -eq 1 ]
then
client_pkg_dependencies="$client_pkg_dependencies $client_lxc_pkg_dependencies"
ynh_exec_warn_less ynh_install_go --go_version=$go_version
fi
pkg_dependencies="$pkg_dependencies $client_pkg_dependencies"
fi
ynh_install_app_dependencies $pkg_dependencies
ynh_install_extra_app_dependencies --repo="deb https://apt.releases.hashicorp.com $(lsb_release -cs) main" --package="$extra_pkg_dependencies" --key="https://apt.releases.hashicorp.com/gpg"
#=================================================
# NGINX CONFIGURATION
#=================================================
ynh_script_progression --message="Upgrading NGINX web server configuration..." --weight=1
# Create a dedicated NGINX config
ynh_add_nginx_config
#=================================================
# SPECIFIC UPGRADE
#=================================================
# BUILD DRIVERS
#=================================================
if [ "$node_type" == "client" ] && [ "$driver_lxc" -eq 1 ]; then
ynh_script_progression --message="Building LXC driver..."
if [ "$node_type" == "client" ]
then
if [ $driver_lxc -eq 1 ]
then
ynh_script_progression --message="Building LXC driver..." --weight=1
ynh_exec_warn_less ynh_install_go --go_version="$go_version"
tempdir="$(mktemp -d)"
ynh_setup_source --dest_dir="$tempdir" --source_id="driver-lxc"
pushd $tempdir
final_path=$tempdir
ynh_use_go
export GOPATH="$tempdir/go"
export GOCACHE="$tempdir/.cache"
ynh_exec_warn_less $ynh_go build
popd
mv -f $tempdir/nomad-driver-lxc $datadir/plugins/nomad-driver-lxc
ynh_secure_remove --file="$tempdir"
fi
ynh_setup_source --dest_dir="$install_dir/driver_lxc" --source_id="driver_lxc"
pushd "$install_dir/driver_lxc"
ynh_use_go
export GOPATH="$install_dir/driver_lxc/go"
export GOCACHE="$install_dir/driver_lxc/.cache"
ynh_exec_warn_less "$ynh_go" build
popd
mv -f "$install_dir/driver_lxc/nomad-driver-lxc" "$data_dir/plugins/nomad-driver-lxc"
ynh_secure_remove --file="$install_dir/driver_lxc"
fi
#=================================================
@ -144,84 +48,50 @@ fi
#=================================================
ynh_script_progression --message="Updating a configuration file..." --weight=1
mkdir -p $config_path
chmod 750 "$config_path"
chmod -R o-rwx "$config_path"
chown -R $app:$app "$config_path"
mkdir -p "$config_path"
ynh_add_config --template="../conf/nomad.hcl" --destination="$config_path/nomad.hcl"
chmod 400 "$config_path/nomad.hcl"
chown $app:$app "$config_path/nomad.hcl"
ynh_add_config --template="nomad.hcl" --destination="$config_path/nomad.hcl"
if [ "$node_type" == "server" ]
then
ynh_add_config --template="../conf/server.hcl" --destination="$config_path/server.hcl"
chmod 400 "$config_path/server.hcl"
chown $app:$app "$config_path/server.hcl"
if [ "$node_type" == "server" ]; then
ynh_add_config --template="server.hcl" --destination="$config_path/server.hcl"
fi
if [ "$node_type" == "client" ]
then
ynh_add_config --template="../conf/client.hcl" --destination="$config_path/client.hcl"
chmod 400 "$config_path/client.hcl"
chown $app:$app "$config_path/client.hcl"
if [ $driver_lxc -eq 1 ]
then
ynh_add_config --template="../conf/driver-lxc.hcl" --destination="$config_path/driver-lxc.hcl"
chmod 400 "$config_path/driver-lxc.hcl"
chown $app:$app "$config_path/driver-lxc.hcl"
fi
if [ "$node_type" == "client" ]; then
ynh_add_config --template="client.hcl" --destination="$config_path/client.hcl"
if [ "$driver_lxc" -eq 1 ]; then
ynh_add_config --template="driver-lxc.hcl" --destination="$config_path/driver-lxc.hcl"
fi
fi
chmod -R go-rwx,u-w "$config_path"
chown -R "$app:$app" "$config_path"
#=================================================
# SETUP SYSTEMD
# REAPPLY SYSTEM CONFIGURATIONS
#=================================================
ynh_script_progression --message="Upgrading systemd configuration..." --weight=1
ynh_script_progression --message="Upgrading system configurations related to $app..." --weight=1
# Create a dedicated NGINX config
ynh_add_nginx_config
systemd_user=$app
if [ "$node_type" == "client" ]
then
systemd_user="root"
fi
# Create a dedicated systemd config
case "$node_type" in
client) systemd_user="root" ;;
server) systemd_user="$app" ;;
esac
ynh_add_systemd_config
#=================================================
# GENERIC FINALIZATION
#=================================================
# SETUP LOGROTATE
#=================================================
ynh_script_progression --message="Upgrading logrotate configuration..." --weight=1
yunohost service add "$app" --log="/var/log/$app/$app.log" --needs_exposed_ports="$port_rpc $port_serf"
# Use logrotate to manage app-specific logfile(s)
ynh_use_logrotate --non-append
#=================================================
# INTEGRATE SERVICE IN YUNOHOST
#=================================================
ynh_script_progression --message="Integrating service in YunoHost..." --weight=1
needs_exposed_ports="$rpc_port"
if [ "$node_type" == "server" ]
then
needs_exposed_ports="$serf_port $needs_exposed_ports"
fi
yunohost service add $app --log="/var/log/$app/$app.log" --needs_exposed_ports $needs_exposed_ports
#=================================================
# START SYSTEMD SERVICE
#=================================================
ynh_script_progression --message="Starting a systemd service..." --weight=1
ynh_systemd_action --service_name=$app --action="start" --log_path="/var/log/$app/$app.log" --line_match="Nomad agent started"
#=================================================
# RELOAD NGINX
#=================================================
ynh_script_progression --message="Reloading NGINX web server..." --weight=1
ynh_systemd_action --service_name=nginx --action=reload
ynh_systemd_action --service_name="$app" --action="start" --log_path="/var/log/$app/$app.log" --line_match="Nomad agent started"
#=================================================
# END OF SCRIPT

tests.toml: new file, 18 lines added
View file

@ -0,0 +1,18 @@
#:schema https://raw.githubusercontent.com/YunoHost/apps/master/schemas/tests.v1.schema.json
test_format = 1.0
[default]
args.node_type = "server"
args.retry_join = "192.168.1.100"
args.server_ip = "none..."
# FIXME:
# test_upgrade_from.
[tests_as_client]
args.node_type = "client"
args.retry_join = "192.168.1.100"
args.server_ip = "192.168.1.100"