diff --git a/ALL_README.md b/ALL_README.md
index a01b345..8938aae 100644
--- a/ALL_README.md
+++ b/ALL_README.md
@@ -1,6 +1,7 @@
# All available README files by language
- [Read the README in English](README.md)
+- [Lee el README en español](README_es.md)
- [Irakurri README euskaraz](README_eu.md)
- [Lire le README en français](README_fr.md)
- [Le o README en galego](README_gl.md)
diff --git a/README.md b/README.md
index 70c460a..d0e9177 100644
--- a/README.md
+++ b/README.md
@@ -19,191 +19,15 @@ It shall NOT be edited by hand.
Nomad is a simple and flexible workload orchestrator to deploy and manage containers ([docker](https://www.nomadproject.io/docs/drivers/docker.html), [podman](https://www.nomadproject.io/docs/drivers/podman)), non-containerized applications ([executable](https://www.nomadproject.io/docs/drivers/exec.html), [Java](https://www.nomadproject.io/docs/drivers/java)), and virtual machines ([qemu](https://www.nomadproject.io/docs/drivers/qemu.html)) across on-prem and clouds at scale.
-**Shipped version:** 1.7.7~ynh1
+**Shipped version:** 1.7.7~ynh2
## Screenshots

-## Disclaimers / important information
-
-## Some Nomad Job examples
-
-### Busybox
-
-`lxc-create --name=busybox --template=busybox`
-
-```
-job "job-busybox" {
- datacenters = ["dc1"]
- type = "service"
-
- group "group-busybox" {
- task "task-busybox" {
- driver = "lxc"
-
- config {
- log_level = "info"
- verbosity = "verbose"
- template = "/usr/share/lxc/templates/lxc-busybox"
- }
-
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
-```
-
-### Debian
-
-`lxc-create --name=debian --template=debian`
-
-```
-job "job-debian" {
- datacenters = ["dc1"]
- type = "service"
-
- group "group-debian" {
- task "task-debian" {
- driver = "lxc"
-
- config {
- log_level = "info"
- verbosity = "verbose"
- template = "/usr/share/lxc/templates/lxc-debian"
- }
-
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
-```
-
-### Debian Stretch
-
-`lxc-create --name=stretch --template=debian -- --release=stretch`
-
-```
-job "job-stretch" {
- datacenters = ["dc1"]
- type = "service"
-
- group "group-stretch" {
- task "task-stretch" {
- driver = "lxc"
-
- config {
- log_level = "info"
- verbosity = "verbose"
- template = "/usr/share/lxc/templates/lxc-debian"
- template_args = ["--release=stretch"]
- }
-
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
-```
-
-### Debian Buster
-
-`lxc-create --name=buster --template=debian -- --release=buster`
-
-```
-job "job-buster" {
- datacenters = ["dc1"]
- type = "service"
-
- group "group-buster" {
- task "task-buster" {
- driver = "lxc"
-
- config {
- log_level = "info"
- verbosity = "verbose"
- template = "/usr/share/lxc/templates/lxc-debian"
- template_args = ["--release=buster"]
- }
-
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
-```
-
-### Debian Buster from images.linuxcontainers.org
-
-`lxc-create --name=download-buster --template=download -- --dist=debian --release=buster --arch=amd64 --keyserver=hkp://keyserver.ubuntu.com`
-
-```
-job "job-download-buster" {
- datacenters = ["dc1"]
- type = "service"
-
- group "group-download-buster" {
- task "task-download-buster" {
- driver = "lxc"
-
- config {
- log_level = "info"
- verbosity = "verbose"
- template = "/usr/share/lxc/templates/lxc-download"
- template_args = ["--dist=debian","--release=buster","--arch=amd64","--keyserver=hkp://keyserver.ubuntu.com"]
- }
-
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
-```
-
-### Debian Bullseye from images.linuxcontainers.org
-
-`lxc-create --name=download-bullseye --template=download -- --dist=debian --release=bullseye --arch=amd64 --keyserver=hkp://keyserver.ubuntu.com`
-
-```
-job "job-download-bullseye" {
- datacenters = ["dc1"]
- type = "service"
-
- group "group-download-bullseye" {
- task "task-download-bullseye" {
- driver = "lxc"
-
- config {
- log_level = "info"
- verbosity = "verbose"
- template = "/usr/share/lxc/templates/lxc-download"
- template_args = ["--dist=debian","--release=bullseye","--arch=amd64","--keyserver=hkp://keyserver.ubuntu.com"]
- }
-
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
-```
-
## Documentation and resources
-- Official app website:
+- Official app website:
- Official admin documentation:
- Upstream app code repository:
- YunoHost Store:
diff --git a/README_es.md b/README_es.md
new file mode 100644
index 0000000..c40de29
--- /dev/null
+++ b/README_es.md
@@ -0,0 +1,48 @@
+
+
+# Nomad para YunoHost
+
+[](https://dash.yunohost.org/appci/app/nomad)  
+
+[](https://install-app.yunohost.org/?app=nomad)
+
+*[Leer este README en otros idiomas.](./ALL_README.md)*
+
+> *Este paquete le permite instalar Nomad rápida y simplemente en un servidor YunoHost.*
+> *Si no tiene YunoHost, consulte [la guía](https://yunohost.org/install) para aprender cómo instalarlo.*
+
+## Descripción general
+
+Nomad is a simple and flexible workload orchestrator to deploy and manage containers ([docker](https://www.nomadproject.io/docs/drivers/docker.html), [podman](https://www.nomadproject.io/docs/drivers/podman)), non-containerized applications ([executable](https://www.nomadproject.io/docs/drivers/exec.html), [Java](https://www.nomadproject.io/docs/drivers/java)), and virtual machines ([qemu](https://www.nomadproject.io/docs/drivers/qemu.html)) across on-prem and clouds at scale.
+
+
+**Versión actual:** 1.7.7~ynh2
+
+## Capturas
+
+
+
+## Documentaciones y recursos
+
+- Sitio web oficial:
+- Documentación administrador oficial:
+- Repositorio del código fuente oficial de la aplicación :
+- Catálogo YunoHost:
+- Reportar un error:
+
+## Información para desarrolladores
+
+Por favor, envíe sus correcciones a la [rama `testing`](https://github.com/YunoHost-Apps/nomad_ynh/tree/testing).
+
+Para probar la rama `testing`, siga estos pasos:
+
+```bash
+sudo yunohost app install https://github.com/YunoHost-Apps/nomad_ynh/tree/testing --debug
+o
+sudo yunohost app upgrade nomad -u https://github.com/YunoHost-Apps/nomad_ynh/tree/testing --debug
+```
+
+**Más información sobre el empaquetado de aplicaciones:**
diff --git a/README_eu.md b/README_eu.md
index 8de9767..60f5ee9 100644
--- a/README_eu.md
+++ b/README_eu.md
@@ -19,191 +19,15 @@ EZ editatu eskuz.
Nomad is a simple and flexible workload orchestrator to deploy and manage containers ([docker](https://www.nomadproject.io/docs/drivers/docker.html), [podman](https://www.nomadproject.io/docs/drivers/podman)), non-containerized applications ([executable](https://www.nomadproject.io/docs/drivers/exec.html), [Java](https://www.nomadproject.io/docs/drivers/java)), and virtual machines ([qemu](https://www.nomadproject.io/docs/drivers/qemu.html)) across on-prem and clouds at scale.
-**Paketatutako bertsioa:** 1.7.7~ynh1
+**Paketatutako bertsioa:** 1.7.7~ynh2
## Pantaila-argazkiak

-## Ezespena / informazio garrantzitsua
-
-## Some Nomad Job examples
-
-### Busybox
-
-`lxc-create --name=busybox --template=busybox`
-
-```
-job "job-busybox" {
- datacenters = ["dc1"]
- type = "service"
-
- group "group-busybox" {
- task "task-busybox" {
- driver = "lxc"
-
- config {
- log_level = "info"
- verbosity = "verbose"
- template = "/usr/share/lxc/templates/lxc-busybox"
- }
-
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
-```
-
-### Debian
-
-`lxc-create --name=debian --template=debian`
-
-```
-job "job-debian" {
- datacenters = ["dc1"]
- type = "service"
-
- group "group-debian" {
- task "task-debian" {
- driver = "lxc"
-
- config {
- log_level = "info"
- verbosity = "verbose"
- template = "/usr/share/lxc/templates/lxc-debian"
- }
-
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
-```
-
-### Debian Stretch
-
-`lxc-create --name=stretch --template=debian -- --release=stretch`
-
-```
-job "job-stretch" {
- datacenters = ["dc1"]
- type = "service"
-
- group "group-stretch" {
- task "task-stretch" {
- driver = "lxc"
-
- config {
- log_level = "info"
- verbosity = "verbose"
- template = "/usr/share/lxc/templates/lxc-debian"
- template_args = ["--release=stretch"]
- }
-
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
-```
-
-### Debian Buster
-
-`lxc-create --name=buster --template=debian -- --release=buster`
-
-```
-job "job-buster" {
- datacenters = ["dc1"]
- type = "service"
-
- group "group-buster" {
- task "task-buster" {
- driver = "lxc"
-
- config {
- log_level = "info"
- verbosity = "verbose"
- template = "/usr/share/lxc/templates/lxc-debian"
- template_args = ["--release=buster"]
- }
-
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
-```
-
-### Debian Buster from images.linuxcontainers.org
-
-`lxc-create --name=download-buster --template=download -- --dist=debian --release=buster --arch=amd64 --keyserver=hkp://keyserver.ubuntu.com`
-
-```
-job "job-download-buster" {
- datacenters = ["dc1"]
- type = "service"
-
- group "group-download-buster" {
- task "task-download-buster" {
- driver = "lxc"
-
- config {
- log_level = "info"
- verbosity = "verbose"
- template = "/usr/share/lxc/templates/lxc-download"
- template_args = ["--dist=debian","--release=buster","--arch=amd64","--keyserver=hkp://keyserver.ubuntu.com"]
- }
-
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
-```
-
-### Debian Bullseye from images.linuxcontainers.org
-
-`lxc-create --name=download-bullseye --template=download -- --dist=debian --release=bullseye --arch=amd64 --keyserver=hkp://keyserver.ubuntu.com`
-
-```
-job "job-download-bullseye" {
- datacenters = ["dc1"]
- type = "service"
-
- group "group-download-bullseye" {
- task "task-download-bullseye" {
- driver = "lxc"
-
- config {
- log_level = "info"
- verbosity = "verbose"
- template = "/usr/share/lxc/templates/lxc-download"
- template_args = ["--dist=debian","--release=bullseye","--arch=amd64","--keyserver=hkp://keyserver.ubuntu.com"]
- }
-
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
-```
-
## Dokumentazioa eta baliabideak
-- Aplikazioaren webgune ofiziala:
+- Aplikazioaren webgune ofiziala:
- Administratzaileen dokumentazio ofiziala:
- Jatorrizko aplikazioaren kode-gordailua:
- YunoHost Denda:
diff --git a/README_fr.md b/README_fr.md
index 1fb93b5..0659463 100644
--- a/README_fr.md
+++ b/README_fr.md
@@ -19,191 +19,15 @@ Il NE doit PAS être modifié à la main.
Nomad is a simple and flexible workload orchestrator to deploy and manage containers ([docker](https://www.nomadproject.io/docs/drivers/docker.html), [podman](https://www.nomadproject.io/docs/drivers/podman)), non-containerized applications ([executable](https://www.nomadproject.io/docs/drivers/exec.html), [Java](https://www.nomadproject.io/docs/drivers/java)), and virtual machines ([qemu](https://www.nomadproject.io/docs/drivers/qemu.html)) across on-prem and clouds at scale.
-**Version incluse :** 1.7.7~ynh1
+**Version incluse :** 1.7.7~ynh2
## Captures d’écran

-## Avertissements / informations importantes
-
-## Some Nomad Job examples
-
-### Busybox
-
-`lxc-create --name=busybox --template=busybox`
-
-```
-job "job-busybox" {
- datacenters = ["dc1"]
- type = "service"
-
- group "group-busybox" {
- task "task-busybox" {
- driver = "lxc"
-
- config {
- log_level = "info"
- verbosity = "verbose"
- template = "/usr/share/lxc/templates/lxc-busybox"
- }
-
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
-```
-
-### Debian
-
-`lxc-create --name=debian --template=debian`
-
-```
-job "job-debian" {
- datacenters = ["dc1"]
- type = "service"
-
- group "group-debian" {
- task "task-debian" {
- driver = "lxc"
-
- config {
- log_level = "info"
- verbosity = "verbose"
- template = "/usr/share/lxc/templates/lxc-debian"
- }
-
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
-```
-
-### Debian Stretch
-
-`lxc-create --name=stretch --template=debian -- --release=stretch`
-
-```
-job "job-stretch" {
- datacenters = ["dc1"]
- type = "service"
-
- group "group-stretch" {
- task "task-stretch" {
- driver = "lxc"
-
- config {
- log_level = "info"
- verbosity = "verbose"
- template = "/usr/share/lxc/templates/lxc-debian"
- template_args = ["--release=stretch"]
- }
-
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
-```
-
-### Debian Buster
-
-`lxc-create --name=buster --template=debian -- --release=buster`
-
-```
-job "job-buster" {
- datacenters = ["dc1"]
- type = "service"
-
- group "group-buster" {
- task "task-buster" {
- driver = "lxc"
-
- config {
- log_level = "info"
- verbosity = "verbose"
- template = "/usr/share/lxc/templates/lxc-debian"
- template_args = ["--release=buster"]
- }
-
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
-```
-
-### Debian Buster from images.linuxcontainers.org
-
-`lxc-create --name=download-buster --template=download -- --dist=debian --release=buster --arch=amd64 --keyserver=hkp://keyserver.ubuntu.com`
-
-```
-job "job-download-buster" {
- datacenters = ["dc1"]
- type = "service"
-
- group "group-download-buster" {
- task "task-download-buster" {
- driver = "lxc"
-
- config {
- log_level = "info"
- verbosity = "verbose"
- template = "/usr/share/lxc/templates/lxc-download"
- template_args = ["--dist=debian","--release=buster","--arch=amd64","--keyserver=hkp://keyserver.ubuntu.com"]
- }
-
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
-```
-
-### Debian Bullseye from images.linuxcontainers.org
-
-`lxc-create --name=download-bullseye --template=download -- --dist=debian --release=bullseye --arch=amd64 --keyserver=hkp://keyserver.ubuntu.com`
-
-```
-job "job-download-bullseye" {
- datacenters = ["dc1"]
- type = "service"
-
- group "group-download-bullseye" {
- task "task-download-bullseye" {
- driver = "lxc"
-
- config {
- log_level = "info"
- verbosity = "verbose"
- template = "/usr/share/lxc/templates/lxc-download"
- template_args = ["--dist=debian","--release=bullseye","--arch=amd64","--keyserver=hkp://keyserver.ubuntu.com"]
- }
-
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
-```
-
## Documentations et ressources
-- Site officiel de l’app :
+- Site officiel de l’app :
- Documentation officielle de l’admin :
- Dépôt de code officiel de l’app :
- YunoHost Store :
diff --git a/README_gl.md b/README_gl.md
index f5e1c57..6cf6887 100644
--- a/README_gl.md
+++ b/README_gl.md
@@ -19,191 +19,15 @@ NON debe editarse manualmente.
Nomad is a simple and flexible workload orchestrator to deploy and manage containers ([docker](https://www.nomadproject.io/docs/drivers/docker.html), [podman](https://www.nomadproject.io/docs/drivers/podman)), non-containerized applications ([executable](https://www.nomadproject.io/docs/drivers/exec.html), [Java](https://www.nomadproject.io/docs/drivers/java)), and virtual machines ([qemu](https://www.nomadproject.io/docs/drivers/qemu.html)) across on-prem and clouds at scale.
-**Versión proporcionada:** 1.7.7~ynh1
+**Versión proporcionada:** 1.7.7~ynh2
## Capturas de pantalla

-## Avisos / información importante
-
-## Some Nomad Job examples
-
-### Busybox
-
-`lxc-create --name=busybox --template=busybox`
-
-```
-job "job-busybox" {
- datacenters = ["dc1"]
- type = "service"
-
- group "group-busybox" {
- task "task-busybox" {
- driver = "lxc"
-
- config {
- log_level = "info"
- verbosity = "verbose"
- template = "/usr/share/lxc/templates/lxc-busybox"
- }
-
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
-```
-
-### Debian
-
-`lxc-create --name=debian --template=debian`
-
-```
-job "job-debian" {
- datacenters = ["dc1"]
- type = "service"
-
- group "group-debian" {
- task "task-debian" {
- driver = "lxc"
-
- config {
- log_level = "info"
- verbosity = "verbose"
- template = "/usr/share/lxc/templates/lxc-debian"
- }
-
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
-```
-
-### Debian Stretch
-
-`lxc-create --name=stretch --template=debian -- --release=stretch`
-
-```
-job "job-stretch" {
- datacenters = ["dc1"]
- type = "service"
-
- group "group-stretch" {
- task "task-stretch" {
- driver = "lxc"
-
- config {
- log_level = "info"
- verbosity = "verbose"
- template = "/usr/share/lxc/templates/lxc-debian"
- template_args = ["--release=stretch"]
- }
-
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
-```
-
-### Debian Buster
-
-`lxc-create --name=buster --template=debian -- --release=buster`
-
-```
-job "job-buster" {
- datacenters = ["dc1"]
- type = "service"
-
- group "group-buster" {
- task "task-buster" {
- driver = "lxc"
-
- config {
- log_level = "info"
- verbosity = "verbose"
- template = "/usr/share/lxc/templates/lxc-debian"
- template_args = ["--release=buster"]
- }
-
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
-```
-
-### Debian Buster from images.linuxcontainers.org
-
-`lxc-create --name=download-buster --template=download -- --dist=debian --release=buster --arch=amd64 --keyserver=hkp://keyserver.ubuntu.com`
-
-```
-job "job-download-buster" {
- datacenters = ["dc1"]
- type = "service"
-
- group "group-download-buster" {
- task "task-download-buster" {
- driver = "lxc"
-
- config {
- log_level = "info"
- verbosity = "verbose"
- template = "/usr/share/lxc/templates/lxc-download"
- template_args = ["--dist=debian","--release=buster","--arch=amd64","--keyserver=hkp://keyserver.ubuntu.com"]
- }
-
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
-```
-
-### Debian Bullseye from images.linuxcontainers.org
-
-`lxc-create --name=download-bullseye --template=download -- --dist=debian --release=bullseye --arch=amd64 --keyserver=hkp://keyserver.ubuntu.com`
-
-```
-job "job-download-bullseye" {
- datacenters = ["dc1"]
- type = "service"
-
- group "group-download-bullseye" {
- task "task-download-bullseye" {
- driver = "lxc"
-
- config {
- log_level = "info"
- verbosity = "verbose"
- template = "/usr/share/lxc/templates/lxc-download"
- template_args = ["--dist=debian","--release=bullseye","--arch=amd64","--keyserver=hkp://keyserver.ubuntu.com"]
- }
-
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
-```
-
## Documentación e recursos
-- Web oficial da app:
+- Web oficial da app:
- Documentación oficial para admin:
- Repositorio de orixe do código:
- Tenda YunoHost:
diff --git a/README_zh_Hans.md b/README_zh_Hans.md
index 241dbcc..6150633 100644
--- a/README_zh_Hans.md
+++ b/README_zh_Hans.md
@@ -3,7 +3,7 @@
请勿手动编辑。
-->
-# YunoHost 的 Nomad
+# YunoHost 上的 Nomad
[](https://dash.yunohost.org/appci/app/nomad)  
@@ -19,191 +19,15 @@
Nomad is a simple and flexible workload orchestrator to deploy and manage containers ([docker](https://www.nomadproject.io/docs/drivers/docker.html), [podman](https://www.nomadproject.io/docs/drivers/podman)), non-containerized applications ([executable](https://www.nomadproject.io/docs/drivers/exec.html), [Java](https://www.nomadproject.io/docs/drivers/java)), and virtual machines ([qemu](https://www.nomadproject.io/docs/drivers/qemu.html)) across on-prem and clouds at scale.
-**分发版本:** 1.7.7~ynh1
+**分发版本:** 1.7.7~ynh2
## 截图

-## 免责声明 / 重要信息
-
-## Some Nomad Job examples
-
-### Busybox
-
-`lxc-create --name=busybox --template=busybox`
-
-```
-job "job-busybox" {
- datacenters = ["dc1"]
- type = "service"
-
- group "group-busybox" {
- task "task-busybox" {
- driver = "lxc"
-
- config {
- log_level = "info"
- verbosity = "verbose"
- template = "/usr/share/lxc/templates/lxc-busybox"
- }
-
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
-```
-
-### Debian
-
-`lxc-create --name=debian --template=debian`
-
-```
-job "job-debian" {
- datacenters = ["dc1"]
- type = "service"
-
- group "group-debian" {
- task "task-debian" {
- driver = "lxc"
-
- config {
- log_level = "info"
- verbosity = "verbose"
- template = "/usr/share/lxc/templates/lxc-debian"
- }
-
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
-```
-
-### Debian Stretch
-
-`lxc-create --name=stretch --template=debian -- --release=stretch`
-
-```
-job "job-stretch" {
- datacenters = ["dc1"]
- type = "service"
-
- group "group-stretch" {
- task "task-stretch" {
- driver = "lxc"
-
- config {
- log_level = "info"
- verbosity = "verbose"
- template = "/usr/share/lxc/templates/lxc-debian"
- template_args = ["--release=stretch"]
- }
-
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
-```
-
-### Debian Buster
-
-`lxc-create --name=buster --template=debian -- --release=buster`
-
-```
-job "job-buster" {
- datacenters = ["dc1"]
- type = "service"
-
- group "group-buster" {
- task "task-buster" {
- driver = "lxc"
-
- config {
- log_level = "info"
- verbosity = "verbose"
- template = "/usr/share/lxc/templates/lxc-debian"
- template_args = ["--release=buster"]
- }
-
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
-```
-
-### Debian Buster from images.linuxcontainers.org
-
-`lxc-create --name=download-buster --template=download -- --dist=debian --release=buster --arch=amd64 --keyserver=hkp://keyserver.ubuntu.com`
-
-```
-job "job-download-buster" {
- datacenters = ["dc1"]
- type = "service"
-
- group "group-download-buster" {
- task "task-download-buster" {
- driver = "lxc"
-
- config {
- log_level = "info"
- verbosity = "verbose"
- template = "/usr/share/lxc/templates/lxc-download"
- template_args = ["--dist=debian","--release=buster","--arch=amd64","--keyserver=hkp://keyserver.ubuntu.com"]
- }
-
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
-```
-
-### Debian Bullseye from images.linuxcontainers.org
-
-`lxc-create --name=download-bullseye --template=download -- --dist=debian --release=bullseye --arch=amd64 --keyserver=hkp://keyserver.ubuntu.com`
-
-```
-job "job-download-bullseye" {
- datacenters = ["dc1"]
- type = "service"
-
- group "group-download-bullseye" {
- task "task-download-bullseye" {
- driver = "lxc"
-
- config {
- log_level = "info"
- verbosity = "verbose"
- template = "/usr/share/lxc/templates/lxc-download"
- template_args = ["--dist=debian","--release=bullseye","--arch=amd64","--keyserver=hkp://keyserver.ubuntu.com"]
- }
-
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
-```
-
## 文档与资源
-- 官方应用网站:
+- 官方应用网站:
- 官方管理文档:
- 上游应用代码库:
- YunoHost 商店:
diff --git a/check_process b/check_process
deleted file mode 100644
index 0d02bbf..0000000
--- a/check_process
+++ /dev/null
@@ -1,47 +0,0 @@
-;; Test complet client
- ; Manifest
- domain="domain.tld"
- is_public=1
- node_type="client"
- bootstrap_expect="1"
- retry_join="192.168.1.100"
- server_ip="192.168.1.100"
- driver_lxc=1
- ; Checks
- pkg_linter=1
- setup_sub_dir=0
- setup_root=1
- setup_nourl=0
- setup_private=1
- setup_public=1
- upgrade=1
- #upgrade=1 from_commit=CommitHash
- backup_restore=1
- multi_instance=0
- port_already_use=0
- change_url=1
-;; Test complet server
- ; Manifest
- domain="domain.tld"
- is_public=1
- node_type="server"
- bootstrap_expect="1"
- retry_join="192.168.1.100"
- server_ip="none..."
- driver_lxc=1
- ; Checks
- pkg_linter=1
- setup_sub_dir=0
- setup_root=1
- setup_nourl=0
- setup_private=1
- setup_public=1
- upgrade=1
- #upgrade=1 from_commit=CommitHash
- backup_restore=1
- multi_instance=0
- port_already_use=0
- change_url=1
-;;; Options
-Email=
-Notification=none
diff --git a/conf/client.hcl b/conf/client.hcl
index ee2067f..4a74155 100644
--- a/conf/client.hcl
+++ b/conf/client.hcl
@@ -1,41 +1,41 @@
#----------------------- client-specific options ---------------------
client {
# A boolean indicating if client mode is enabled. All other client configuration options depend on this value.
- # Defaults to false.
+ # Defaults to false.
enabled = true
# This is the state dir used to store client state. By default, it lives inside of the data_dir, in the
- # "client" sub-path.
+ # "client" sub-path.
# state_dir = "/tmp/client"
# A directory used to store allocation data. Depending on the workload, the size of this directory can grow
# arbitrarily large as it is used to store downloaded artifacts for drivers (QEMU images, JAR files, etc.). It is therefore
# important to ensure this directory is placed some place on the filesystem with adequate storage capacity. By default, this
- # directory lives under the data_dir at the "alloc" sub-path.
+ # directory lives under the data_dir at the "alloc" sub-path.
# alloc_dir = "/tmp/alloc"
# An array of server addresses. This list is used to register the client with the server nodes and advertise
- # the available resources so that the agent can receive work.
- servers = ["__SERVER_IP__:__RPC_PORT__"]
+ # the available resources so that the agent can receive work.
+ servers = ["__SERVER_IP__:__PORT_RPC__"]
# This is the value used to uniquely identify the local agent's node registration with the servers. This can be any arbitrary
- # string but must be unique to the cluster. By default, if not specified, a randomly- generate UUID will be used.
+ # string but must be unique to the cluster. By default, if not specified, a randomly-generated UUID will be used.
# node_id = "foo"
# A string used to logically group client nodes by class. This can be used during job placement as a filter.
- # This option is not required and has no default.
+ # This option is not required and has no default.
# node_class = "experimentation"
- # This is a key/value mapping of metadata pairs. This is a free-form map and can contain any string values.
+ # This is a key/value mapping of metadata pairs. This is a free-form map and can contain any string values.
meta {}
- # This is a key/value mapping of internal configuration for clients, such as for driver configuration.
+ # This is a key/value mapping of internal configuration for clients, such as for driver configuration.
options {}
-
- # This is a string to force network fingerprinting to use a specific network interface
+
+ # This is a string to force network fingerprinting to use a specific network interface
# network_interface = "eth0"
# This is an int that sets the default link speed of network interfaces, in megabits, if their speed can not be
- # determined dynamically.
+ # determined dynamically.
network_speed = 100
}
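
For context on the placeholder rename above (`__RPC_PORT__` → `__PORT_RPC__`): once YunoHost renders the template, the `servers` entry combines the `server_ip` install question with the manifest's `rpc` port. A minimal sketch of the rendered stanza, assuming the example IP `192.168.1.100` and the default RPC port 4647:

```hcl
client {
  enabled = true
  # rendered from: servers = ["__SERVER_IP__:__PORT_RPC__"]
  servers = ["192.168.1.100:4647"]
}
```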
diff --git a/conf/driver-lxc.src b/conf/driver-lxc.src
deleted file mode 100644
index 44fc2aa..0000000
--- a/conf/driver-lxc.src
+++ /dev/null
@@ -1,7 +0,0 @@
-SOURCE_URL=https://github.com/hashicorp/nomad-driver-lxc/archive/68239f4f639bde68e80616b7e931b8cc368969b0.tar.gz
-SOURCE_SUM=50ddae947a189fefe0f6a5419d8f5ae749daa124f100b3ce900d83eab073c2ad
-SOURCE_SUM_PRG=sha256sum
-SOURCE_FORMAT=tar.gz
-SOURCE_IN_SUBDIR=true
-SOURCE_FILENAME=
-SOURCE_EXTRACT=true
diff --git a/conf/nginx.conf b/conf/nginx.conf
index 618f075..8e29125 100644
--- a/conf/nginx.conf
+++ b/conf/nginx.conf
@@ -1,7 +1,7 @@
#sub_path_only rewrite ^__PATH__$ __PATH__/ permanent;
location __PATH__/ {
- proxy_pass http://127.0.0.1:__HTTP_PORT__;
+ proxy_pass http://127.0.0.1:__PORT__;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# Nomad blocking queries will remain open for a default of 5 minutes.
diff --git a/conf/nomad.hcl b/conf/nomad.hcl
index 8656f8c..dc622ab 100644
--- a/conf/nomad.hcl
+++ b/conf/nomad.hcl
@@ -1,11 +1,11 @@
# -------------- General options ---------------
# Specifies the region the Nomad agent is a member of. A region typically maps to a
-# geographic region, for example USA, with potentially multiple zones, which map to
+# geographic region, for example USA, with potentially multiple zones, which map to
# datacenters such as us-west and us-east. Defaults to global.
#region = "USA"
-# Datacenter of the local agent. All members of a datacenter should share a local
+# Datacenter of the local agent. All members of a datacenter should share a local
# LAN connection. Defaults to dc1.
#datacenter = "data-center-one"
@@ -17,14 +17,14 @@
# A local directory used to store agent state. Client nodes use this directory by
# default to store temporary allocation data as well as cluster information. Server
# nodes use this directory to store cluster state, including the replicated log and
-# snapshot data. This option is required to start the Nomad agent.
-data_dir = "__DATADIR__"
+# snapshot data. This option is required to start the Nomad agent.
+data_dir = "__DATA_DIR__"
# Controls the verbosity of logs the Nomad agent will output. Valid log levels include
-# WARN, INFO, or DEBUG in increasing order of verbosity. Defaults to INFO.
+# WARN, INFO, or DEBUG in increasing order of verbosity. Defaults to INFO.
#log_level = "DEBUG"
-# Used to indicate which address the Nomad agent should bind to for network services,
+# Used to indicate which address the Nomad agent should bind to for network services,
# including the HTTP interface as well as the internal gossip protocol and RPC mechanism.
# This should be specified in IP format, and can be used to easily bind all network services
# to the same address. It is also possible to bind the individual services to different
@@ -39,16 +39,16 @@ enable_debug = false
# Controls the network ports used for different services required by the Nomad agent.
ports {
- # The port used to run the HTTP server. Applies to both client and server nodes. Defaults to __HTTP_PORT__.
- http = __HTTP_PORT__
-
+ # The port used to run the HTTP server. Applies to both client and server nodes. Defaults to __PORT__.
+ http = __PORT__
+
# The port used for internal RPC communication between agents and servers, and for inter-server
- # traffic for the consensus algorithm (raft). Defaults to __RPC_PORT__. Only used on server nodes.
- rpc = __RPC_PORT__
+ # traffic for the consensus algorithm (raft). Defaults to __PORT_RPC__. Only used on server nodes.
+ rpc = __PORT_RPC__
# The port used for the gossip protocol for cluster membership. Both TCP and UDP should be routable
- # between the server nodes on this port. Defaults to __SERF_PORT__. Only used on server nodes.
- serf = __SERF_PORT__
+ # between the server nodes on this port. Defaults to __PORT_SERF__. Only used on server nodes.
+ serf = __PORT_SERF__
}
# Controls the bind address for individual network services. Any values configured in this block
@@ -56,16 +56,16 @@ ports {
addresses {
# The address the HTTP server is bound to. This is the most common bind address to change.
- # Applies to both clients and servers.
+ # Applies to both clients and servers.
# http = "0.0.0.0"
# The address to bind the internal RPC interfaces to. Should be exposed only to other cluster
- # members if possible. Used only on server nodes, but must be accessible from all agents.
+ # members if possible. Used only on server nodes, but must be accessible from all agents.
# rpc = "0.0.0.0"
# The address used to bind the gossip layer to. Both a TCP and UDP listener will be exposed on this
# address. Should be restricted to only server nodes from the same datacenter if possible.
- # Used only on server nodes.
+ # Used only on server nodes.
# serf = "0.0.0.0"
}
@@ -76,12 +76,12 @@ addresses {
#advertise {
# The address to advertise for the RPC interface. This address should be reachable by all of
- # the agents in the cluster.
-# rpc = "1.2.3.4:__RPC_PORT__"
+ # the agents in the cluster.
+# rpc = "1.2.3.4:__PORT_RPC__"
# The address advertised for the gossip layer. This address must be reachable from all server nodes.
- # It is not required that clients can reach this address.
-# serf = "1.2.3.4:__SERF_PORT__"
+ # It is not required that clients can reach this address.
+# serf = "1.2.3.4:__PORT_SERF__"
#}
# Used to control how the Nomad agent exposes telemetry data to external metrics collection servers.
@@ -93,7 +93,7 @@ telemetry {
# statsd_address = "1.2.3.4:5678"
# A boolean indicating if gauge values should not be prefixed with the local hostname.
-# disable_hostname = false
+# disable_hostname = false
}
# Enables gracefully leaving when receiving the interrupt signal. By default, the agent will
@@ -101,7 +101,7 @@ telemetry {
leave_on_interrupt = false
# Enables gracefully leaving when receiving the terminate signal. By default, the agent will
-# exit forcefully on any signal.
+# exit forcefully on any signal.
leave_on_terminate = false
# Enables logging to syslog. This option only works on Unix based systems.
diff --git a/doc/DISCLAIMER.md b/doc/ADMIN.md
similarity index 90%
rename from doc/DISCLAIMER.md
rename to doc/ADMIN.md
index 9bbe2f0..be2ae12 100644
--- a/doc/DISCLAIMER.md
+++ b/doc/ADMIN.md
@@ -73,7 +73,7 @@ job "job-stretch" {
log_level = "info"
verbosity = "verbose"
template = "/usr/share/lxc/templates/lxc-debian"
- template_args = ["--release=stretch"]
+ template_args = ["--release=stretch"]
}
resources {
@@ -102,7 +102,7 @@ job "job-buster" {
log_level = "info"
verbosity = "verbose"
template = "/usr/share/lxc/templates/lxc-debian"
- template_args = ["--release=buster"]
+ template_args = ["--release=buster"]
}
resources {
@@ -131,7 +131,7 @@ job "job-download-buster" {
log_level = "info"
verbosity = "verbose"
template = "/usr/share/lxc/templates/lxc-download"
- template_args = ["--dist=debian","--release=buster","--arch=amd64","--keyserver=hkp://keyserver.ubuntu.com"]
+ template_args = ["--dist=debian","--release=buster","--arch=amd64","--keyserver=hkp://keyserver.ubuntu.com"]
}
resources {
@@ -160,7 +160,7 @@ job "job-download-bullseye" {
log_level = "info"
verbosity = "verbose"
template = "/usr/share/lxc/templates/lxc-download"
- template_args = ["--dist=debian","--release=bullseye","--arch=amd64","--keyserver=hkp://keyserver.ubuntu.com"]
+ template_args = ["--dist=debian","--release=bullseye","--arch=amd64","--keyserver=hkp://keyserver.ubuntu.com"]
}
resources {
diff --git a/manifest.json b/manifest.json
deleted file mode 100644
index 1f00e58..0000000
--- a/manifest.json
+++ /dev/null
@@ -1,109 +0,0 @@
-{
- "name": "Nomad",
- "id": "nomad",
- "packaging_format": 1,
- "description": {
- "en": "Simple and flexible workload orchestrator"
- },
- "version": "1.7.7~ynh1",
- "url": "https://www.nomadproject.io/",
- "upstream": {
- "license": "MPL-2.0",
- "website": "https://www.nomadproject.io/",
- "admindoc": "https://www.nomadproject.io/docs",
- "code": "https://github.com/hashicorp/nomad"
- },
- "license": "MPL-2.0",
- "maintainer": {
- "name": "",
- "email": ""
- },
- "requirements": {
- "yunohost": ">= 11.2"
- },
- "multi_instance": false,
- "services": [
- "nginx"
- ],
- "arguments": {
- "install": [
- {
- "name": "domain",
- "type": "domain"
- },
- {
- "name": "is_public",
- "type": "boolean",
- "default": true
- },
- {
- "name": "node_type",
- "type": "select",
- "ask": {
- "en": "What kind of Nomad node you want to install ?"
- },
- "choices": [
- "server",
- "client"
- ],
- "default": "server"
- },
- {
- "name": "bootstrap_expect",
- "type": "select",
- "ask": {
- "en": "[Server only] How many server nodes to wait for before bootstrapping ?"
- },
- "choices": [
- "1",
- "3",
- "5",
- "7",
- "9"
- ],
- "default": "1",
- "help": {
- "en": "For production, it's recommanded to have 3 to 5 server nodes."
- }
- },
- {
- "name": "retry_join",
- "type": "string",
- "ask": {
- "en": "[Server only] What is the IP of another server to join ?"
- },
- "example": "192.168.1.100",
- "optional": true
- },
- {
- "name": "server_ip",
- "type": "string",
- "ask": {
- "en": "[Client only] What is the IP of the Nomad server node ?"
- },
- "example": "192.168.1.100",
- "optional": true
- },
- {
- "name": "server_ip",
- "type": "string",
- "ask": {
- "en": "[Client only] What is the IP of the Nomad server node ?"
- },
- "example": "192.168.1.100",
- "optional": true
- },
- {
- "name": "driver_lxc",
- "type": "boolean",
- "ask": {
- "en": "[Client only] Do you want to install LXC driver ?"
- },
- "default": true,
- "help": {
- "en": "It will also install lxc."
- }
- }
- ]
- }
-}
diff --git a/manifest.toml b/manifest.toml
new file mode 100644
index 0000000..9a56390
--- /dev/null
+++ b/manifest.toml
@@ -0,0 +1,108 @@
+#:schema https://raw.githubusercontent.com/YunoHost/apps/master/schemas/manifest.v2.schema.json
+
+packaging_format = 2
+
+id = "nomad"
+name = "Nomad"
+description.en = "Simple and flexible workload orchestrator"
+description.fr = "Orchestrateur de charge de travail simple et flexible"
+
+version = "1.7.7~ynh2"
+
+maintainers = []
+
+[upstream]
+license = "MPL-2.0"
+website = "https://www.nomadproject.io"
+admindoc = "https://www.nomadproject.io/docs"
+code = "https://github.com/hashicorp/nomad"
+cpe = "cpe:2.3:a:hashicorp:nomad"
+
+[integration]
+yunohost = ">= 11.2"
+architectures = "all"
+multi_instance = false
+ldap = "not_relevant"
+sso = "not_relevant"
+disk = "300M"
+ram.build = "100M"
+ram.runtime = "50M"
+
+[install]
+ [install.domain]
+ type = "domain"
+
+ [install.init_main_permission]
+ type = "group"
+ default = "visitors"
+
+ [install.node_type]
+ ask.en = "What kind of Nomad node you want to install?"
+ type = "select"
+ choices = ["server", "client"]
+ default = "server"
+
+ [install.bootstrap_expect]
+ ask.en = "[Server only] How many server nodes to wait for before bootstrapping?"
+ help.en = "For production, it's recommanded to have 3 to 5 server nodes."
+ type = "select"
+ choices = ["1", "3", "5", "7", "9"]
+ default = "1"
+
+ [install.retry_join]
+ ask.en = "[Server only] What is the IP of another server to join?"
+ type = "string"
+ example = "192.168.1.100"
+ optional = true
+
+ [install.server_ip]
+ ask.en = "[Client only] What is the IP of the Nomad server node?"
+ type = "string"
+ example = "192.168.1.100"
+ optional = true
+
+ [install.driver_lxc]
+ ask.en = "[Client only] Do you want to install LXC driver?"
+ help.en = "It will also install lxc."
+ type = "boolean"
+ default = true
+
+[resources]
+ [resources.sources.driver_lxc]
+ url = "https://github.com/hashicorp/nomad-driver-lxc/archive/68239f4f639bde68e80616b7e931b8cc368969b0.tar.gz"
+ sha256 = "50ddae947a189fefe0f6a5419d8f5ae749daa124f100b3ce900d83eab073c2ad"
+
+ [resources.system_user]
+
+ [resources.install_dir]
+
+ [resources.data_dir]
+ subdirs = ["plugins"]
+
+ [resources.permissions]
+ main.url = "/"
+
+ [resources.ports]
+ main.default = 4646
+
+ rpc.default = 4647
+ rpc.fixed = true
+ rpc.exposed = "TCP"
+ serf.default = 4648
+ serf.fixed = true
+ serf.exposed = "TCP"
+
+ [resources.apt]
+ packages = []
+ packages_from_raw_bash = """
+ if [ "$node_type" == "client" ]; then
+ if [ "$driver_lxc" -eq 1 ]; then
+ echo pkg-config lxc-dev lxc lxc-templates
+ fi
+ fi
+ """
+
+ [resources.apt.extras.nomad]
+ repo = "deb https://apt.releases.hashicorp.com bullseye main"
+ key = "https://apt.releases.hashicorp.com/gpg"
+ packages = ["nomad"]
diff --git a/scripts/_common.sh b/scripts/_common.sh
index c1c982b..f122fe2 100644
--- a/scripts/_common.sh
+++ b/scripts/_common.sh
@@ -4,15 +4,6 @@
# COMMON VARIABLES
#=================================================
-# dependencies used by the app (must be on a single line)
-pkg_dependencies=""
-extra_pkg_dependencies="nomad"
-
-server_pkg_dependencies=""
-
-client_pkg_dependencies=""
-client_lxc_pkg_dependencies="pkg-config lxc-dev lxc lxc-templates"
-
go_version=1.20
#=================================================
diff --git a/scripts/backup b/scripts/backup
index 241c88c..140b903 100755
--- a/scripts/backup
+++ b/scripts/backup
@@ -1,7 +1,5 @@
#!/bin/bash
-#=================================================
-# GENERIC START
#=================================================
# IMPORT GENERIC HELPERS
#=================================================
@@ -10,64 +8,41 @@
source ../settings/scripts/_common.sh
source /usr/share/yunohost/helpers
-#=================================================
-# MANAGE SCRIPT FAILURE
-#=================================================
-
-ynh_clean_setup () {
- true
-}
-# Exit if an error occurs during the execution of the script
-ynh_abort_if_errors
-
-#=================================================
-# LOAD SETTINGS
-#=================================================
-ynh_print_info --message="Loading installation settings..."
-
-app=$YNH_APP_INSTANCE_NAME
-
-config_path=$(ynh_app_setting_get --app=$app --key=config_path)
-domain=$(ynh_app_setting_get --app=$app --key=domain)
-datadir=$(ynh_app_setting_get --app=$app --key=datadir)
-
#=================================================
# DECLARE DATA AND CONF FILES TO BACKUP
#=================================================
ynh_print_info --message="Declaring files to be backed up..."
+#=================================================
+# BACKUP THE APP MAIN DIR
+#=================================================
+
+ynh_backup --src_path="$install_dir"
+
#=================================================
# BACKUP THE DATA DIR
#=================================================
-ynh_backup --src_path="$datadir" --is_big
+ynh_backup --src_path="$data_dir" --is_big
#=================================================
-# BACKUP THE NGINX CONFIGURATION
+# SYSTEM CONFIGURATION
#=================================================
ynh_backup --src_path="/etc/nginx/conf.d/$domain.d/$app.conf"
-#=================================================
-# SPECIFIC BACKUP
-#=================================================
-# BACKUP LOGROTATE
-#=================================================
+ynh_backup --src_path="/etc/systemd/system/$app.service"
ynh_backup --src_path="/etc/logrotate.d/$app"
-#=================================================
-# BACKUP SYSTEMD
-#=================================================
-
-ynh_backup --src_path="/etc/systemd/system/$app.service"
-
#=================================================
# BACKUP VARIOUS FILES
#=================================================
ynh_backup --src_path="$config_path"
+ynh_backup --src_path="/var/log/$app/"
+
#=================================================
# END OF SCRIPT
#=================================================
diff --git a/scripts/change_url b/scripts/change_url
index a10d249..7d1fd38 100644
--- a/scripts/change_url
+++ b/scripts/change_url
@@ -1,7 +1,5 @@
#!/bin/bash
-#=================================================
-# GENERIC STARTING
#=================================================
# IMPORT GENERIC HELPERS
#=================================================
@@ -9,122 +7,27 @@
source _common.sh
source /usr/share/yunohost/helpers
-#=================================================
-# RETRIEVE ARGUMENTS
-#=================================================
-
-old_domain=$YNH_APP_OLD_DOMAIN
-old_path=$YNH_APP_OLD_PATH
-
-new_domain=$YNH_APP_NEW_DOMAIN
-new_path=$YNH_APP_NEW_PATH
-
-app=$YNH_APP_INSTANCE_NAME
-
-#=================================================
-# LOAD SETTINGS
-#=================================================
-ynh_script_progression --message="Loading installation settings..." --weight=1
-
-# Needed for helper "ynh_add_nginx_config"
-config_path=$(ynh_app_setting_get --app=$app --key=config_path)
-
-# Add settings here as needed by your application
-http_port=$(ynh_app_setting_get --app=$app --key=http_port)
-
-#=================================================
-# BACKUP BEFORE CHANGE URL THEN ACTIVE TRAP
-#=================================================
-ynh_script_progression --message="Backing up the app before changing its URL (may take a while)..." --weight=1
-
-# Backup the current version of the app
-ynh_backup_before_upgrade
-ynh_clean_setup () {
- # Remove the new domain config file, the remove script won't do it as it doesn't know yet its location.
- ynh_secure_remove --file="/etc/nginx/conf.d/$new_domain.d/$app.conf"
-
- # Restore it if the upgrade fails
- ynh_restore_upgradebackup
-}
-# Exit if an error occurs during the execution of the script
-ynh_abort_if_errors
-
-#=================================================
-# CHECK WHICH PARTS SHOULD BE CHANGED
-#=================================================
-
-change_domain=0
-if [ "$old_domain" != "$new_domain" ]
-then
- change_domain=1
-fi
-
-change_path=0
-if [ "$old_path" != "$new_path" ]
-then
- change_path=1
-fi
-
-#=================================================
-# STANDARD MODIFICATIONS
#=================================================
# STOP SYSTEMD SERVICE
#=================================================
-ynh_script_progression --message="Stopping a systemd service..." --weight=1
+ynh_script_progression --message="Stopping $app's systemd service..." --weight=1
-ynh_systemd_action --service_name=$app --action="stop" --log_path="/var/log/$app/$app.log"
+ynh_systemd_action --service_name="$app" --action="stop" --log_path="/var/log/$app/$app.log"
#=================================================
# MODIFY URL IN NGINX CONF
#=================================================
ynh_script_progression --message="Updating NGINX web server configuration..." --weight=1
-nginx_conf_path=/etc/nginx/conf.d/$old_domain.d/$app.conf
+ynh_change_url_nginx_config
-# Change the path in the NGINX config file
-if [ $change_path -eq 1 ]
-then
- # Make a backup of the original NGINX config file if modified
- ynh_backup_if_checksum_is_different --file="$nginx_conf_path"
- # Set global variables for NGINX helper
- domain="$old_domain"
- path_url="$new_path"
- # Create a dedicated NGINX config
- ynh_add_nginx_config
-fi
-
-# Change the domain for NGINX
-if [ $change_domain -eq 1 ]
-then
- # Delete file checksum for the old conf file location
- ynh_delete_file_checksum --file="$nginx_conf_path"
- mv $nginx_conf_path /etc/nginx/conf.d/$new_domain.d/$app.conf
- # Store file checksum for the new config file location
- ynh_store_file_checksum --file="/etc/nginx/conf.d/$new_domain.d/$app.conf"
-fi
-
-#=================================================
-# SPECIFIC MODIFICATIONS
-#=================================================
-# ...
-#=================================================
-
-#=================================================
-# GENERIC FINALISATION
#=================================================
# START SYSTEMD SERVICE
#=================================================
-ynh_script_progression --message="Starting a systemd service..." --weight=1
+ynh_script_progression --message="Starting $app's systemd service..." --weight=1
# Start a systemd service
-ynh_systemd_action --service_name=$app --action="start" --log_path="/var/log/$app/$app.log" --line_match="Nomad agent started"
-
-#=================================================
-# RELOAD NGINX
-#=================================================
-ynh_script_progression --message="Reloading NGINX web server..." --weight=1
-
-ynh_systemd_action --service_name=nginx --action=reload
+ynh_systemd_action --service_name="$app" --action="start" --log_path="/var/log/$app/$app.log" --line_match="Nomad agent started"
#=================================================
# END OF SCRIPT
diff --git a/scripts/install b/scripts/install
index f375f75..ef99623 100644
--- a/scripts/install
+++ b/scripts/install
@@ -1,7 +1,5 @@
#!/bin/bash
-#=================================================
-# GENERIC START
#=================================================
# IMPORT GENERIC HELPERS
#=================================================
@@ -11,168 +9,41 @@ source ynh_install_go
source /usr/share/yunohost/helpers
#=================================================
-# MANAGE SCRIPT FAILURE
+# INITIALIZE AND STORE SETTINGS
#=================================================
-ynh_clean_setup () {
- true
-}
-# Exit if an error occurs during the execution of the script
-ynh_abort_if_errors
-
-#=================================================
-# RETRIEVE ARGUMENTS FROM THE MANIFEST
-#=================================================
-
-domain=$YNH_APP_ARG_DOMAIN
-path_url="/"
-is_public=$YNH_APP_ARG_IS_PUBLIC
-node_type=$YNH_APP_ARG_NODE_TYPE
-bootstrap_expect=$YNH_APP_ARG_BOOTSTRAP_EXPECT
-retry_join=$YNH_APP_ARG_RETRY_JOIN
-server_ip=$YNH_APP_ARG_SERVER_IP
-driver_lxc=$YNH_APP_ARG_DRIVER_LXC
-
-app=$YNH_APP_INSTANCE_NAME
-
client_lxc_bridge="lxcbr0"
client_lxc_plage_ip="10.1.44"
client_lxc_main_iface=$(ip route | grep default | awk '{print $5;}')
+ynh_app_setting_set --app="$app" --key=client_lxc_bridge --value="$client_lxc_bridge"
+ynh_app_setting_set --app="$app" --key=client_lxc_plage_ip --value="$client_lxc_plage_ip"
+ynh_app_setting_set --app="$app" --key=client_lxc_main_iface --value="$client_lxc_main_iface"
-#=================================================
-# CHECK IF THE APP CAN BE INSTALLED WITH THESE ARGS
-#=================================================
-ynh_script_progression --message="Validating installation parameters..." --weight=1
-
-# Register (book) web path
-ynh_webpath_register --app=$app --domain=$domain --path_url=$path_url
-
-#=================================================
-# STORE SETTINGS FROM MANIFEST
-#=================================================
-ynh_script_progression --message="Storing installation settings..." --weight=1
-
-ynh_app_setting_set --app=$app --key=domain --value=$domain
-ynh_app_setting_set --app=$app --key=path --value=$path_url
-ynh_app_setting_set --app=$app --key=node_type --value=$node_type
-ynh_app_setting_set --app=$app --key=bootstrap_expect --value=$bootstrap_expect
-ynh_app_setting_set --app=$app --key=retry_join --value=$retry_join
-ynh_app_setting_set --app=$app --key=server_ip --value=$server_ip
-ynh_app_setting_set --app=$app --key=driver_lxc --value=$driver_lxc
-ynh_app_setting_set --app=$app --key=client_lxc_bridge --value=$client_lxc_bridge
-ynh_app_setting_set --app=$app --key=client_lxc_plage_ip --value=$client_lxc_plage_ip
-ynh_app_setting_set --app=$app --key=client_lxc_main_iface --value=$client_lxc_main_iface
-
-#=================================================
-# STANDARD MODIFICATIONS
-#=================================================
-# FIND AND OPEN A PORT
-#=================================================
-ynh_script_progression --message="Finding an available port..." --weight=1
-
-# Find an available port
-http_port=4646
-ynh_port_available --port=$http_port || ynh_die --message="Port $http_port is needs to be available for this app"
-ynh_app_setting_set --app=$app --key=http_port --value=$http_port
-
-rpc_port=4647
-ynh_port_available --port=$rpc_port || ynh_die --message="Port $rpc_port is needs to be available for this app"
-ynh_app_setting_set --app=$app --key=rpc_port --value=$rpc_port
-
-serf_port=4648
-ynh_port_available --port=$serf_port || ynh_die --message="Port $serf_port is needs to be available for this app"
-ynh_app_setting_set --app=$app --key=serf_port --value=$serf_port
-
-# Open the port
-ynh_script_progression --message="Configuring firewall..." --weight=1
-ynh_exec_warn_less yunohost firewall allow --no-upnp TCP $rpc_port
-needs_exposed_ports="$rpc_port"
-if [ "$node_type" == "server" ]
-then
- ynh_exec_warn_less yunohost firewall allow --no-upnp TCP $serf_port
- needs_exposed_ports="$serf_port $needs_exposed_ports"
-fi
-
-#=================================================
-# INSTALL DEPENDENCIES
-#=================================================
-ynh_script_progression --message="Installing dependencies..." --weight=1
-
-if [ "$node_type" == "server" ]
-then
- pkg_dependencies="$pkg_dependencies $server_pkg_dependencies"
-fi
-
-if [ "$node_type" == "client" ]
-then
- if [ $driver_lxc -eq 1 ]
- then
- client_pkg_dependencies="$client_pkg_dependencies $client_lxc_pkg_dependencies"
- ynh_exec_warn_less ynh_install_go --go_version=$go_version
- fi
- pkg_dependencies="$pkg_dependencies $client_pkg_dependencies"
-fi
-ynh_install_app_dependencies $pkg_dependencies
-ynh_install_extra_app_dependencies --repo="deb https://apt.releases.hashicorp.com $(lsb_release -cs) main" --package="$extra_pkg_dependencies" --key="https://apt.releases.hashicorp.com/gpg"
-
-#=================================================
-# CREATE DEDICATED USER
-#=================================================
-ynh_script_progression --message="Configuring system user..." --weight=1
-
-# Create a system user
-ynh_system_user_create --username=$app
-
-#=================================================
-# NGINX CONFIGURATION
-#=================================================
-ynh_script_progression --message="Configuring NGINX web server..." --weight=1
-
-# Create a dedicated NGINX config
-ynh_add_nginx_config
-
-#=================================================
-# SPECIFIC SETUP
#=================================================
# CREATE DATA DIRECTORY
#=================================================
-ynh_script_progression --message="Creating a data directory..." --weight=1
+ynh_script_progression --message="Configuring the data directory..." --weight=1
-datadir=/home/yunohost.app/$app
-ynh_app_setting_set --app=$app --key=datadir --value=$datadir
-
-mkdir -p $datadir
-mkdir -p $datadir/plugins
-
-chmod 750 "$datadir"
-chmod -R o-rwx "$datadir"
-chown -R $app:$app "$datadir"
+chmod -R o-rwx "$data_dir"
+chown -R "$app:$app" "$data_dir"
#=================================================
# BUILD DRIVERS
#=================================================
+if [ "$node_type" == "client" ] && [ "$driver_lxc" -eq 1 ]; then
+ ynh_script_progression --message="Installing Go..."
+ ynh_exec_warn_less ynh_install_go --go_version="$go_version"
+ ynh_use_go
-if [ "$node_type" == "client" ]
-then
- if [ $driver_lxc -eq 1 ]
- then
- ynh_script_progression --message="Building LXC driver..."
-
- tempdir="$(mktemp -d)"
- ynh_setup_source --dest_dir="$tempdir" --source_id="driver-lxc"
-
- pushd $tempdir
- final_path=$tempdir
- ynh_use_go
- export GOPATH="$tempdir/go"
- export GOCACHE="$tempdir/.cache"
- ynh_exec_warn_less $ynh_go build
- popd
-
- mv -f $tempdir/nomad-driver-lxc $datadir/plugins/nomad-driver-lxc
-
- ynh_secure_remove --file="$tempdir"
- fi
+ ynh_script_progression --message="Building LXC driver..."
+ ynh_setup_source --dest_dir="$install_dir/driver_lxc" --source_id="driver_lxc"
+ pushd "$install_dir/driver_lxc"
+ export GOPATH="$install_dir/driver_lxc/go"
+ export GOCACHE="$install_dir/driver_lxc/.cache"
+ ynh_exec_warn_less "$ynh_go" build
+ popd
+ mv -f "$install_dir/driver_lxc/nomad-driver-lxc" "$data_dir/plugins/nomad-driver-lxc"
+ ynh_secure_remove --file="$install_dir/driver_lxc"
fi
#=================================================
@@ -181,105 +52,62 @@ fi
ynh_script_progression --message="Adding a configuration file..." --weight=1
config_path=/etc/$app.d
-ynh_app_setting_set --app=$app --key=config_path --value=$config_path
+ynh_app_setting_set --app="$app" --key=config_path --value="$config_path"
-mkdir -p $config_path
-chmod 750 "$config_path"
-chmod -R o-rwx "$config_path"
-chown -R $app:$app "$config_path"
+mkdir -p "$config_path"
-ynh_add_config --template="../conf/nomad.hcl" --destination="$config_path/nomad.hcl"
-chmod 400 "$config_path/nomad.hcl"
-chown $app:$app "$config_path/nomad.hcl"
+ynh_add_config --template="nomad.hcl" --destination="$config_path/nomad.hcl"
-if [ "$node_type" == "server" ]
-then
- ynh_add_config --template="../conf/server.hcl" --destination="$config_path/server.hcl"
- chmod 400 "$config_path/server.hcl"
- chown $app:$app "$config_path/server.hcl"
+if [ "$node_type" == "server" ]; then
+ ynh_add_config --template="server.hcl" --destination="$config_path/server.hcl"
fi
-if [ "$node_type" == "client" ]
-then
- ynh_add_config --template="../conf/client.hcl" --destination="$config_path/client.hcl"
- chmod 400 "$config_path/client.hcl"
- chown $app:$app "$config_path/client.hcl"
-
- if [ $driver_lxc -eq 1 ]
- then
- ynh_add_config --template="../conf/driver-lxc.hcl" --destination="$config_path/driver-lxc.hcl"
- chmod 400 "$config_path/driver-lxc.hcl"
- chown $app:$app "$config_path/driver-lxc.hcl"
+if [ "$node_type" == "client" ]; then
+ ynh_add_config --template="client.hcl" --destination="$config_path/client.hcl"
- ynh_add_config --template="../conf/dnsmasq-lxd" --destination="/etc/dnsmasq.d/lxd"
- systemctl restart dnsmasq
+ if [ "$driver_lxc" -eq 1 ]; then
+ ynh_add_config --template="driver-lxc.hcl" --destination="$config_path/driver-lxc.hcl"
+ ynh_add_config --template="dnsmasq-lxd" --destination="/etc/dnsmasq.d/lxd"
+ systemctl restart dnsmasq
- if [ ! ${PACKAGE_CHECK_EXEC:-0} -eq 1 ]; then
- ynh_add_config --template="../conf/lxc-net" --destination="/etc/default/lxc-net"
- fi
- ynh_add_config --template="../conf/default.conf" --destination="/etc/lxc/default.conf"
- systemctl enable lxc-net --quiet
- ynh_systemd_action --service_name=lxc-net --action="restart" --line_match="Finished LXC network bridge setup" --log_path="systemd"
- fi
+ if [ ! "${PACKAGE_CHECK_EXEC:-0}" -eq 1 ]; then
+ ynh_add_config --template="lxc-net" --destination="/etc/default/lxc-net"
+ fi
+ ynh_add_config --template="default.conf" --destination="/etc/lxc/default.conf"
+ systemctl enable lxc-net --quiet
+ ynh_systemd_action --service_name=lxc-net --action="restart" --line_match="Finished LXC network bridge setup" --log_path="systemd"
+ fi
fi
+chmod -R go-rwx,u-w "$config_path"
+chown -R "$app:$app" "$config_path"
+
#=================================================
-# SETUP SYSTEMD
+# SYSTEM CONFIGURATION
#=================================================
-ynh_script_progression --message="Configuring a systemd service..." --weight=1
+ynh_script_progression --message="Adding system configurations related to $app..." --weight=1
+
+# Create a dedicated NGINX config
+ynh_add_nginx_config
-systemd_user=$app
-if [ "$node_type" == "client" ]
-then
- systemd_user="root"
-fi
# Create a dedicated systemd config
+case "$node_type" in
+ client) systemd_user="root" ;;
+ server) systemd_user="$app" ;;
+esac
ynh_add_systemd_config
-
-#=================================================
-# GENERIC FINALIZATION
-#=================================================
-# SETUP LOGROTATE
-#=================================================
-ynh_script_progression --message="Configuring log rotation..." --weight=1
+yunohost service add "$app" --log="/var/log/$app/$app.log" --needs_exposed_ports "$port_rpc" "$port_serf"
# Use logrotate to manage application logfile(s)
ynh_use_logrotate
-#=================================================
-# INTEGRATE SERVICE IN YUNOHOST
-#=================================================
-ynh_script_progression --message="Integrating service in YunoHost..." --weight=1
-
-yunohost service add $app --log="/var/log/$app/$app.log" --needs_exposed_ports $needs_exposed_ports
-
#=================================================
# START SYSTEMD SERVICE
#=================================================
ynh_script_progression --message="Starting a systemd service..." --weight=1
# Start a systemd service
-ynh_systemd_action --service_name=$app --action="start" --log_path="/var/log/$app/$app.log" --line_match="Nomad agent started"
-
-#=================================================
-# SETUP SSOWAT
-#=================================================
-ynh_script_progression --message="Configuring permissions..." --weight=1
-
-# Make app public if necessary
-if [ $is_public -eq 1 ]
-then
- # Everyone can access the app.
- # The "main" permission is automatically created before the install script.
- ynh_permission_update --permission="main" --add="visitors"
-fi
-
-#=================================================
-# RELOAD NGINX
-#=================================================
-ynh_script_progression --message="Reloading NGINX web server..." --weight=1
-
-ynh_systemd_action --service_name=nginx --action=reload
+ynh_systemd_action --service_name="$app" --action="start" --log_path="/var/log/$app/$app.log" --line_match="Nomad agent started"
#=================================================
# END OF SCRIPT
diff --git a/scripts/remove b/scripts/remove
index 7619b15..8bf88f2 100755
--- a/scripts/remove
+++ b/scripts/remove
@@ -1,7 +1,5 @@
#!/bin/bash
-#=================================================
-# GENERIC START
#=================================================
# IMPORT GENERIC HELPERS
#=================================================
@@ -11,118 +9,49 @@ source ynh_install_go
source /usr/share/yunohost/helpers
#=================================================
-# LOAD SETTINGS
+# STOP NOMAD
#=================================================
-ynh_script_progression --message="Loading installation settings..." --weight=1
-
-app=$YNH_APP_INSTANCE_NAME
-
-domain=$(ynh_app_setting_get --app=$app --key=domain)
-config_path=$(ynh_app_setting_get --app=$app --key=config_path)
-datadir=$(ynh_app_setting_get --app=$app --key=datadir)
-node_type=$(ynh_app_setting_get --app=$app --key=node_type)
-driver_lxc=$(ynh_app_setting_get --app=$app --key=driver_lxc)
-rpc_port=$(ynh_app_setting_get --app=$app --key=rpc_port)
-serf_port=$(ynh_app_setting_get --app=$app --key=serf_port)
-
-#=================================================
-# STANDARD REMOVE
-#=================================================
-# REMOVE SERVICE INTEGRATION IN YUNOHOST
-#=================================================
-
-# Remove the service from the list of services known by YunoHost (added from `yunohost service add`)
-if ynh_exec_warn_less yunohost service status $app >/dev/null
-then
- ynh_script_progression --message="Removing $app service integration..." --weight=1
- yunohost service remove $app
-fi
-
-#=================================================
-# STOP AND REMOVE SERVICE
-#=================================================
-ynh_script_progression --message="Stopping and removing the systemd service..." --weight=1
+ynh_script_progression --message="Trying to stop gracefully $app..." --weight=1
ynh_exec_warn_less timeout 25 nomad node drain -self -enable -yes -deadline 20s
-# Remove the dedicated systemd config
-ynh_remove_systemd_config
#=================================================
-# REMOVE LOGROTATE CONFIGURATION
+# REMOVE SYSTEM CONFIGURATIONS
#=================================================
-ynh_script_progression --message="Removing logrotate configuration..." --weight=1
+ynh_script_progression --message="Removing system configurations related to $app..." --weight=1
+
+# Remove the service from the list of services known by YunoHost (added from `yunohost service add`)
+if ynh_exec_warn_less yunohost service status "$app" >/dev/null; then
+ yunohost service remove "$app"
+fi
+# Remove the dedicated systemd config
+ynh_remove_systemd_config
# Remove the app-specific logrotate config
ynh_remove_logrotate
-#=================================================
-# REMOVE NGINX CONFIGURATION
-#=================================================
-ynh_script_progression --message="Removing NGINX web server configuration..." --weight=1
-
# Remove the dedicated NGINX config
ynh_remove_nginx_config
-#=================================================
-# CLOSE A PORT
-#=================================================
+if [ "$node_type" == "client" ] && [ "$driver_lxc" -eq 1 ]; then
+ client_lxc_bridge=$(ynh_app_setting_get --app="$app" --key=client_lxc_bridge)
-if yunohost firewall list | grep -q "\- $rpc_port$"
-then
- ynh_script_progression --message="Closing port $rpc_port..." --weight=1
- ynh_exec_warn_less yunohost firewall disallow TCP $rpc_port
-fi
-
-if yunohost firewall list | grep -q "\- $serf_port$"
-then
- ynh_script_progression --message="Closing port $serf_port..." --weight=1
- ynh_exec_warn_less yunohost firewall disallow TCP $serf_port
-fi
-
-if [ "$node_type" == "client" ]
-then
-
- if [ $driver_lxc -eq 1 ]
- then
- client_lxc_bridge=$(ynh_app_setting_get --app=$app --key=client_lxc_bridge)
-
- ynh_systemd_action --service_name=lxc-net --action="stop"
- systemctl disable lxc-net --quiet
- ynh_secure_remove --file="/etc/default/lxc-net"
- ynh_secure_remove --file="/etc/lxc/default.conf"
- ynh_secure_remove --file="/etc/dnsmasq.d/lxd"
- systemctl restart dnsmasq
- fi
+ ynh_systemd_action --service_name=lxc-net --action="stop"
+ systemctl disable lxc-net --quiet
+ ynh_secure_remove --file="/etc/default/lxc-net"
+ ynh_secure_remove --file="/etc/lxc/default.conf"
+ ynh_secure_remove --file="/etc/dnsmasq.d/lxd"
+ systemctl restart dnsmasq
fi
#=================================================
# REMOVE DEPENDENCIES
#=================================================
-ynh_script_progression --message="Removing dependencies..." --weight=1
-
-# Remove metapackage and its dependencies
-ynh_remove_app_dependencies
-if [ "$node_type" == "client" ]
-then
- if [ $driver_lxc -eq 1 ]
- then
- ynh_remove_go
- fi
+if [ "$node_type" == "client" ] && [ "$driver_lxc" -eq 1 ]; then
+ ynh_script_progression --message="Removing Go..." --weight=1
+ ynh_remove_go
fi
-#=================================================
-# REMOVE DATA DIR
-#=================================================
-
-# Remove the data directory if --purge option is used
-if [ "${YNH_APP_PURGE:-0}" -eq 1 ]
-then
- ynh_script_progression --message="Removing app data directory..." --weight=1
- ynh_secure_remove --file="$datadir"
-fi
-
-#=================================================
-# SPECIFIC REMOVE
#=================================================
# REMOVE VARIOUS FILES
#=================================================
@@ -134,16 +63,6 @@ ynh_secure_remove --file="$config_path"
# Remove the log files
ynh_secure_remove --file="/var/log/$app"
-#=================================================
-# GENERIC FINALIZATION
-#=================================================
-# REMOVE DEDICATED USER
-#=================================================
-ynh_script_progression --message="Removing the dedicated system user..." --weight=1
-
-# Delete a system user
-ynh_system_user_delete --username=$app
-
#=================================================
# END OF SCRIPT
#=================================================
diff --git a/scripts/restore b/scripts/restore
index fb513e8..3f336b3 100644
--- a/scripts/restore
+++ b/scripts/restore
@@ -1,7 +1,5 @@
#!/bin/bash
-#=================================================
-# GENERIC START
#=================================================
# IMPORT GENERIC HELPERS
#=================================================
@@ -11,89 +9,22 @@ source ../settings/scripts/_common.sh
source /usr/share/yunohost/helpers
#=================================================
-# MANAGE SCRIPT FAILURE
+# RESTORE THE APP MAIN DIR
#=================================================
+ynh_script_progression --message="Restoring the app main directory..." --weight=1
-ynh_clean_setup () {
- true
-}
-# Exit if an error occurs during the execution of the script
-ynh_abort_if_errors
+ynh_restore_file --origin_path="$install_dir"
-#=================================================
-# LOAD SETTINGS
-#=================================================
-ynh_script_progression --message="Loading installation settings..." --weight=1
-
-app=$YNH_APP_INSTANCE_NAME
-
-domain=$(ynh_app_setting_get --app=$app --key=domain)
-path_url=$(ynh_app_setting_get --app=$app --key=path)
-config_path=$(ynh_app_setting_get --app=$app --key=config_path)
-datadir=$(ynh_app_setting_get --app=$app --key=datadir)
-node_type=$(ynh_app_setting_get --app=$app --key=node_type)
-driver_lxc=$(ynh_app_setting_get --app=$app --key=driver_lxc)
-http_port=$(ynh_app_setting_get --app=$app --key=http_port)
-rpc_port=$(ynh_app_setting_get --app=$app --key=rpc_port)
-serf_port=$(ynh_app_setting_get --app=$app --key=serf_port)
-
-#=================================================
-# CHECK IF THE APP CAN BE RESTORED
-#=================================================
-ynh_script_progression --message="Validating restoration parameters..." --weight=1
-
-#=================================================
-# STANDARD RESTORATION STEPS
-#=================================================
-# RECREATE THE DEDICATED USER
-#=================================================
-ynh_script_progression --message="Recreating the dedicated system user..." --weight=1
-
-# Create the dedicated user (if not existing)
-ynh_system_user_create --username=$app
+chown -R "$app:$app" "$install_dir"
#=================================================
# RESTORE THE DATA DIRECTORY
#=================================================
ynh_script_progression --message="Restoring the data directory..." --weight=1
-ynh_restore_file --origin_path="$datadir" --not_mandatory
+ynh_restore_file --origin_path="$data_dir" --not_mandatory
-mkdir -p $datadir
-
-chmod 750 "$datadir"
-chmod -R o-rwx "$datadir"
-chown -R $app:$app "$datadir"
-
-#=================================================
-# SPECIFIC RESTORATION
-#=================================================
-# REINSTALL DEPENDENCIES
-#=================================================
-ynh_script_progression --message="Reinstalling dependencies..." --weight=1
-
-if [ "$node_type" == "server" ]
-then
- pkg_dependencies="$pkg_dependencies $server_pkg_dependencies"
-fi
-
-if [ "$node_type" == "client" ]
-then
- if [ $driver_lxc -eq 1 ]
- then
- client_pkg_dependencies="$client_pkg_dependencies $client_lxc_pkg_dependencies"
- fi
- pkg_dependencies="$pkg_dependencies $client_pkg_dependencies"
-fi
-ynh_install_app_dependencies $pkg_dependencies
-ynh_install_extra_app_dependencies --repo="deb https://apt.releases.hashicorp.com $(lsb_release -cs) main" --package="$extra_pkg_dependencies" --key="https://apt.releases.hashicorp.com/gpg"
-
-#=================================================
-# RESTORE THE NGINX CONFIGURATION
-#=================================================
-ynh_script_progression --message="Restoring the NGINX web server configuration..." --weight=1
-
-ynh_restore_file --origin_path="/etc/nginx/conf.d/$domain.d/$app.conf"
+chown -R "$app:$app" "$data_dir"
#=================================================
# RESTORE VARIOUS FILES
@@ -102,79 +33,48 @@ ynh_script_progression --message="Restoring various files..." --weight=1
ynh_restore_file --origin_path="$config_path"
-chmod 750 "$config_path"
-chmod -R o-rwx "$config_path"
-chown -R $app:$app "$config_path"
+chmod -R go-rwx,u-w "$config_path"
+chown -R "$app:$app" "$config_path"
-# Open the port
-ynh_script_progression --message="Configuring firewall..."
-ynh_exec_warn_less yunohost firewall allow --no-upnp TCP $rpc_port
-needs_exposed_ports="$rpc_port"
-if [ "$node_type" == "server" ]
-then
- ynh_exec_warn_less yunohost firewall allow --no-upnp TCP $serf_port
- needs_exposed_ports="$serf_port $needs_exposed_ports"
-fi
+if [ "$node_type" == "client" ]; then
+ if [ "$driver_lxc" -eq 1 ]; then
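+        # Detect the main network interface (the device holding the default route) on the restore target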
+ client_lxc_main_iface=$(ip route | grep default | awk '{print $5;}')
+ ynh_app_setting_set --app="$app" --key=client_lxc_main_iface --value="$client_lxc_main_iface"
-if [ "$node_type" == "client" ]
-then
- if [ $driver_lxc -eq 1 ]
- then
- client_lxc_bridge=$(ynh_app_setting_get --app=$app --key=client_lxc_bridge)
- client_lxc_plage_ip=$(ynh_app_setting_get --app=$app --key=client_lxc_plage_ip)
- client_lxc_main_iface=$(ip route | grep default | awk '{print $5;}')
- ynh_app_setting_set --app=$app --key=client_lxc_main_iface --value=$client_lxc_main_iface
+ ynh_add_config --template="dnsmasq-lxd" --destination="/etc/dnsmasq.d/lxd"
+ systemctl restart dnsmasq
- ynh_add_config --template="../conf/dnsmasq-lxd" --destination="/etc/dnsmasq.d/lxd"
- systemctl restart dnsmasq
-
- if [ ! ${PACKAGE_CHECK_EXEC:-0} -eq 1 ]; then
- ynh_add_config --template="../conf/lxc-net" --destination="/etc/default/lxc-net"
- fi
- ynh_secure_remove --file="/etc/lxc/default.conf"
- ynh_add_config --template="../conf/default.conf" --destination="/etc/lxc/default.conf"
- systemctl enable lxc-net --quiet
- ynh_systemd_action --service_name=lxc-net --action="restart" --line_match="Finished LXC network bridge setup" --log_path="systemd"
- fi
+ if [ ! "${PACKAGE_CHECK_EXEC:-0}" -eq 1 ]; then
+ ynh_add_config --template="lxc-net" --destination="/etc/default/lxc-net"
+ fi
+ ynh_secure_remove --file="/etc/lxc/default.conf"
+ ynh_add_config --template="default.conf" --destination="/etc/lxc/default.conf"
+ systemctl enable lxc-net --quiet
+ ynh_systemd_action --service_name=lxc-net --action="restart" --line_match="Finished LXC network bridge setup" --log_path="systemd"
+ fi
fi
#=================================================
-# RESTORE SYSTEMD
+# RESTORE SYSTEM CONFIGURATIONS
#=================================================
-ynh_script_progression --message="Restoring the systemd configuration..." --weight=1
+ynh_script_progression --message="Restoring system configurations related to $app..." --weight=1
+
+ynh_restore_file --origin_path="/etc/nginx/conf.d/$domain.d/$app.conf"
ynh_restore_file --origin_path="/etc/systemd/system/$app.service"
-systemctl enable $app.service --quiet
+systemctl enable "$app.service" --quiet
+yunohost service add "$app" --log="/var/log/$app/$app.log" --needs_exposed_ports "$port_rpc" "$port_serf"
-#=================================================
-# RESTORE THE LOGROTATE CONFIGURATION
-#=================================================
-ynh_script_progression --message="Restoring the logrotate configuration..." --weight=1
-
-mkdir -p /var/log/$app
-chown -R $app:$app "/var/log/$app"
ynh_restore_file --origin_path="/etc/logrotate.d/$app"
-#=================================================
-# INTEGRATE SERVICE IN YUNOHOST
-#=================================================
-ynh_script_progression --message="Integrating service in YunoHost..." --weight=1
-
-yunohost service add $app --log="/var/log/$app/$app.log" --needs_exposed_ports $needs_exposed_ports
+ynh_restore_file --origin_path="/var/log/$app/"
#=================================================
-# START SYSTEMD SERVICE
+# START THE APP SERVICE AND RELOAD NGINX
#=================================================
-ynh_script_progression --message="Starting a systemd service..." --weight=1
+ynh_script_progression --message="Reloading NGINX web server and $app's service..." --weight=1
-ynh_systemd_action --service_name=$app --action="start" --log_path="/var/log/$app/$app.log" --line_match="Nomad agent started"
-
-#=================================================
-# GENERIC FINALIZATION
-#=================================================
-# RELOAD NGINX
-#=================================================
-ynh_script_progression --message="Reloading NGINX web server..." --weight=1
+ynh_systemd_action --service_name="$app" --action="start" --log_path="/var/log/$app/$app.log" --line_match="Nomad agent started"
ynh_systemd_action --service_name=nginx --action=reload
diff --git a/scripts/upgrade b/scripts/upgrade
index 807e673..b3a12ab 100644
--- a/scripts/upgrade
+++ b/scripts/upgrade
@@ -1,7 +1,5 @@
#!/bin/bash
-#=================================================
-# GENERIC START
#=================================================
# IMPORT GENERIC HELPERS
#=================================================
@@ -10,133 +8,35 @@ source _common.sh
source ynh_install_go
source /usr/share/yunohost/helpers
-#=================================================
-# LOAD SETTINGS
-#=================================================
-ynh_script_progression --message="Loading installation settings..." --weight=1
-
-app=$YNH_APP_INSTANCE_NAME
-
-domain=$(ynh_app_setting_get --app=$app --key=domain)
-path_url=$(ynh_app_setting_get --app=$app --key=path)
-config_path=$(ynh_app_setting_get --app=$app --key=config_path)
-datadir=$(ynh_app_setting_get --app=$app --key=datadir)
-node_type=$(ynh_app_setting_get --app=$app --key=node_type)
-bootstrap_expect=$(ynh_app_setting_get --app=$app --key=bootstrap_expect)
-retry_join=$(ynh_app_setting_get --app=$app --key=retry_join)
-server_ip=$(ynh_app_setting_get --app=$app --key=server_ip)
-driver_lxc=$(ynh_app_setting_get --app=$app --key=driver_lxc)
-http_port=$(ynh_app_setting_get --app=$app --key=http_port)
-rpc_port=$(ynh_app_setting_get --app=$app --key=rpc_port)
-serf_port=$(ynh_app_setting_get --app=$app --key=serf_port)
-
-#=================================================
-# CHECK VERSION
-#=================================================
-ynh_script_progression --message="Checking version..." --weight=1
-
-upgrade_type=$(ynh_check_app_version_changed)
-
-#=================================================
-# BACKUP BEFORE UPGRADE THEN ACTIVE TRAP
-#=================================================
-ynh_script_progression --message="Backing up the app before upgrading (may take a while)..." --weight=1
-
-# Backup the current version of the app
-ynh_backup_before_upgrade
-ynh_clean_setup () {
- # Restore it if the upgrade fails
- ynh_restore_upgradebackup
-}
-# Exit if an error occurs during the execution of the script
-ynh_abort_if_errors
-
-#=================================================
-# STANDARD UPGRADE STEPS
#=================================================
# STOP SYSTEMD SERVICE
#=================================================
-ynh_script_progression --message="Stopping a systemd service..." --weight=1
+ynh_script_progression --message="Stopping $app's systemd service..." --weight=1
-ynh_systemd_action --service_name=$app --action="stop" --log_path="/var/log/$app/$app.log"
+ynh_systemd_action --service_name="$app" --action="stop" --log_path="/var/log/$app/$app.log"
#=================================================
# ENSURE DOWNWARD COMPATIBILITY
#=================================================
-ynh_script_progression --message="Ensuring downward compatibility..." --weight=1
+# ynh_script_progression --message="Ensuring downward compatibility..." --weight=1
-# Cleaning legacy permissions
-if ynh_legacy_permissions_exists; then
- ynh_legacy_permissions_delete_all
-
- ynh_app_setting_delete --app=$app --key=is_public
-fi
-
-#=================================================
-# CREATE DEDICATED USER
-#=================================================
-ynh_script_progression --message="Making sure dedicated system user exists..." --weight=1
-
-# Create a dedicated user (if not existing)
-ynh_system_user_create --username=$app
-
-#=================================================
-# UPGRADE DEPENDENCIES
-#=================================================
-ynh_script_progression --message="Upgrading dependencies..." --weight=1
-
-if [ "$node_type" == "server" ]
-then
- pkg_dependencies="$pkg_dependencies $server_pkg_dependencies"
-fi
-
-if [ "$node_type" == "client" ]
-then
- if [ $driver_lxc -eq 1 ]
- then
- client_pkg_dependencies="$client_pkg_dependencies $client_lxc_pkg_dependencies"
- ynh_exec_warn_less ynh_install_go --go_version=$go_version
- fi
- pkg_dependencies="$pkg_dependencies $client_pkg_dependencies"
-fi
-ynh_install_app_dependencies $pkg_dependencies
-ynh_install_extra_app_dependencies --repo="deb https://apt.releases.hashicorp.com $(lsb_release -cs) main" --package="$extra_pkg_dependencies" --key="https://apt.releases.hashicorp.com/gpg"
-
-#=================================================
-# NGINX CONFIGURATION
-#=================================================
-ynh_script_progression --message="Upgrading NGINX web server configuration..." --weight=1
-
-# Create a dedicated NGINX config
-ynh_add_nginx_config
-
-#=================================================
-# SPECIFIC UPGRADE
#=================================================
# BUILD DRIVERS
#=================================================
+if [ "$node_type" == "client" ] && [ "$driver_lxc" -eq 1 ]; then
+ ynh_script_progression --message="Installing Go..."
+ ynh_exec_warn_less ynh_install_go --go_version="$go_version"
+ ynh_use_go
-if [ "$node_type" == "client" ]
-then
- if [ $driver_lxc -eq 1 ]
- then
- ynh_script_progression --message="Building LXC driver..." --weight=1
-
- tempdir="$(mktemp -d)"
- ynh_setup_source --dest_dir="$tempdir" --source_id="driver-lxc"
-
- pushd $tempdir
- final_path=$tempdir
- ynh_use_go
- export GOPATH="$tempdir/go"
- export GOCACHE="$tempdir/.cache"
- ynh_exec_warn_less $ynh_go build
- popd
-
- mv -f $tempdir/nomad-driver-lxc $datadir/plugins/nomad-driver-lxc
-
- ynh_secure_remove --file="$tempdir"
- fi
+ ynh_script_progression --message="Building LXC driver..."
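+    # Fetch the nomad-driver-lxc sources and rebuild the plugin with the Go toolchain installed above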
+ ynh_setup_source --dest_dir="$install_dir/driver_lxc" --source_id="driver_lxc"
+ pushd "$install_dir/driver_lxc"
+ export GOPATH="$install_dir/driver_lxc/go"
+ export GOCACHE="$install_dir/driver_lxc/.cache"
+ ynh_exec_warn_less "$ynh_go" build
+ popd
+ mv -f "$install_dir/driver_lxc/nomad-driver-lxc" "$data_dir/plugins/nomad-driver-lxc"
+ ynh_secure_remove --file="$install_dir/driver_lxc"
fi
#=================================================
@@ -144,84 +44,50 @@ fi
#=================================================
ynh_script_progression --message="Updating a configuration file..." --weight=1
-mkdir -p $config_path
-chmod 750 "$config_path"
-chmod -R o-rwx "$config_path"
-chown -R $app:$app "$config_path"
+mkdir -p "$config_path"
-ynh_add_config --template="../conf/nomad.hcl" --destination="$config_path/nomad.hcl"
-chmod 400 "$config_path/nomad.hcl"
-chown $app:$app "$config_path/nomad.hcl"
+ynh_add_config --template="nomad.hcl" --destination="$config_path/nomad.hcl"
-if [ "$node_type" == "server" ]
-then
- ynh_add_config --template="../conf/server.hcl" --destination="$config_path/server.hcl"
- chmod 400 "$config_path/server.hcl"
- chown $app:$app "$config_path/server.hcl"
+if [ "$node_type" == "server" ]; then
+ ynh_add_config --template="server.hcl" --destination="$config_path/server.hcl"
fi
-if [ "$node_type" == "client" ]
-then
- ynh_add_config --template="../conf/client.hcl" --destination="$config_path/client.hcl"
- chmod 400 "$config_path/client.hcl"
- chown $app:$app "$config_path/client.hcl"
-
- if [ $driver_lxc -eq 1 ]
- then
- ynh_add_config --template="../conf/driver-lxc.hcl" --destination="$config_path/driver-lxc.hcl"
- chmod 400 "$config_path/driver-lxc.hcl"
- chown $app:$app "$config_path/driver-lxc.hcl"
- fi
+if [ "$node_type" == "client" ]; then
+ ynh_add_config --template="client.hcl" --destination="$config_path/client.hcl"
+
+ if [ "$driver_lxc" -eq 1 ]; then
+ ynh_add_config --template="driver-lxc.hcl" --destination="$config_path/driver-lxc.hcl"
+ fi
fi
+chmod -R go-rwx,u-w "$config_path"
+chown -R "$app:$app" "$config_path"
+
#=================================================
-# SETUP SYSTEMD
+# REAPPLY SYSTEM CONFIGURATIONS
#=================================================
-ynh_script_progression --message="Upgrading systemd configuration..." --weight=1
+ynh_script_progression --message="Upgrading system configurations related to $app..." --weight=1
+
+# Create a dedicated NGINX config
+ynh_add_nginx_config
-systemd_user=$app
-if [ "$node_type" == "client" ]
-then
- systemd_user="root"
-fi
# Create a dedicated systemd config
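+# As at install time: clients run the agent as root (required by the task drivers), servers as $app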
+case "$node_type" in
+ client) systemd_user="root" ;;
+ server) systemd_user="$app" ;;
+esac
ynh_add_systemd_config
-
-#=================================================
-# GENERIC FINALIZATION
-#=================================================
-# SETUP LOGROTATE
-#=================================================
-ynh_script_progression --message="Upgrading logrotate configuration..." --weight=1
+yunohost service add "$app" --log="/var/log/$app/$app.log" --needs_exposed_ports "$port_rpc" "$port_serf"
# Use logrotate to manage app-specific logfile(s)
ynh_use_logrotate --non-append
-#=================================================
-# INTEGRATE SERVICE IN YUNOHOST
-#=================================================
-ynh_script_progression --message="Integrating service in YunoHost..." --weight=1
-
-needs_exposed_ports="$rpc_port"
-if [ "$node_type" == "server" ]
-then
- needs_exposed_ports="$serf_port $needs_exposed_ports"
-fi
-yunohost service add $app --log="/var/log/$app/$app.log" --needs_exposed_ports $needs_exposed_ports
-
#=================================================
# START SYSTEMD SERVICE
#=================================================
-ynh_script_progression --message="Starting a systemd service..." --weight=1
+ynh_script_progression --message="Starting $app's systemd service..." --weight=1
-ynh_systemd_action --service_name=$app --action="start" --log_path="/var/log/$app/$app.log" --line_match="Nomad agent started"
-
-#=================================================
-# RELOAD NGINX
-#=================================================
-ynh_script_progression --message="Reloading NGINX web server..." --weight=1
-
-ynh_systemd_action --service_name=nginx --action=reload
+ynh_systemd_action --service_name="$app" --action="start" --log_path="/var/log/$app/$app.log" --line_match="Nomad agent started"
#=================================================
# END OF SCRIPT
diff --git a/scripts/ynh_install_go b/scripts/ynh_install_go
index e3ad06a..07695dd 100644
--- a/scripts/ynh_install_go
+++ b/scripts/ynh_install_go
@@ -30,7 +30,7 @@ export GOENV_ROOT="$goenv_install_dir"
# However, $PATH is duplicated into $go_path to outlast any manipulation of $PATH
# You can use the variable `$ynh_go_load_path` to quickly load your Go version
# in $PATH for an usage into a separate script.
-# Exemple: $ynh_go_load_path $final_path/script_that_use_gem.sh`
+# Example: $ynh_go_load_path $install_dir/script_that_use_gem.sh`
#
#
# Finally, to start a Go service with the correct version, 2 solutions
@@ -72,7 +72,7 @@ ynh_use_go () {
ynh_go_load_path="PATH=$PATH"
# Sets the local application-specific Go version
- pushd $final_path
+ pushd $install_dir
$goenv_install_dir/bin/goenv local $go_version
popd
}
@@ -224,7 +224,7 @@ ynh_cleanup_go () {
required_go_versions="${installed_app_go_version}\n${required_go_versions}"
fi
done
-
+
# Remove no more needed Go versions
local installed_go_versions=$(goenv versions --bare --skip-aliases | grep -Ev '/')
for installed_go_version in $installed_go_versions
diff --git a/tests.toml b/tests.toml
new file mode 100644
index 0000000..78744d8
--- /dev/null
+++ b/tests.toml
@@ -0,0 +1,18 @@
+#:schema https://raw.githubusercontent.com/YunoHost/apps/master/schemas/tests.v1.schema.json
+
+test_format = 1.0
+
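+# The default suite installs a server node; "tests_as_client" installs a client pointed at an existing server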
+[default]
+
+ args.node_type = "server"
+ args.retry_join = "192.168.1.100"
+ args.server_ip = "none..."
+
+ # FIXME:
+ # test_upgrade_from.
+
+[tests_as_client]
+
+ args.node_type = "client"
+ args.retry_join = "192.168.1.100"
+ args.server_ip = "192.168.1.100"