From 53e6468c7a20898197474b2bdbb4eaa820223f66 Mon Sep 17 00:00:00 2001
From: Jimmy Monin
Date: Sat, 3 Sep 2022 19:03:29 +0200
Subject: [PATCH 1/6] Upgrade to upstream version 9.1.2

---
 manifest.json      | 2 +-
 scripts/_common.sh | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/manifest.json b/manifest.json
index d4ca0ed..876e46d 100644
--- a/manifest.json
+++ b/manifest.json
@@ -6,7 +6,7 @@
         "en": "Metric & analytic dashboards for monitoring",
         "fr": "Tableaux de bords de supervision"
     },
-    "version": "8.3.3~ynh2",
+    "version": "9.1.2~ynh1",
     "url": "https://grafana.com/oss/grafana/",
     "upstream": {
         "license": "AGPL-3.0-only",
diff --git a/scripts/_common.sh b/scripts/_common.sh
index f59a33f..e501d76 100644
--- a/scripts/_common.sh
+++ b/scripts/_common.sh
@@ -5,7 +5,7 @@
 #=================================================
 
 # Debian package version for Grafana
-GRAFANA_VERSION="8.3.3"
+GRAFANA_VERSION="9.1.2"
 
 # dependencies used by the app
 pkg_dependencies="influxdb"

From 49569cca5835465cfeaf8623aa56c7d8d5de3333 Mon Sep 17 00:00:00 2001
From: Jimmy Monin
Date: Sat, 3 Sep 2022 19:03:51 +0200
Subject: [PATCH 2/6] Fix NetData & InfluxDB configuration

---
 conf/influxdb.conf | 439 ---------------------------------------------
 scripts/install    |  14 +-
 2 files changed, 6 insertions(+), 447 deletions(-)
 delete mode 100644 conf/influxdb.conf

diff --git a/conf/influxdb.conf b/conf/influxdb.conf
deleted file mode 100644
index 5a0620b..0000000
--- a/conf/influxdb.conf
+++ /dev/null
@@ -1,439 +0,0 @@
-### Welcome to the InfluxDB configuration file.
-
-# The values in this file override the default values used by the system if
-# a config option is not specified. The commented out lines are the the configuration
-# field and the default value used. Uncommentting a line and changing the value
-# will change the value used at runtime when the process is restarted.
-
-# Once every 24 hours InfluxDB will report usage data to usage.influxdata.com
-# The data includes a random ID, os, arch, version, the number of series and other
-# usage data. No data from user databases is ever transmitted.
-# Change this option to true to disable reporting.
-# reporting-disabled = false
-
-# we'll try to get the hostname automatically, but if it the os returns something
-# that isn't resolvable by other servers in the cluster, use this option to
-# manually set the hostname
-# hostname = "localhost"
-
-###
-### [meta]
-###
-### Controls the parameters for the Raft consensus group that stores metadata
-### about the InfluxDB cluster.
-###
-
-[meta]
-  # Where the metadata/raft database is stored
-  dir = "/var/lib/influxdb/meta"
-
-  # Automatically create a default retention policy when creating a database.
-  # retention-autocreate = true
-
-  # If log messages are printed for the meta service
-  # logging-enabled = true
-
-###
-### [data]
-###
-### Controls where the actual shard data for InfluxDB lives and how it is
-### flushed from the WAL. "dir" may need to be changed to a suitable place
-### for your system, but the WAL settings are an advanced configuration. The
-### defaults should work for most systems.
-###
-
-[data]
-  # The directory where the TSM storage engine stores TSM files.
-  dir = "/var/lib/influxdb/data"
-
-  # The directory where the TSM storage engine stores WAL files.
-  wal-dir = "/var/lib/influxdb/wal"
-
-  # Trace logging provides more verbose output around the tsm engine. Turning
-  # this on can provide more useful output for debugging tsm engine issues.
-  # trace-logging-enabled = false
-
-  # Whether queries should be logged before execution. Very useful for troubleshooting, but will
-  # log any sensitive data contained within a query.
-  # query-log-enabled = true
-
-  # Settings for the TSM engine
-
-  # CacheMaxMemorySize is the maximum size a shard's cache can
-  # reach before it starts rejecting writes.
-  # cache-max-memory-size = 1048576000
-
-  # CacheSnapshotMemorySize is the size at which the engine will
-  # snapshot the cache and write it to a TSM file, freeing up memory
-  # cache-snapshot-memory-size = 26214400
-
-  # CacheSnapshotWriteColdDuration is the length of time at
-  # which the engine will snapshot the cache and write it to
-  # a new TSM file if the shard hasn't received writes or deletes
-  # cache-snapshot-write-cold-duration = "10m"
-
-  # CompactFullWriteColdDuration is the duration at which the engine
-  # will compact all TSM files in a shard if it hasn't received a
-  # write or delete
-  # compact-full-write-cold-duration = "4h"
-
-  # The maximum series allowed per database before writes are dropped. This limit can prevent
-  # high cardinality issues at the database level. This limit can be disabled by setting it to
-  # 0.
-  # max-series-per-database = 1000000
-
-  # The maximum number of tag values per tag that are allowed before writes are dropped. This limit
-  # can prevent high cardinality tag values from being written to a measurement. This limit can be
-  # disabled by setting it to 0.
-  # max-values-per-tag = 100000
-
-###
-### [coordinator]
-###
-### Controls the clustering service configuration.
-###
-
-[coordinator]
-  # The default time a write request will wait until a "timeout" error is returned to the caller.
-  # write-timeout = "10s"
-
-  # The maximum number of concurrent queries allowed to be executing at one time. If a query is
-  # executed and exceeds this limit, an error is returned to the caller. This limit can be disabled
-  # by setting it to 0.
-  # max-concurrent-queries = 0
-
-  # The maximum time a query will is allowed to execute before being killed by the system. This limit
-  # can help prevent run away queries. Setting the value to 0 disables the limit.
-  # query-timeout = "0s"
-
-  # The the time threshold when a query will be logged as a slow query. This limit can be set to help
-  # discover slow or resource intensive queries. Setting the value to 0 disables the slow query logging.
-  # log-queries-after = "0s"
-
-  # The maximum number of points a SELECT can process. A value of 0 will make the maximum
-  # point count unlimited.
-  # max-select-point = 0
-
-  # The maximum number of series a SELECT can run. A value of 0 will make the maximum series
-  # count unlimited.
-
-  # The maximum number of series a SELECT can run. A value of zero will make the maximum series
-  # count unlimited.
-  # max-select-series = 0
-
-  # The maxium number of group by time bucket a SELECt can create. A value of zero will max the maximum
-  # number of buckets unlimited.
-  # max-select-buckets = 0
-
-###
-### [retention]
-###
-### Controls the enforcement of retention policies for evicting old data.
-###
-
-[retention]
-  # Determines whether retention policy enforcment enabled.
-  # enabled = true
-
-  # The interval of time when retention policy enforcement checks run.
-  # check-interval = "30m"
-
-###
-### [shard-precreation]
-###
-### Controls the precreation of shards, so they are available before data arrives.
-### Only shards that, after creation, will have both a start- and end-time in the
-### future, will ever be created. Shards are never precreated that would be wholly
-### or partially in the past.
-
-[shard-precreation]
-  # Determines whether shard pre-creation service is enabled.
-  # enabled = true
-
-  # The interval of time when the check to pre-create new shards runs.
-  # check-interval = "10m"
-
-  # The default period ahead of the endtime of a shard group that its successor
-  # group is created.
-  # advance-period = "30m"
-
-###
-### Controls the system self-monitoring, statistics and diagnostics.
-###
-### The internal database for monitoring data is created automatically if
-### if it does not already exist. The target retention within this database
-### is called 'monitor' and is also created with a retention period of 7 days
-### and a replication factor of 1, if it does not exist. In all cases the
-### this retention policy is configured as the default for the database.
-
-[monitor]
-  # Whether to record statistics internally.
-  # store-enabled = true
-
-  # The destination database for recorded statistics
-  # store-database = "_internal"
-
-  # The interval at which to record statistics
-  # store-interval = "10s"
-
-###
-### [admin]
-###
-### Controls the availability of the built-in, web-based admin interface. If HTTPS is
-### enabled for the admin interface, HTTPS must also be enabled on the [http] service.
-###
-### NOTE: This interface is deprecated as of 1.1.0 and will be removed in a future release.
-
-[admin]
-  # Determines whether the admin service is enabled.
-  # enabled = false
-
-  # The default bind address used by the admin service.
-  # bind-address = ":8083"
-
-  # Whether the admin service should use HTTPS.
-  # https-enabled = false
-
-  # The SSL certificate used when HTTPS is enabled.
-  # https-certificate = "/etc/ssl/influxdb.pem"
-
-###
-### [http]
-###
-### Controls how the HTTP endpoints are configured. These are the primary
-### mechanism for getting data into and out of InfluxDB.
-###
-
-[http]
-  # Determines whether HTTP endpoint is enabled.
-  # enabled = true
-
-  # The bind address used by the HTTP service.
-  # bind-address = ":8086"
-
-  # Determines whether HTTP authentication is enabled.
-  # auth-enabled = false
-
-  # The default realm sent back when issuing a basic auth challenge.
-  # realm = "InfluxDB"
-
-  # Determines whether HTTP request logging is enable.d
-  # log-enabled = true
-
-  # Determines whether detailed write logging is enabled.
-  # write-tracing = false
-
-  # Determines whether the pprof endpoint is enabled. This endpoint is used for
-  # troubleshooting and monitoring.
-  # pprof-enabled = true
-
-  # Determines whether HTTPS is enabled.
-  # https-enabled = false
-
-  # The SSL certificate to use when HTTPS is enabled.
-  # https-certificate = "/etc/ssl/influxdb.pem"
-
-  # Use a separate private key location.
-  # https-private-key = ""
-
-  # The JWT auth shared secret to validate requests using JSON web tokens.
-  # shared-sercret = ""
-
-  # The default chunk size for result sets that should be chunked.
-  # max-row-limit = 10000
-
-  # The maximum number of HTTP connections that may be open at once. New connections that
-  # would exceed this limit are dropped. Setting this value to 0 disables the limit.
-  # max-connection-limit = 0
-
-  # Enable http service over unix domain socket
-  # unix-socket-enabled = false
-
-  # The path of the unix domain socket.
-  # bind-socket = "/var/run/influxdb.sock"
-
-###
-### [subscriber]
-###
-### Controls the subscriptions, which can be used to fork a copy of all data
-### received by the InfluxDB host.
-###
-
-[subscriber]
-  # Determines whether the subscriber service is enabled.
-  # enabled = true
-
-  # The default timeout for HTTP writes to subscribers.
-  # http-timeout = "30s"
-
-  # Allows insecure HTTPS connections to subscribers. This is useful when testing with self-
-  # signed certificates.
-  # insecure-skip-verify = false
-
-  # The path to the PEM encoded CA certs file. If the empty string, the default system certs will be used
-  # ca-certs = ""
-
-  # The number of writer goroutines processing the write channel.
-  # write-concurrency = 40
-
-  # The number of in-flight writes buffered in the write channel.
-  # write-buffer-size = 1000
-
-
-###
-### [[graphite]]
-###
-### Controls one or many listeners for Graphite data.
-###
-
-[[graphite]]
-  # Determines whether the graphite endpoint is enabled.
-  # enabled = false
-  # database = "graphite"
-  # retention-policy = ""
-  # bind-address = ":2003"
-  # protocol = "tcp"
-  # consistency-level = "one"
-
-  # These next lines control how batching works. You should have this enabled
-  # otherwise you could get dropped metrics or poor performance. Batching
-  # will buffer points in memory if you have many coming in.
-
-  # Flush if this many points get buffered
-  # batch-size = 5000
-
-  # number of batches that may be pending in memory
-  # batch-pending = 10
-
-  # Flush at least this often even if we haven't hit buffer limit
-  # batch-timeout = "1s"
-
-  # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
-  # udp-read-buffer = 0
-
-  ### This string joins multiple matching 'measurement' values providing more control over the final measurement name.
-  # separator = "."
-
-  ### Default tags that will be added to all metrics. These can be overridden at the template level
-  ### or by tags extracted from metric
-  # tags = ["region=us-east", "zone=1c"]
-
-  ### Each template line requires a template pattern. It can have an optional
-  ### filter before the template and separated by spaces. It can also have optional extra
-  ### tags following the template. Multiple tags should be separated by commas and no spaces
-  ### similar to the line protocol format. There can be only one default template.
-  # templates = [
-  #   "*.app env.service.resource.measurement",
-  #   # Default template
-  #   "server.*",
-  # ]
-
-###
-### [collectd]
-###
-### Controls one or many listeners for collectd data.
-###
-
-[[collectd]]
-  # enabled = false
-  # bind-address = ":25826"
-  # database = "collectd"
-  # retention-policy = ""
-  #
-  # The collectd service supports either scanning a directory for multiple types
-  # db files, or specifying a single db file.
-  # typesdb = "/usr/local/share/collectd"
-  #
-  # security-level = "none"
-  # auth-file = "/etc/collectd/auth_file"
-
-  # These next lines control how batching works. You should have this enabled
-  # otherwise you could get dropped metrics or poor performance. Batching
-  # will buffer points in memory if you have many coming in.
-
-  # Flush if this many points get buffered
-  # batch-size = 5000
-
-  # Number of batches that may be pending in memory
-  # batch-pending = 10
-
-  # Flush at least this often even if we haven't hit buffer limit
-  # batch-timeout = "10s"
-
-  # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
-  # read-buffer = 0
-
-###
-### [opentsdb]
-###
-### Controls one or many listeners for OpenTSDB data.
-###
-
-[[opentsdb]]
-  enabled = true
-  # bind-address = ":4242"
-  # database = "opentsdb"
-  # retention-policy = ""
-  # consistency-level = "one"
-  # tls-enabled = false
-  # certificate= "/etc/ssl/influxdb.pem"
-
-  # Log an error for every malformed point.
-  # log-point-errors = true
-
-  # These next lines control how batching works. You should have this enabled
-  # otherwise you could get dropped metrics or poor performance. Only points
-  # metrics received over the telnet protocol undergo batching.
-
-  # Flush if this many points get buffered
-  # batch-size = 1000
-
-  # Number of batches that may be pending in memory
-  # batch-pending = 5
-
-  # Flush at least this often even if we haven't hit buffer limit
-  # batch-timeout = "1s"
-
-###
-### [[udp]]
-###
-### Controls the listeners for InfluxDB line protocol data via UDP.
-###
-
-[[udp]]
-  # enabled = false
-  # bind-address = ":8089"
-  # database = "udp"
-  # retention-policy = ""
-
-  # These next lines control how batching works. You should have this enabled
-  # otherwise you could get dropped metrics or poor performance. Batching
-  # will buffer points in memory if you have many coming in.
-
-  # Flush if this many points get buffered
-  # batch-size = 5000
-
-  # Number of batches that may be pending in memory
-  # batch-pending = 10
-
-  # Will flush at least this often even if we haven't hit buffer limit
-  # batch-timeout = "1s"
-
-  # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
-  # read-buffer = 0
-
-###
-### [continuous_queries]
-###
-### Controls how continuous queries are run within InfluxDB.
-###
-
-[continuous_queries]
-  # Determiens whether the continuous query service is enabled.
-  # enabled = true
-
-  # Controls whether queries are logged when executed by the CQ service.
-  # log-enabled = true
-
-  # interval for how often continuous queries will be checked if they need to run
-  # run-interval = "1s"
diff --git a/scripts/install b/scripts/install
index 7e06f80..a6f32eb 100644
--- a/scripts/install
+++ b/scripts/install
@@ -92,8 +92,11 @@ ynh_add_nginx_config
 ynh_script_progression --message="Configuring Grafana and InfluxDB..." --weight=30
 
 # If NetData is installed, configure it to feed InfluxDB
-netdata_conf="/opt/netdata/etc/netdata/exporting.conf"
-if [ -f "$netdata_conf" ] ; then
+if [ -d "/opt/netdata/etc/netdata" ] ; then
+    netdata_conf="/opt/netdata/etc/netdata/exporting.conf"
+    if [ ! -f "$netdata_conf" ] ; then
+        cp "/opt/netdata/usr/lib/netdata/conf.d/exporting.conf" /opt/netdata/etc/netdata
+    fi
     sed -i '/^\[exporting:global\]$/,/^\[/ {
         s/enabled = no/enabled = yes/
         s/# update every = 10/update every = 60/
@@ -119,12 +122,7 @@ EOF
 fi
 
 # Configure InfluxDB
-if [ -f /etc/influxdb/influxdb.conf ] ; then
-    sed -i '/^\[\[opentsdb\]\]$/,/^\[/ s/enabled = false/enabled = true/' /etc/influxdb/influxdb.conf
-else
-    [ -d /etc/influxdb ] || mkdir /etc/influxdb
-    cp ../conf/influxdb.conf /etc/influxdb
-fi
+sed -i '/^\[\[opentsdb\]\]$/,/^\[/ s/^.* enabled = false/enabled = true/' /etc/influxdb/influxdb.conf
 
 # Start InfluxDB server
 ynh_systemd_action --service_name=influxdb --action="restart"

From a73295d6dec3e588d6db9fa1313a1551e7fa059d Mon Sep 17 00:00:00 2001
From: Jimmy Monin
Date: Sat, 3 Sep 2022 19:04:01 +0200
Subject: [PATCH 3/6] Fix restoration

---
 scripts/restore | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/scripts/restore b/scripts/restore
index 2394b76..4a04cfb 100644
--- a/scripts/restore
+++ b/scripts/restore
@@ -80,7 +80,9 @@ ynh_restore_file --origin_path="/var/lib/grafana/plugins" --not_mandatory
 
 # Set permission with the new grafana user (id could have been changed)
 chown -R root:grafana "/etc/grafana"
-chown -R grafana:grafana "/var/lib/grafana/plugins"
+if [ -d "/var/lib/grafana/plugins" ]; then
+    chown -R grafana:grafana "/var/lib/grafana/plugins"
+fi
 
 #=================================================
 # RESTORE THE INFLUXDB DATABASE

From 58c8de920b69eddb611ba1efa9cf5180b5d4dd32 Mon Sep 17 00:00:00 2001
From: yunohost-bot <yunohost-bot@users.noreply.github.com>
Date: Sat, 3 Sep 2022 17:04:13 +0000
Subject: [PATCH 4/6] Auto-update README

---
 README.md    | 2 +-
 README_fr.md | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index ff9ebfe..45bc6a6 100644
--- a/README.md
+++ b/README.md
@@ -17,7 +17,7 @@ If you don't have YunoHost, please consult [the guide](https://yunohost.org/#/in
 
 Metric & analytic dashboards for monitoring
 
-**Shipped version:** 8.3.3~ynh2
+**Shipped version:** 9.1.2~ynh1
 
 **Demo:** https://play.grafana.org
 
diff --git a/README_fr.md b/README_fr.md
index ba89298..7ddd0bc 100644
--- a/README_fr.md
+++ b/README_fr.md
@@ -17,7 +17,7 @@ Si vous n'avez pas YunoHost, regardez [ici](https://yunohost.org/#/install) pour
 
 Tableaux de bords de supervision
 
-**Version incluse :** 8.3.3~ynh2
+**Version incluse :** 9.1.2~ynh1
 
 **Démo :** https://play.grafana.org
 

From 363a234d89d473eeef5098fb36878739dc11b461 Mon Sep 17 00:00:00 2001
From: Jimmy Monin
Date: Sat, 3 Sep 2022 19:28:45 +0200
Subject: [PATCH 5/6] Implement updater

---
 .github/workflows/updater.sh  | 72 +++++++++++++++++++++++++++++++++++
 .github/workflows/updater.yml | 50 ++++++++++++++++++++++++
 manifest.json                 |  4 +-
 3 files changed, 124 insertions(+), 2 deletions(-)
 create mode 100644 .github/workflows/updater.sh
 create mode 100644 .github/workflows/updater.yml

diff --git a/.github/workflows/updater.sh b/.github/workflows/updater.sh
new file mode 100644
index 0000000..043e400
--- /dev/null
+++ b/.github/workflows/updater.sh
@@ -0,0 +1,72 @@
+#!/bin/bash
+
+#=================================================
+# PACKAGE UPDATING HELPER
+#=================================================
+
+# This script is meant to be run by GitHub Actions
+# The YunoHost-Apps organisation offers a template Action to run this script periodically
+# Since each app is different, maintainers can adapt its contents so as to perform
+# automatic actions when a new upstream release is detected.
+
+#=================================================
+# FETCHING LATEST RELEASE AND ITS ASSETS
+#=================================================
+
+# Fetching information
+current_version=$(cat manifest.json | jq -j '.version|split("~")[0]')
+repo=$(cat manifest.json | jq -j '.upstream.code|split("https://github.com/")[1]')
+# Some jq magic is needed, because the latest upstream release is not always the latest version (e.g. security patches for older versions)
+version=$(curl --silent "https://api.github.com/repos/$repo/releases" | jq -r '.[] | select( .prerelease != true ) | .tag_name' | sort -V | tail -1)
+
+# Later down the script, we assume the version has only digits and dots
+# Sometimes the release name starts with a "v", so let's filter it out.
+# You may need more tweaks here if the upstream repository has different naming conventions.
+if [[ ${version:0:1} == "v" || ${version:0:1} == "V" ]]; then
+    version=${version:1}
+fi
+
+# Setting up the environment variables
+echo "Current version: $current_version"
+echo "Latest release from upstream: $version"
+echo "VERSION=$version" >> $GITHUB_ENV
+echo "REPO=$repo" >> $GITHUB_ENV
+# For the time being, let's assume the script will fail
+echo "PROCEED=false" >> $GITHUB_ENV
+
+# Proceed only if the retrieved version is greater than the current one
+if ! dpkg --compare-versions "$current_version" "lt" "$version" ; then
+    echo "::warning ::No new version available"
+    exit 0
+# Proceed only if a PR for this new version does not already exist
+elif git ls-remote -q --exit-code --heads https://github.com/$GITHUB_REPOSITORY.git ci-auto-update-v$version ; then
+    echo "::warning ::A branch already exists for this update"
+    exit 0
+fi
+
+#=================================================
+# UPDATE SOURCE FILES
+#=================================================
+
+sed -i "s/GRAFANA_VERSION=.*/GRAFANA_VERSION=\"$version\"/" scripts/_common.sh
+echo "... scripts/_common.sh updated"
+
+#=================================================
+# SPECIFIC UPDATE STEPS
+#=================================================
+
+# Any action on the app's source code can be done.
+# The GitHub Action workflow takes care of committing all changes after this script ends.
+
+#=================================================
+# GENERIC FINALIZATION
+#=================================================
+
+# Replace new version in manifest
+echo "$(jq -s --indent 4 ".[] | .version = \"$version~ynh1\"" manifest.json)" > manifest.json
+
+# No need to update the README, yunohost-bot takes care of it
+
+# The Action will proceed only if the PROCEED environment variable is set to true
+echo "PROCEED=true" >> $GITHUB_ENV
+exit 0
diff --git a/.github/workflows/updater.yml b/.github/workflows/updater.yml
new file mode 100644
index 0000000..5f1dcc1
--- /dev/null
+++ b/.github/workflows/updater.yml
@@ -0,0 +1,50 @@
+# This workflow allows GitHub Actions to automagically update your app whenever a new upstream release is detected.
+# You need to enable Actions in your repository settings, and fetch this Action from the YunoHost-Apps organization.
+# This file should be enough by itself, but feel free to tune it to your needs.
+# It calls updater.sh, which is where you should put the app-specific update steps.
+name: Check for new upstream releases
+on:
+  # Allow to manually trigger the workflow
+  workflow_dispatch:
+  # Run it every day at 6:00 UTC
+  schedule:
+    - cron: '0 6 * * *'
+jobs:
+  updater:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Fetch the source code
+        uses: actions/checkout@v2
+        with:
+          token: ${{ secrets.GITHUB_TOKEN }}
+      - name: Run the updater script
+        id: run_updater
+        run: |
+          # Setting up Git user
+          git config --global user.name 'yunohost-bot'
+          git config --global user.email 'yunohost-bot@users.noreply.github.com'
+          # Run the updater script
+          /bin/bash .github/workflows/updater.sh
+      - name: Commit changes
+        id: commit
+        if: ${{ env.PROCEED == 'true' }}
+        run: |
+          git commit -am "Upgrade to v$VERSION"
+      - name: Create Pull Request
+        id: cpr
+        if: ${{ env.PROCEED == 'true' }}
+        uses: peter-evans/create-pull-request@v3
+        with:
+          token: ${{ secrets.GITHUB_TOKEN }}
+          commit-message: Update to version ${{ env.VERSION }}
+          committer: 'yunohost-bot <yunohost-bot@users.noreply.github.com>'
+          author: 'yunohost-bot <yunohost-bot@users.noreply.github.com>'
+          signoff: false
+          base: testing
+          branch: ci-auto-update-v${{ env.VERSION }}
+          delete-branch: true
+          title: 'Upgrade to version ${{ env.VERSION }}'
+          body: |
+            Upgrade to v${{ env.VERSION }}
+          draft: false
+
diff --git a/manifest.json b/manifest.json
index 876e46d..e5148e4 100644
--- a/manifest.json
+++ b/manifest.json
@@ -4,7 +4,7 @@
     "packaging_format": 1,
     "description": {
         "en": "Metric & analytic dashboards for monitoring",
-        "fr": "Tableaux de bords de supervision"
+        "fr": "Tableaux de bord de supervision"
    },
     "version": "9.1.2~ynh1",
     "url": "https://grafana.com/oss/grafana/",
@@ -12,7 +12,7 @@
         "license": "AGPL-3.0-only",
         "website": "https://grafana.com/",
         "demo": "https://play.grafana.org",
-        "code": "https://github.com/grafana/"
+        "code": "https://github.com/grafana/grafana"
     },
     "license": "AGPL-3.0-only",
     "maintainer": {

From 491b92d485f0b1d5f1c237ca9a51bbc957d333d1 Mon Sep 17 00:00:00 2001
From: yunohost-bot <yunohost-bot@users.noreply.github.com>
Date: Sat, 3 Sep 2022 17:28:57 +0000
Subject: [PATCH 6/6] Auto-update README

---
 README.md    | 2 +-
 README_fr.md | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index 45bc6a6..76592ae 100644
--- a/README.md
+++ b/README.md
@@ -67,7 +67,7 @@ LDAP and HTTP auth are supported.
 ## Documentation and resources
 
 * Official app website: <https://grafana.com/>
-* Upstream app code repository: <https://github.com/grafana/>
+* Upstream app code repository: <https://github.com/grafana/grafana>
 * YunoHost documentation for this app: <https://yunohost.org/app_grafana>
 * Report a bug: <https://github.com/YunoHost-Apps/grafana_ynh/issues>
 
diff --git a/README_fr.md b/README_fr.md
index 7ddd0bc..38e6a4f 100644
--- a/README_fr.md
+++ b/README_fr.md
@@ -15,7 +15,7 @@ Si vous n'avez pas YunoHost, regardez [ici](https://yunohost.org/#/install) pour
 ## Vue d'ensemble
 
-Tableaux de bords de supervision
+Tableaux de bord de supervision
 
 **Version incluse :** 9.1.2~ynh1
 
 **Démo :** https://play.grafana.org
@@ -67,7 +67,7 @@ LDAP and HTTP auth are supported.
 ## Documentations et ressources
 
 * Site officiel de l'app : <https://grafana.com/>
-* Dépôt de code officiel de l'app : <https://github.com/grafana/>
+* Dépôt de code officiel de l'app : <https://github.com/grafana/grafana>
 * Documentation YunoHost pour cette app : <https://yunohost.org/app_grafana>
 * Signaler un bug : <https://github.com/YunoHost-Apps/grafana_ynh/issues>