
Merge pull request #54 from YunoHost-Apps/enh_update_9.1.2

Commit 14bf5a6d58 by JimboJoe, 2022-09-04 09:08:21 +02:00, committed by GitHub (GPG key ID: 4AEE18F83AFDEB23)
9 changed files with 140 additions and 457 deletions

.github/workflows/updater.sh (vendored, new file, 72 lines)

@@ -0,0 +1,72 @@
#!/bin/bash
#=================================================
# PACKAGE UPDATING HELPER
#=================================================
# This script is meant to be run by GitHub Actions
# The YunoHost-Apps organisation offers a template Action to run this script periodically
# Since each app is different, maintainers can adapt its contents so as to perform
# automatic actions when a new upstream release is detected.
#=================================================
# FETCHING LATEST RELEASE AND ITS ASSETS
#=================================================
# Fetching information
current_version=$(cat manifest.json | jq -j '.version|split("~")[0]')
repo=$(cat manifest.json | jq -j '.upstream.code|split("https://github.com/")[1]')
# Some jq magic is needed, because the latest upstream release is not always the latest version (e.g. security patches for older versions)
version=$(curl --silent "https://api.github.com/repos/$repo/releases" | jq -r '.[] | select( .prerelease != true ) | .tag_name' | sort -V | tail -1)
# Later in the script, we assume the version has only digits and dots
# Sometimes the release name starts with a "v", so let's filter it out.
# You may need more tweaks here if the upstream repository has different naming conventions.
if [[ ${version:0:1} == "v" || ${version:0:1} == "V" ]]; then
    version=${version:1}
fi
# Setting up the environment variables
echo "Current version: $current_version"
echo "Latest release from upstream: $version"
echo "VERSION=$version" >> $GITHUB_ENV
echo "REPO=$repo" >> $GITHUB_ENV
# For the time being, let's assume the script will fail
echo "PROCEED=false" >> $GITHUB_ENV
# Proceed only if the retrieved version is greater than the current one
if ! dpkg --compare-versions "$current_version" "lt" "$version" ; then
    echo "::warning ::No new version available"
    exit 0
# Proceed only if a PR for this new version does not already exist
elif git ls-remote -q --exit-code --heads https://github.com/$GITHUB_REPOSITORY.git ci-auto-update-v$version ; then
    echo "::warning ::A branch already exists for this update"
    exit 0
fi
#=================================================
# UPDATE SOURCE FILES
#=================================================
sed -i "s/GRAFANA_VERSION=.*/GRAFANA_VERSION=\"$version\"/" scripts/_common.sh
echo "... scripts/_common.sh updated"
#=================================================
# SPECIFIC UPDATE STEPS
#=================================================
# Any action on the app's source code can be done.
# The GitHub Action workflow takes care of committing all changes after this script ends.
#=================================================
# GENERIC FINALIZATION
#=================================================
# Replace new version in manifest
echo "$(jq -s --indent 4 ".[] | .version = \"$version~ynh1\"" manifest.json)" > manifest.json
# No need to update the README, yunohost-bot takes care of it
# The Action will proceed only if the PROCEED environment variable is set to true
echo "PROCEED=true" >> $GITHUB_ENV
exit 0
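The gate above hinges on dpkg --compare-versions, which exits 0 exactly when the stated relation holds between two Debian-style version strings. A minimal sketch of that logic, runnable anywhere dpkg is installed (the version values are illustrative):

    #!/bin/bash
    # Proceed only when the upstream version is strictly greater than the
    # packaged one, mirroring the test updater.sh uses.
    current_version="8.3.3"
    for version in 8.3.3 8.3.3-1 9.1.2; do
        if ! dpkg --compare-versions "$current_version" "lt" "$version"; then
            echo "$version: no new version available"
        else
            echo "$version: proceed with update"
        fi
    done

Debian ordering also ranks revision suffixes, so 8.3.3-1 sorts above 8.3.3; this is why the script strips the ~ynhX suffix from the manifest version before comparing.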

.github/workflows/updater.yml (vendored, new file, 50 lines)

@@ -0,0 +1,50 @@
# This workflow allows GitHub Actions to automagically update your app whenever a new upstream release is detected.
# You need to enable Actions in your repository settings, and fetch this Action from the YunoHost-Apps organization.
# This file should be enough by itself, but feel free to tune it to your needs.
# It calls updater.sh, which is where you should put the app-specific update steps.
name: Check for new upstream releases
on:
  # Allow to manually trigger the workflow
  workflow_dispatch:
  # Run it every day at 6:00 UTC
  schedule:
    - cron: '0 6 * * *'
jobs:
  updater:
    runs-on: ubuntu-latest
    steps:
      - name: Fetch the source code
        uses: actions/checkout@v2
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
      - name: Run the updater script
        id: run_updater
        run: |
          # Setting up Git user
          git config --global user.name 'yunohost-bot'
          git config --global user.email 'yunohost-bot@users.noreply.github.com'
          # Run the updater script
          /bin/bash .github/workflows/updater.sh
      - name: Commit changes
        id: commit
        if: ${{ env.PROCEED == 'true' }}
        run: |
          git commit -am "Upgrade to v$VERSION"
      - name: Create Pull Request
        id: cpr
        if: ${{ env.PROCEED == 'true' }}
        uses: peter-evans/create-pull-request@v3
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          commit-message: Update to version ${{ env.VERSION }}
          committer: 'yunohost-bot <yunohost-bot@users.noreply.github.com>'
          author: 'yunohost-bot <yunohost-bot@users.noreply.github.com>'
          signoff: false
          base: testing
          branch: ci-auto-update-v${{ env.VERSION }}
          delete-branch: true
          title: 'Upgrade to version ${{ env.VERSION }}'
          body: |
            Upgrade to v${{ env.VERSION }}
          draft: false
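The "Commit changes" and "Create Pull Request" steps are gated on env.PROCEED, which updater.sh sets by appending KEY=value lines to the file GitHub Actions exposes as $GITHUB_ENV. One way to dry-run the script outside Actions is to stub the two variables it relies on; the values below are illustrative, and note that the script will really edit manifest.json and scripts/_common.sh in your working tree:

    # Local dry run of the updater (assumes curl, jq, dpkg and git are
    # available; run from the repository root).
    export GITHUB_ENV="$(mktemp)"                        # stand-in for the Actions env file
    export GITHUB_REPOSITORY="YunoHost-Apps/grafana_ynh" # used by the branch-existence check
    bash .github/workflows/updater.sh
    cat "$GITHUB_ENV"   # expect VERSION=..., REPO=..., PROCEED=true or false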

README.md

@@ -17,7 +17,7 @@ If you don't have YunoHost, please consult [the guide](https://yunohost.org/#/install)
 Metric & analytic dashboards for monitoring
-**Shipped version:** 8.3.3~ynh2
+**Shipped version:** 9.1.2~ynh1
 **Demo:** https://play.grafana.org
@@ -67,7 +67,7 @@ LDAP and HTTP auth are supported.
 ## Documentation and resources
 * Official app website: <https://grafana.com/>
-* Upstream app code repository: <https://github.com/grafana/>
+* Upstream app code repository: <https://github.com/grafana/grafana>
 * YunoHost documentation for this app: <https://yunohost.org/app_grafana>
 * Report a bug: <https://github.com/YunoHost-Apps/grafana_ynh/issues>

README_fr.md

@@ -15,9 +15,9 @@ Si vous n'avez pas YunoHost, regardez [ici](https://yunohost.org/#/install) pour
 ## Vue d'ensemble
-Tableaux de bords de supervision
+Tableaux de bord de supervision
-**Version incluse :** 8.3.3~ynh2
+**Version incluse :** 9.1.2~ynh1
 **Démo :** https://play.grafana.org
@@ -67,7 +67,7 @@ LDAP and HTTP auth are supported.
 ## Documentations et ressources
 * Site officiel de l'app : <https://grafana.com/>
-* Dépôt de code officiel de l'app : <https://github.com/grafana/>
+* Dépôt de code officiel de l'app : <https://github.com/grafana/grafana>
 * Documentation YunoHost pour cette app : <https://yunohost.org/app_grafana>
 * Signaler un bug : <https://github.com/YunoHost-Apps/grafana_ynh/issues>

conf/influxdb.conf (deleted file)

@ -1,439 +0,0 @@
### Welcome to the InfluxDB configuration file.
# The values in this file override the default values used by the system if
# a config option is not specified. The commented out lines are the configuration
# field and the default value used. Uncommenting a line and changing the value
# will change the value used at runtime when the process is restarted.
# Once every 24 hours InfluxDB will report usage data to usage.influxdata.com
# The data includes a random ID, os, arch, version, the number of series and other
# usage data. No data from user databases is ever transmitted.
# Change this option to true to disable reporting.
# reporting-disabled = false
# we'll try to get the hostname automatically, but if the OS returns something
# that isn't resolvable by other servers in the cluster, use this option to
# manually set the hostname
# hostname = "localhost"
###
### [meta]
###
### Controls the parameters for the Raft consensus group that stores metadata
### about the InfluxDB cluster.
###
[meta]
# Where the metadata/raft database is stored
dir = "/var/lib/influxdb/meta"
# Automatically create a default retention policy when creating a database.
# retention-autocreate = true
# If log messages are printed for the meta service
# logging-enabled = true
###
### [data]
###
### Controls where the actual shard data for InfluxDB lives and how it is
### flushed from the WAL. "dir" may need to be changed to a suitable place
### for your system, but the WAL settings are an advanced configuration. The
### defaults should work for most systems.
###
[data]
# The directory where the TSM storage engine stores TSM files.
dir = "/var/lib/influxdb/data"
# The directory where the TSM storage engine stores WAL files.
wal-dir = "/var/lib/influxdb/wal"
# Trace logging provides more verbose output around the tsm engine. Turning
# this on can provide more useful output for debugging tsm engine issues.
# trace-logging-enabled = false
# Whether queries should be logged before execution. Very useful for troubleshooting, but will
# log any sensitive data contained within a query.
# query-log-enabled = true
# Settings for the TSM engine
# CacheMaxMemorySize is the maximum size a shard's cache can
# reach before it starts rejecting writes.
# cache-max-memory-size = 1048576000
# CacheSnapshotMemorySize is the size at which the engine will
# snapshot the cache and write it to a TSM file, freeing up memory
# cache-snapshot-memory-size = 26214400
# CacheSnapshotWriteColdDuration is the length of time at
# which the engine will snapshot the cache and write it to
# a new TSM file if the shard hasn't received writes or deletes
# cache-snapshot-write-cold-duration = "10m"
# CompactFullWriteColdDuration is the duration at which the engine
# will compact all TSM files in a shard if it hasn't received a
# write or delete
# compact-full-write-cold-duration = "4h"
# The maximum series allowed per database before writes are dropped. This limit can prevent
# high cardinality issues at the database level. This limit can be disabled by setting it to
# 0.
# max-series-per-database = 1000000
# The maximum number of tag values per tag that are allowed before writes are dropped. This limit
# can prevent high cardinality tag values from being written to a measurement. This limit can be
# disabled by setting it to 0.
# max-values-per-tag = 100000
###
### [coordinator]
###
### Controls the clustering service configuration.
###
[coordinator]
# The default time a write request will wait until a "timeout" error is returned to the caller.
# write-timeout = "10s"
# The maximum number of concurrent queries allowed to be executing at one time. If a query is
# executed and exceeds this limit, an error is returned to the caller. This limit can be disabled
# by setting it to 0.
# max-concurrent-queries = 0
# The maximum time a query is allowed to execute before being killed by the system. This limit
# can help prevent runaway queries. Setting the value to 0 disables the limit.
# query-timeout = "0s"
# The time threshold when a query will be logged as a slow query. This limit can be set to help
# discover slow or resource intensive queries. Setting the value to 0 disables the slow query logging.
# log-queries-after = "0s"
# The maximum number of points a SELECT can process. A value of 0 will make the maximum
# point count unlimited.
# max-select-point = 0
# The maximum number of series a SELECT can run. A value of 0 will make the maximum series
# count unlimited.
# max-select-series = 0
# The maximum number of GROUP BY time buckets a SELECT can create. A value of zero will make the maximum
# number of buckets unlimited.
# max-select-buckets = 0
###
### [retention]
###
### Controls the enforcement of retention policies for evicting old data.
###
[retention]
# Determines whether retention policy enforcement is enabled.
# enabled = true
# The interval of time when retention policy enforcement checks run.
# check-interval = "30m"
###
### [shard-precreation]
###
### Controls the precreation of shards, so they are available before data arrives.
### Only shards that, after creation, will have both a start- and end-time in the
### future, will ever be created. Shards are never precreated that would be wholly
### or partially in the past.
[shard-precreation]
# Determines whether shard pre-creation service is enabled.
# enabled = true
# The interval of time when the check to pre-create new shards runs.
# check-interval = "10m"
# The default period ahead of a shard group's end time during which its successor
# group is created.
# advance-period = "30m"
###
### [monitor]
###
### Controls the system self-monitoring, statistics and diagnostics.
###
### The internal database for monitoring data is created automatically if
### it does not already exist. The target retention within this database
### is called 'monitor' and is also created with a retention period of 7 days
### and a replication factor of 1, if it does not exist. In all cases
### this retention policy is configured as the default for the database.
[monitor]
# Whether to record statistics internally.
# store-enabled = true
# The destination database for recorded statistics
# store-database = "_internal"
# The interval at which to record statistics
# store-interval = "10s"
###
### [admin]
###
### Controls the availability of the built-in, web-based admin interface. If HTTPS is
### enabled for the admin interface, HTTPS must also be enabled on the [http] service.
###
### NOTE: This interface is deprecated as of 1.1.0 and will be removed in a future release.
[admin]
# Determines whether the admin service is enabled.
# enabled = false
# The default bind address used by the admin service.
# bind-address = ":8083"
# Whether the admin service should use HTTPS.
# https-enabled = false
# The SSL certificate used when HTTPS is enabled.
# https-certificate = "/etc/ssl/influxdb.pem"
###
### [http]
###
### Controls how the HTTP endpoints are configured. These are the primary
### mechanism for getting data into and out of InfluxDB.
###
[http]
# Determines whether HTTP endpoint is enabled.
# enabled = true
# The bind address used by the HTTP service.
# bind-address = ":8086"
# Determines whether HTTP authentication is enabled.
# auth-enabled = false
# The default realm sent back when issuing a basic auth challenge.
# realm = "InfluxDB"
# Determines whether HTTP request logging is enabled.
# log-enabled = true
# Determines whether detailed write logging is enabled.
# write-tracing = false
# Determines whether the pprof endpoint is enabled. This endpoint is used for
# troubleshooting and monitoring.
# pprof-enabled = true
# Determines whether HTTPS is enabled.
# https-enabled = false
# The SSL certificate to use when HTTPS is enabled.
# https-certificate = "/etc/ssl/influxdb.pem"
# Use a separate private key location.
# https-private-key = ""
# The JWT auth shared secret to validate requests using JSON web tokens.
# shared-secret = ""
# The default chunk size for result sets that should be chunked.
# max-row-limit = 10000
# The maximum number of HTTP connections that may be open at once. New connections that
# would exceed this limit are dropped. Setting this value to 0 disables the limit.
# max-connection-limit = 0
# Enable http service over unix domain socket
# unix-socket-enabled = false
# The path of the unix domain socket.
# bind-socket = "/var/run/influxdb.sock"
###
### [subscriber]
###
### Controls the subscriptions, which can be used to fork a copy of all data
### received by the InfluxDB host.
###
[subscriber]
# Determines whether the subscriber service is enabled.
# enabled = true
# The default timeout for HTTP writes to subscribers.
# http-timeout = "30s"
# Allows insecure HTTPS connections to subscribers. This is useful when testing with self-
# signed certificates.
# insecure-skip-verify = false
# The path to the PEM encoded CA certs file. If empty, the default system certs will be used
# ca-certs = ""
# The number of writer goroutines processing the write channel.
# write-concurrency = 40
# The number of in-flight writes buffered in the write channel.
# write-buffer-size = 1000
###
### [[graphite]]
###
### Controls one or many listeners for Graphite data.
###
[[graphite]]
# Determines whether the graphite endpoint is enabled.
# enabled = false
# database = "graphite"
# retention-policy = ""
# bind-address = ":2003"
# protocol = "tcp"
# consistency-level = "one"
# These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.
# Flush if this many points get buffered
# batch-size = 5000
# number of batches that may be pending in memory
# batch-pending = 10
# Flush at least this often even if we haven't hit buffer limit
# batch-timeout = "1s"
# UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
# udp-read-buffer = 0
### This string joins multiple matching 'measurement' values providing more control over the final measurement name.
# separator = "."
### Default tags that will be added to all metrics. These can be overridden at the template level
### or by tags extracted from metric
# tags = ["region=us-east", "zone=1c"]
### Each template line requires a template pattern. It can have an optional
### filter before the template and separated by spaces. It can also have optional extra
### tags following the template. Multiple tags should be separated by commas and no spaces
### similar to the line protocol format. There can be only one default template.
# templates = [
# "*.app env.service.resource.measurement",
# # Default template
# "server.*",
# ]
###
### [collectd]
###
### Controls one or many listeners for collectd data.
###
[[collectd]]
# enabled = false
# bind-address = ":25826"
# database = "collectd"
# retention-policy = ""
#
# The collectd service supports either scanning a directory for multiple types
# db files, or specifying a single db file.
# typesdb = "/usr/local/share/collectd"
#
# security-level = "none"
# auth-file = "/etc/collectd/auth_file"
# These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.
# Flush if this many points get buffered
# batch-size = 5000
# Number of batches that may be pending in memory
# batch-pending = 10
# Flush at least this often even if we haven't hit buffer limit
# batch-timeout = "10s"
# UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
# read-buffer = 0
###
### [opentsdb]
###
### Controls one or many listeners for OpenTSDB data.
###
[[opentsdb]]
enabled = true
# bind-address = ":4242"
# database = "opentsdb"
# retention-policy = ""
# consistency-level = "one"
# tls-enabled = false
# certificate= "/etc/ssl/influxdb.pem"
# Log an error for every malformed point.
# log-point-errors = true
# These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Only point
# metrics received over the telnet protocol undergo batching.
# Flush if this many points get buffered
# batch-size = 1000
# Number of batches that may be pending in memory
# batch-pending = 5
# Flush at least this often even if we haven't hit buffer limit
# batch-timeout = "1s"
###
### [[udp]]
###
### Controls the listeners for InfluxDB line protocol data via UDP.
###
[[udp]]
# enabled = false
# bind-address = ":8089"
# database = "udp"
# retention-policy = ""
# These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.
# Flush if this many points get buffered
# batch-size = 5000
# Number of batches that may be pending in memory
# batch-pending = 10
# Will flush at least this often even if we haven't hit buffer limit
# batch-timeout = "1s"
# UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
# read-buffer = 0
###
### [continuous_queries]
###
### Controls how continuous queries are run within InfluxDB.
###
[continuous_queries]
# Determines whether the continuous query service is enabled.
# enabled = true
# Controls whether queries are logged when executed by the CQ service.
# log-enabled = true
# The interval at which continuous queries are checked to see whether they need to run
# run-interval = "1s"

manifest.json

@@ -4,15 +4,15 @@
     "packaging_format": 1,
     "description": {
         "en": "Metric & analytic dashboards for monitoring",
-        "fr": "Tableaux de bords de supervision"
+        "fr": "Tableaux de bord de supervision"
     },
-    "version": "8.3.3~ynh2",
+    "version": "9.1.2~ynh1",
     "url": "https://grafana.com/oss/grafana/",
     "upstream": {
         "license": "AGPL-3.0-only",
         "website": "https://grafana.com/",
         "demo": "https://play.grafana.org",
-        "code": "https://github.com/grafana/"
+        "code": "https://github.com/grafana/grafana"
     },
     "license": "AGPL-3.0-only",
     "maintainer": {

scripts/_common.sh

@@ -5,7 +5,7 @@
 #=================================================
 # Debian package version for Grafana
-GRAFANA_VERSION="8.3.3"
+GRAFANA_VERSION="9.1.2"
 # dependencies used by the app
 pkg_dependencies="influxdb"

scripts/install

@@ -92,8 +92,11 @@ ynh_add_nginx_config
 ynh_script_progression --message="Configuring Grafana and InfluxDB..." --weight=30
 # If NetData is installed, configure it to feed InfluxDB
-netdata_conf="/opt/netdata/etc/netdata/exporting.conf"
-if [ -f "$netdata_conf" ] ; then
+if [ -d "/opt/netdata/etc/netdata" ] ; then
+    netdata_conf="/opt/netdata/etc/netdata/exporting.conf"
+    if [ ! -f "$netdata_conf" ] ; then
+        cp "/opt/netdata/usr/lib/netdata/conf.d/exporting.conf" /opt/netdata/etc/netdata
+    fi
     sed -i '/^\[exporting:global\]$/,/^\[/ {
         s/enabled = no/enabled = yes/
         s/# update every = 10/update every = 60/
@@ -119,12 +122,7 @@ EOF
 fi
 # Configure InfluxDB
-if [ -f /etc/influxdb/influxdb.conf ] ; then
-    sed -i '/^\[\[opentsdb\]\]$/,/^\[/ s/enabled = false/enabled = true/' /etc/influxdb/influxdb.conf
-else
-    [ -d /etc/influxdb ] || mkdir /etc/influxdb
-    cp ../conf/influxdb.conf /etc/influxdb
-fi
+sed -i '/^\[\[opentsdb\]\]$/,/^\[/ s/^.* enabled = false/enabled = true/' /etc/influxdb/influxdb.conf
 # Start InfluxDB server
 ynh_systemd_action --service_name=influxdb --action="restart"
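The rewritten sed call is the substantive change in this hunk: the package no longer ships its own influxdb.conf (hence the file deletion above) and instead edits the distribution's stock config, where the OpenTSDB listener defaults to the commented line "# enabled = false". The added ^.* prefix appears to be what makes that work, since it consumes the comment marker too rather than only flipping an already-active setting. A self-contained sketch of the behavior (the sample file and path are illustrative):

    # Reproduce the [[opentsdb]] toggle on a stock-style config snippet.
    printf '[[opentsdb]]\n  # enabled = false\n[http]\n  # enabled = true\n' > /tmp/influxdb-sample.conf
    # The address range /^\[\[opentsdb\]\]$/,/^\[/ limits the substitution to the
    # opentsdb section; ^.* swallows the leading "  # ", so the commented default
    # becomes an active "enabled = true". The [http] section is left untouched.
    sed -i '/^\[\[opentsdb\]\]$/,/^\[/ s/^.* enabled = false/enabled = true/' /tmp/influxdb-sample.conf
    cat /tmp/influxdb-sample.conf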

scripts/restore

@@ -80,7 +80,9 @@ ynh_restore_file --origin_path="/var/lib/grafana/plugins" --not_mandatory
 # Set permission with the new grafana user (id could have been changed)
 chown -R root:grafana "/etc/grafana"
-chown -R grafana:grafana "/var/lib/grafana/plugins"
+if [ -d "/var/lib/grafana/plugins" ]; then
+    chown -R grafana:grafana "/var/lib/grafana/plugins"
+fi
 #=================================================
 # RESTORE THE INFLUXDB DATABASE