nextcloud_ynh/scripts/_common.sh
Rafi59 5d81287933 Update to nextcloud 12 (closes #36 and #38) (#37)
* Update to nextcloud 12
* Add correct rights
* Fix temp directory rights on upgrade
* Add server tuning
* Update php-fpm.conf
* Add new helper for fpm config
* [fix] Opcache
* [fix] nginx SAMEORIGIN
2017-08-03 17:26:39 +02:00


#
# Common variables
#
APPNAME="nextcloud"
# Nextcloud version
LAST_VERSION=$(grep "VERSION=" "upgrade.d/upgrade.last.sh" | cut -d= -f2)
# Package name for Nextcloud dependencies
DEPS_PKG_NAME="nextcloud-deps"
# App package root directory should be the parent folder
PKGDIR=$(cd ../; pwd)
#
# Common helpers
#
# Download and extract Nextcloud sources to the given directory
# usage: extract_nextcloud DESTDIR [AS_USER]
extract_nextcloud() {
    # Remote URL to fetch Nextcloud tarball
    NEXTCLOUD_SOURCE_URL="https://download.nextcloud.com/server/releases/nextcloud-${VERSION}.tar.bz2"
    local DESTDIR=$1
    local AS_USER=${2:-admin}

    # retrieve and extract the Nextcloud tarball
    nc_tarball="/tmp/nextcloud.tar.bz2"
    rm -f "$nc_tarball"
    wget -q -O "$nc_tarball" "$NEXTCLOUD_SOURCE_URL" \
      || ynh_die "Unable to download Nextcloud tarball"
    echo "$NEXTCLOUD_SOURCE_SHA256 $nc_tarball" | sha256sum -c >/dev/null \
      || ynh_die "Invalid checksum of downloaded tarball"
    exec_as "$AS_USER" tar xjf "$nc_tarball" -C "$DESTDIR" --strip-components 1 \
      || ynh_die "Unable to extract Nextcloud tarball"
    rm -f "$nc_tarball"

    # apply patches
    (cd "$DESTDIR" \
     && for p in ${PKGDIR}/patches/*.patch; do \
            exec_as "$AS_USER" patch -p1 < "$p"; done) \
      || ynh_die "Unable to apply patches to Nextcloud"
}
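# Illustrative call (hypothetical destination and user; VERSION and
# NEXTCLOUD_SOURCE_SHA256 are expected to come from the sourced upgrade.d file):
#   extract_nextcloud "/var/www/nextcloud" "nextcloud"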
# Execute a command as another user
# usage: exec_as USER COMMAND [ARG ...]
exec_as() {
    local USER=$1
    shift 1

    if [[ $USER = $(whoami) ]]; then
        eval "$@"
    else
        # use sudo twice to be root and be allowed to use another user
        sudo sudo -u "$USER" "$@"
    fi
}
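# Illustrative call (hypothetical user and command):
#   exec_as "nextcloud" ls "/var/www/nextcloud"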
# Execute a command with occ as a given user from a given directory
# usage: exec_occ WORKDIR AS_USER COMMAND [ARG ...]
exec_occ() {
    local WORKDIR=$1
    local AS_USER=$2
    shift 2

    (cd "$WORKDIR" && exec_as "$AS_USER" \
        php occ --no-interaction --no-ansi "$@")
}
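# Illustrative call (hypothetical path and user), e.g. to print the install status:
#   exec_occ "/var/www/nextcloud" "nextcloud" status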
# Create the external storage for the home folders and enable sharing
# usage: create_home_external_storage OCC_COMMAND
create_home_external_storage() {
    local OCC=$1
    local mount_id=`$OCC files_external:create --output=json \
        'Home' 'local' 'null::null' -c 'datadir=/home/$user' || true`
    ! [[ $mount_id =~ ^[0-9]+$ ]] \
      && echo "Unable to create external storage" 1>&2 \
      || $OCC files_external:option "$mount_id" enable_sharing true
}
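# Illustrative call; the occ command is passed as a single string
# (hypothetical path and user):
#   create_home_external_storage "exec_occ /var/www/nextcloud nextcloud"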
# Check if a URL is already handled
# usage: is_url_handled URL
is_url_handled() {
    local OUTPUT=($(curl -k -s -o /dev/null \
        -w 'x%{redirect_url} %{http_code}' "$1"))

    # it's handled if it does not redirect to the SSO nor return 404
    [[ ! ${OUTPUT[0]} =~ \/yunohost\/sso\/ && ${OUTPUT[1]} != 404 ]]
}
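# Illustrative call (hypothetical URL):
#   is_url_handled "https://domain.tld/nextcloud" && echo "URL already handled" >&2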
# Rename a MySQL database and user
# usage: rename_mysql_db DBNAME DBUSER DBPASS NEW_DBNAME NEW_DBUSER
rename_mysql_db() {
    local DBNAME=$1 DBUSER=$2 DBPASS=$3 NEW_DBNAME=$4 NEW_DBUSER=$5
    local SQLPATH="/tmp/${DBNAME}-$(date '+%s').sql"

    # dump the old database
    mysqldump -u "$DBUSER" -p"$DBPASS" --no-create-db "$DBNAME" > "$SQLPATH"

    # create the new database and user
    ynh_mysql_create_db "$NEW_DBNAME" "$NEW_DBUSER" "$DBPASS"
    ynh_mysql_connect_as "$NEW_DBUSER" "$DBPASS" "$NEW_DBNAME" < "$SQLPATH"

    # remove the old database
    ynh_mysql_drop_db "$DBNAME"
    ynh_mysql_drop_user "$DBUSER"
    rm "$SQLPATH"
}
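# Illustrative call (hypothetical names; the password stays the same):
#   rename_mysql_db "owncloud" "owncloud" "$dbpass" "nextcloud" "nextcloud"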
SECURE_REMOVE () {    # Remove a directory after checking the variables it contains
    chaine="$1"    # The argument must be given between single quotes '' to keep the caller from expanding the variables.
    no_var=0
    while (echo "$chaine" | grep -q '\$')    # Loop as long as there are $ in the string
    do
        no_var=1
        global_var=$(echo "$chaine" | cut -d '$' -f 2)    # Isolate the first variable found.
        only_var=\$$(expr "$global_var" : '\([A-Za-z0-9_]*\)')    # Fully isolate the variable by putting the $ back in front and keeping only its name; in particular, drop any / and trailing path.
        real_var=$(eval "echo ${only_var}")    # `eval "echo ${var}"` expands a variable contained in another variable.
        if test -z "$real_var" || [ "$real_var" = "/" ]; then
            echo "Variable $only_var is empty, removal of $chaine cancelled." >&2
            return 1
        fi
        chaine=$(echo "$chaine" | sed "s@$only_var@$real_var@")    # Replace the variable with its value in the string.
    done
    if [ "$no_var" -eq 1 ]
    then
        if [ -e "$chaine" ]; then
            echo "Delete directory $chaine"
            sudo rm -rf "$chaine"
        fi
        return 0
    else
        echo "No variable detected." >&2
        return 1
    fi
}
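# Illustrative call; the argument is single-quoted on purpose so the variable is
# expanded (and checked) by the function itself, not by the caller:
#   SECURE_REMOVE '/var/www/$app'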
#=================================================
# FUTURE YUNOHOST HELPERS - TO BE REMOVED LATER
#=================================================
# Use logrotate to manage the logfile
#
# usage: ynh_use_logrotate [logfile]
# | arg: logfile - absolute path of logfile
#
# If no argument is provided, a standard directory will be used: /var/log/${app}
# You can provide a path to the directory only, or to the logfile itself.
# /parentdir/logdir/
# /parentdir/logdir/logfile.log
#
# It's possible to use this helper several times; each config will be added to the same logrotate config file.
ynh_use_logrotate () {
    if [ "$#" -gt 0 ]; then
        if [ "$(echo ${1##*.})" == "log" ]; then    # Keep only the extension to check whether it's a logfile
            logfile=$1    # In this case, point logrotate at the logfile itself
        else
            logfile=$1/*.log    # Otherwise, use the directory and every logfile in it
        fi
    else
        logfile="/var/log/${app}/*.log"    # Without argument, use a default directory in /var/log
    fi
    cat > ./${app}-logrotate << EOF    # Build a config file for logrotate
$logfile {
    # Rotate if the logfile exceeds 100 MB
    size 100M
    # Keep at most 12 old logs
    rotate 12
    # Compress the logs with gzip
    compress
    # Compress the log at the next cycle, so the 2 most recent logs always stay uncompressed
    delaycompress
    # Copy and truncate the log so writing can continue, instead of moving the log away
    copytruncate
    # Do not raise an error if the log is missing
    missingok
    # Do not rotate if the log is empty
    notifempty
    # Keep old logs in the same directory
    noolddir
}
EOF
    sudo mkdir -p $(dirname "$logfile")    # Create the log directory, if it does not exist
    cat ${app}-logrotate | sudo tee -a /etc/logrotate.d/$app > /dev/null    # Append this config to any existing logrotate config for this app
}
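# Illustrative calls (hypothetical paths):
#   ynh_use_logrotate                                  # default: /var/log/$app
#   ynh_use_logrotate "/var/log/$app/nextcloud.log"    # a single logfile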
# Remove the app's logrotate config.
#
# usage: ynh_remove_logrotate
ynh_remove_logrotate () {
    if [ -e "/etc/logrotate.d/$app" ]; then
        sudo rm "/etc/logrotate.d/$app"
    fi
}
# Calculate and store a file checksum into the app settings
#
# $app should be defined when calling this helper
#
# usage: ynh_store_file_checksum file
# | arg: file - The file on which the checksum will be performed, then stored.
ynh_store_file_checksum () {
    local checksum_setting_name=checksum_${1//[\/ ]/_}    # Replace all '/' and ' ' by '_'
    ynh_app_setting_set $app $checksum_setting_name $(sudo md5sum "$1" | cut -d' ' -f1)
}
# Verify the checksum and back up the file if it's different
# This helper is primarily meant to make it easy to back up personalised/manually
# modified config files.
#
# $app should be defined when calling this helper
#
# usage: ynh_backup_if_checksum_is_different file [compress]
# | arg: file - The file on which the checksum test will be performed.
# | arg: compress - 1 to compress the backup instead of a simple copy
# Compression is needed for a file which will be analyzed even if its name is different.
#
# | ret: Return the name of the backup file, or nothing
ynh_backup_if_checksum_is_different () {
    local file=$1
    local compress_backup=${2:-0}    # If $2 is empty, compress_backup will be set to 0
    local checksum_setting_name=checksum_${file//[\/ ]/_}    # Replace all '/' and ' ' by '_'
    local checksum_value=$(ynh_app_setting_get $app $checksum_setting_name)
    if [ -n "$checksum_value" ]
    then    # Proceed only if a value was stored into the app settings
        if ! echo "$checksum_value $file" | sudo md5sum -c --status
        then    # If the checksum is now different
            backup_file="$file.backup.$(date '+%d.%m.%y_%Hh%M,%Ss')"
            if [ $compress_backup -eq 1 ]
            then
                sudo tar --create --gzip --file "$backup_file.tar.gz" "$file"    # Back up the current file and compress it
                backup_file="$backup_file.tar.gz"
            else
                sudo cp -a "$file" "$backup_file"    # Back up the current file
            fi
            echo "File $file has been manually modified since the installation or the last upgrade, so it has been duplicated in $backup_file" >&2
            echo "$backup_file"    # Return the name of the backup file
        fi
    fi
}
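# Illustrative call (hypothetical path), compressing the backup:
#   ynh_backup_if_checksum_is_different "$final_path/config/config.php" 1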
# Create a dedicated php-fpm config
#
# $app and $final_path should be defined when calling this helper
#
# usage: ynh_add_fpm_config
ynh_add_fpm_config () {
    finalphpconf="/etc/php5/fpm/pool.d/$app.conf"
    ynh_backup_if_checksum_is_different "$finalphpconf" 1
    sudo cp ../conf/php-fpm.conf "$finalphpconf"
    ynh_replace_string "__NAMETOCHANGE__" "$app" "$finalphpconf"
    ynh_replace_string "__FINALPATH__" "$final_path" "$finalphpconf"
    ynh_replace_string "__USER__" "$app" "$finalphpconf"
    sudo chown root: "$finalphpconf"
    ynh_store_file_checksum "$finalphpconf"

    if [ -e "../conf/php-fpm.ini" ]
    then
        finalphpini="/etc/php5/fpm/conf.d/20-$app.ini"
        ynh_backup_if_checksum_is_different "$finalphpini" 1
        sudo cp ../conf/php-fpm.ini "$finalphpini"
        sudo chown root: "$finalphpini"
        ynh_store_file_checksum "$finalphpini"
    fi
    sudo systemctl reload php5-fpm
}
# Remove the dedicated php-fpm config
#
# usage: ynh_remove_fpm_config
ynh_remove_fpm_config () {
    ynh_secure_remove "/etc/php5/fpm/pool.d/$app.conf"
    ynh_secure_remove "/etc/php5/fpm/conf.d/20-$app.ini" 2>&1
    sudo systemctl reload php5-fpm
}