mirror of https://github.com/YunoHost-Apps/synapse_ynh.git synced 2024-09-03 20:26:38 +02:00

Merge branch 'testing'

Josué Tille committed aba3c17e0a on 2020-04-27 11:35:03 +02:00
GPG key ID: 716A6C99B04194EF (no known key found for this signature in database)
17 changed files with 621 additions and 1008 deletions


@@ -14,7 +14,7 @@ Instant messaging server matrix network.
Yunohost chatroom with matrix : [https://riot.im/app/#/room/#yunohost:matrix.org](https://riot.im/app/#/room/#yunohost:matrix.org)
**Shipped version:** 1.11.0
**Shipped version:** 1.12.3
## Configuration
@@ -46,7 +46,7 @@ _matrix._tcp.example.com. 3600 IN SRV 10 0 SYNAPSE_PORT synapse.exam
```
You need to replace SYNAPSE_PORT by the real port. This port can be obtained by the command: `yunohost app setting SYNAPSE_INSTANCE_NAME synapse_tls_port`
For more details, see : https://github.com/matrix-org/synapse#setting-up-federation
For more details, see : https://github.com/matrix-org/synapse/blob/master/docs/federate.md
If it is not automatically done, you need to open this in your ISP box.
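As a concrete, hypothetical check (an instance named `synapse`, a port of 8448 and the domain example.com are placeholders, not values fixed by this package):
```
# Read the TLS federation port chosen for this instance...
yunohost app setting synapse synapse_tls_port
# ...and verify that the published SRV record announces the same port:
dig +short SRV _matrix._tcp.example.com
# expected shape: 10 0 8448 <your synapse domain>.
```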


@@ -13,7 +13,7 @@
setup_public=1
upgrade=1
upgrade=1 from_commit=db374d2bff981d2660ebdac52ee77c684383c00d
upgrade=1 from_commit=0b8f2d4423bc48a1509dc0be4e231f02a0046eca
upgrade=1 from_commit=0571cc85334408a98f6766881b580a040fa6398a
backup_restore=1
multi_instance=1
incorrect_path=0
@@ -24,5 +24,5 @@
;;; Upgrade options
; commit=db374d2bff981d2660ebdac52ee77c684383c00d
name=Fix postgresql helper from old_version_for_CI_2 branch
; commit=0b8f2d4423bc48a1509dc0be4e231f02a0046eca
name=Migrate from self signed certificate to cert managed by Yunohost
; commit=0571cc85334408a98f6766881b580a040fa6398a
name=Migrate from self signed certificate to cert managed by Yunohost from old_version_for_CI_3 branch


@@ -1,11 +1,28 @@
import json
import sys
domain = sys.argv[1]
server_name = sys.argv[2]
with open("/etc/ssowat/conf.json.persistent", "r", encoding='utf-8') as jsonFile:
data = json.load(jsonFile)
if "skipped_urls" in data:
data["skipped_urls"].append("/_matrix")
else:
data["skipped_urls"] = ["/_matrix"]
if "skipped_urls" not in data:
data["skipped_urls"] = []
if "protected_urls" not in data:
data["protected_urls"] = []
# Remove entry without the domain specified
if "/_matrix" in data["skipped_urls"]:
data["skipped_urls"].remove("/_matrix")
if domain + "/_matrix" not in data["skipped_urls"]:
data["skipped_urls"].append(domain + "/_matrix")
if server_name + "/.well-known/matrix/" not in data["skipped_urls"]:
data["skipped_urls"].append(server_name + "/.well-known/matrix/")
if domain + "/_matrix/cas_server.php/login" not in data["protected_urls"]:
data["protected_urls"].append(domain + "/_matrix/cas_server.php/login")
with open("/etc/ssowat/conf.json.persistent", "w", encoding='utf-8') as jsonFile:
jsonFile.write(json.dumps(data, indent=4, sort_keys=True))
jsonFile.write(json.dumps(data, indent=4, sort_keys=True))
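For illustration, with a hypothetical instance on example.com and a server_name of matrix.example.com, the script is now called with both values and should leave entries along these lines in the persistent SSOwat configuration (alongside whatever was already there):
```
python3 add_sso_conf.py example.com matrix.example.com
jq '{skipped_urls, protected_urls}' /etc/ssowat/conf.json.persistent
# {
#   "skipped_urls": [
#     "example.com/_matrix",
#     "matrix.example.com/.well-known/matrix/"
#   ],
#   "protected_urls": [
#     "example.com/_matrix/cas_server.php/login"
#   ]
# }
```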


@@ -1,5 +1,5 @@
SOURCE_URL=https://github.com/YunoHost-Apps/synapse_python_build/releases/download/v1.11.0/matrix-synapse_1.11.0-stretch-bin1_armv7l.tar.gz
SOURCE_SUM=8a4f13bdbe429c94df3aab07d289a61f5f2e372d45cda21ea9d297f1cc224ece
SOURCE_URL=https://github.com/YunoHost-Apps/synapse_python_build/releases/download/v1.12.3/matrix-synapse_1.12.3-stretch-bin1_armv7l.tar.gz
SOURCE_SUM=b18e314d312ccd69fa7aec295374515cd669d62041bf7ee1a304bc77105f9eda
# (Optional) Program to check the integrity (sha256sum, md5sum...)
# default: sha256
SOURCE_SUM_PRG=sha256sum


@@ -906,7 +906,7 @@ registration_shared_secret: "__REGISTRATION_SECRET__"
# (By default, no suggestion is made, so it is left up to the client.
# This setting is ignored unless public_baseurl is also set.)
#
#default_identity_server: https://matrix.org
default_identity_server: https://vector.im
# The list of identity servers trusted to verify third party
# identifiers by this server.
@@ -949,8 +949,8 @@ registration_shared_secret: "__REGISTRATION_SECRET__"
# If a delegate is specified, the config option public_baseurl must also be filled out.
#
account_threepid_delegates:
#email: https://example.com # Delegate email sending to example.org
#msisdn: http://localhost:8090 # Delegate SMS sending to this local process
email: https://vector.im # Delegate email sending to vector.im # TODO use the Yunohost server to send email !!
msisdn: https://vector.im
# Users who register on this homeserver will automatically be joined
# to these rooms
@@ -1216,10 +1216,10 @@ saml2_config:
# Enable CAS for registration and login.
#
#cas_config:
# enabled: true
# server_url: "https://cas-server.com"
# service_url: "https://homeserver.domain.com:8448"
cas_config:
enabled: true
server_url: "https://__DOMAIN__/_matrix/cas_server.php"
service_url: "https://__DOMAIN__"
# #displayname_attribute: name
# #required_attributes:
# # name: value
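With cas_config now enabled and pointed at the bundled cas_server.php, the homeserver should start advertising an SSO/CAS login flow to clients. A hedged check (hypothetical domain; the exact flow names depend on the synapse version):
```
curl -s https://example.com/_matrix/client/r0/login | jq '.flows'
# expect entries such as {"type": "m.login.cas"} / {"type": "m.login.sso"}
# next to the usual {"type": "m.login.password"}
```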
@@ -1342,6 +1342,7 @@ password_providers:
- module: "ldap_auth_provider.LdapAuthProvider"
config:
enabled: true
mode: "search"
uri: "ldap://localhost"
start_tls: false
base: "ou=users,dc=yunohost,dc=org"
@@ -1349,9 +1350,9 @@ password_providers:
uid: "uid"
mail: "mail"
name: "givenName"
# #bind_dn:
# #bind_dn: # TODO Add authentication to have the filter working
# #bind_password:
# #filter: "(objectClass=posixAccount)"
filter: "(&(objectClass=posixAccount)(permission=cn=__APP__.main,ou=permission,dc=yunohost,dc=org))"
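The switch to `mode: "search"` together with the permission-based filter restricts logins to users who were granted access to this app in YunoHost. A rough way to preview who that is, assuming an instance named synapse and an LDAP server that allows the read:
```
ldapsearch -x -H ldap://localhost -b 'ou=users,dc=yunohost,dc=org' \
  '(&(objectClass=posixAccount)(permission=cn=synapse.main,ou=permission,dc=yunohost,dc=org))' uid
# every uid returned here is an account the LDAP auth provider will accept
```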


@@ -3,4 +3,15 @@ location __PATH__ {
proxy_set_header X-Forwarded-For $remote_addr;
client_max_body_size 100M;
# Use the specific path for the php file. It's more secure than global php path
location __PATH__/cas_server.php {
alias /var/www/__APP__/;
fastcgi_split_path_info ^(.+?\.php)(/.*)$;
fastcgi_pass unix:/var/run/php5-fpm-__NAME__.sock;
include fastcgi_params;
fastcgi_param REMOTE_USER $remote_user;
fastcgi_param PATH_INFO $fastcgi_path_info;
fastcgi_param SCRIPT_FILENAME cas_server.php;
}
}
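A quick way to confirm that this location really reaches the dedicated PHP pool rather than producing an nginx error (domain and instance name are placeholders):
```
# Any unknown sub-path falls through to the script's "Bad URL" branch, so getting that
# answer means nginx -> /var/run/php5-fpm-synapse.sock -> cas_server.php is wired up:
curl -k https://example.com/_matrix/cas_server.php/ping
# expected body: Bad URL
```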

conf/php-fpm.conf (new file, 392 lines)

@@ -0,0 +1,392 @@
; Start a new pool named 'www'.
; the variable $pool can we used in any directive and will be replaced by the
; pool name ('www' here)
[__NAMETOCHANGE__]
; Per pool prefix
; It only applies on the following directives:
; - 'slowlog'
; - 'listen' (unixsocket)
; - 'chroot'
; - 'chdir'
; - 'php_values'
; - 'php_admin_values'
; When not set, the global prefix (or /usr) applies instead.
; Note: This directive can also be relative to the global prefix.
; Default Value: none
;prefix = /path/to/pools/$pool
; Unix user/group of processes
; Note: The user is mandatory. If the group is not set, the default user's group
; will be used.
user = matrix-__USER__
group = matrix-__USER__
; The address on which to accept FastCGI requests.
; Valid syntaxes are:
; 'ip.add.re.ss:port' - to listen on a TCP socket to a specific address on
; a specific port;
; 'port' - to listen on a TCP socket to all addresses on a
; specific port;
; '/path/to/unix/socket' - to listen on a unix socket.
; Note: This value is mandatory.
listen = /var/run/php5-fpm-__NAMETOCHANGE__.sock
; Set listen(2) backlog. A value of '-1' means unlimited.
; Default Value: 128 (-1 on FreeBSD and OpenBSD)
;listen.backlog = 128
; Set permissions for unix socket, if one is used. In Linux, read/write
; permissions must be set in order to allow connections from a web server. Many
; BSD-derived systems allow connections regardless of permissions.
; Default Values: user and group are set as the running user
; mode is set to 0660
listen.owner = www-data
listen.group = www-data
;listen.mode = 0660
; List of ipv4 addresses of FastCGI clients which are allowed to connect.
; Equivalent to the FCGI_WEB_SERVER_ADDRS environment variable in the original
; PHP FCGI (5.2.2+). Makes sense only with a tcp listening socket. Each address
; must be separated by a comma. If this value is left blank, connections will be
; accepted from any ip address.
; Default Value: any
;listen.allowed_clients = 127.0.0.1
; Specify the nice(2) priority to apply to the pool processes (only if set)
; The value can vary from -19 (highest priority) to 20 (lower priority)
; Note: - It will only work if the FPM master process is launched as root
; - The pool processes will inherit the master process priority
; unless it specified otherwise
; Default Value: no set
; priority = -19
; Choose how the process manager will control the number of child processes.
; Possible Values:
; static - a fixed number (pm.max_children) of child processes;
; dynamic - the number of child processes are set dynamically based on the
; following directives. With this process management, there will be
; always at least 1 children.
; pm.max_children - the maximum number of children that can
; be alive at the same time.
; pm.start_servers - the number of children created on startup.
; pm.min_spare_servers - the minimum number of children in 'idle'
; state (waiting to process). If the number
; of 'idle' processes is less than this
; number then some children will be created.
; pm.max_spare_servers - the maximum number of children in 'idle'
; state (waiting to process). If the number
; of 'idle' processes is greater than this
; number then some children will be killed.
; ondemand - no children are created at startup. Children will be forked when
; new requests will connect. The following parameter are used:
; pm.max_children - the maximum number of children that
; can be alive at the same time.
; pm.process_idle_timeout - The number of seconds after which
; an idle process will be killed.
; Note: This value is mandatory.
pm = dynamic
; The number of child processes to be created when pm is set to 'static' and the
; maximum number of child processes when pm is set to 'dynamic' or 'ondemand'.
; This value sets the limit on the number of simultaneous requests that will be
; served. Equivalent to the ApacheMaxClients directive with mpm_prefork.
; Equivalent to the PHP_FCGI_CHILDREN environment variable in the original PHP
; CGI. The below defaults are based on a server without much resources. Don't
; forget to tweak pm.* to fit your needs.
; Note: Used when pm is set to 'static', 'dynamic' or 'ondemand'
; Note: This value is mandatory.
pm.max_children = 5
; The number of child processes created on startup.
; Note: Used only when pm is set to 'dynamic'
; Default Value: min_spare_servers + (max_spare_servers - min_spare_servers) / 2
pm.start_servers = 1
; The desired minimum number of idle server processes.
; Note: Used only when pm is set to 'dynamic'
; Note: Mandatory when pm is set to 'dynamic'
pm.min_spare_servers = 1
; The desired maximum number of idle server processes.
; Note: Used only when pm is set to 'dynamic'
; Note: Mandatory when pm is set to 'dynamic'
pm.max_spare_servers = 2
; The number of seconds after which an idle process will be killed.
; Note: Used only when pm is set to 'ondemand'
; Default Value: 10s
;pm.process_idle_timeout = 10s;
; The number of requests each child process should execute before respawning.
; This can be useful to work around memory leaks in 3rd party libraries. For
; endless request processing specify '0'. Equivalent to PHP_FCGI_MAX_REQUESTS.
; Default Value: 0
pm.max_requests = 500
; The URI to view the FPM status page. If this value is not set, no URI will be
; recognized as a status page. It shows the following informations:
; pool - the name of the pool;
; process manager - static, dynamic or ondemand;
; start time - the date and time FPM has started;
; start since - number of seconds since FPM has started;
; accepted conn - the number of request accepted by the pool;
; listen queue - the number of request in the queue of pending
; connections (see backlog in listen(2));
; max listen queue - the maximum number of requests in the queue
; of pending connections since FPM has started;
; listen queue len - the size of the socket queue of pending connections;
; idle processes - the number of idle processes;
; active processes - the number of active processes;
; total processes - the number of idle + active processes;
; max active processes - the maximum number of active processes since FPM
; has started;
; max children reached - number of times, the process limit has been reached,
; when pm tries to start more children (works only for
; pm 'dynamic' and 'ondemand');
; Value are updated in real time.
; Example output:
; pool: www
; process manager: static
; start time: 01/Jul/2011:17:53:49 +0200
; start since: 62636
; accepted conn: 190460
; listen queue: 0
; max listen queue: 1
; listen queue len: 42
; idle processes: 4
; active processes: 11
; total processes: 15
; max active processes: 12
; max children reached: 0
;
; By default the status page output is formatted as text/plain. Passing either
; 'html', 'xml' or 'json' in the query string will return the corresponding
; output syntax. Example:
; http://www.foo.bar/status
; http://www.foo.bar/status?json
; http://www.foo.bar/status?html
; http://www.foo.bar/status?xml
;
; By default the status page only outputs short status. Passing 'full' in the
; query string will also return status for each pool process.
; Example:
; http://www.foo.bar/status?full
; http://www.foo.bar/status?json&full
; http://www.foo.bar/status?html&full
; http://www.foo.bar/status?xml&full
; The Full status returns for each process:
; pid - the PID of the process;
; state - the state of the process (Idle, Running, ...);
; start time - the date and time the process has started;
; start since - the number of seconds since the process has started;
; requests - the number of requests the process has served;
; request duration - the duration in µs of the requests;
; request method - the request method (GET, POST, ...);
; request URI - the request URI with the query string;
; content length - the content length of the request (only with POST);
; user - the user (PHP_AUTH_USER) (or '-' if not set);
; script - the main script called (or '-' if not set);
; last request cpu - the %cpu the last request consumed
; it's always 0 if the process is not in Idle state
; because CPU calculation is done when the request
; processing has terminated;
; last request memory - the max amount of memory the last request consumed
; it's always 0 if the process is not in Idle state
; because memory calculation is done when the request
; processing has terminated;
; If the process is in Idle state, then informations are related to the
; last request the process has served. Otherwise informations are related to
; the current request being served.
; Example output:
; ************************
; pid: 31330
; state: Running
; start time: 01/Jul/2011:17:53:49 +0200
; start since: 63087
; requests: 12808
; request duration: 1250261
; request method: GET
; request URI: /test_mem.php?N=10000
; content length: 0
; user: -
; script: /home/fat/web/docs/php/test_mem.php
; last request cpu: 0.00
; last request memory: 0
;
; Note: There is a real-time FPM status monitoring sample web page available
; It's available in: ${prefix}/share/fpm/status.html
;
; Note: The value must start with a leading slash (/). The value can be
; anything, but it may not be a good idea to use the .php extension or it
; may conflict with a real PHP file.
; Default Value: not set
;pm.status_path = /status
; The ping URI to call the monitoring page of FPM. If this value is not set, no
; URI will be recognized as a ping page. This could be used to test from outside
; that FPM is alive and responding, or to
; - create a graph of FPM availability (rrd or such);
; - remove a server from a group if it is not responding (load balancing);
; - trigger alerts for the operating team (24/7).
; Note: The value must start with a leading slash (/). The value can be
; anything, but it may not be a good idea to use the .php extension or it
; may conflict with a real PHP file.
; Default Value: not set
;ping.path = /ping
; This directive may be used to customize the response of a ping request. The
; response is formatted as text/plain with a 200 response code.
; Default Value: pong
;ping.response = pong
; The access log file
; Default: not set
;access.log = log/$pool.access.log
; The access log format.
; The following syntax is allowed
; %%: the '%' character
; %C: %CPU used by the request
; it can accept the following format:
; - %{user}C for user CPU only
; - %{system}C for system CPU only
; - %{total}C for user + system CPU (default)
; %d: time taken to serve the request
; it can accept the following format:
; - %{seconds}d (default)
; - %{miliseconds}d
; - %{mili}d
; - %{microseconds}d
; - %{micro}d
; %e: an environment variable (same as $_ENV or $_SERVER)
; it must be associated with embraces to specify the name of the env
; variable. Some exemples:
; - server specifics like: %{REQUEST_METHOD}e or %{SERVER_PROTOCOL}e
; - HTTP headers like: %{HTTP_HOST}e or %{HTTP_USER_AGENT}e
; %f: script filename
; %l: content-length of the request (for POST request only)
; %m: request method
; %M: peak of memory allocated by PHP
; it can accept the following format:
; - %{bytes}M (default)
; - %{kilobytes}M
; - %{kilo}M
; - %{megabytes}M
; - %{mega}M
; %n: pool name
; %o: ouput header
; it must be associated with embraces to specify the name of the header:
; - %{Content-Type}o
; - %{X-Powered-By}o
; - %{Transfert-Encoding}o
; - ....
; %p: PID of the child that serviced the request
; %P: PID of the parent of the child that serviced the request
; %q: the query string
; %Q: the '?' character if query string exists
; %r: the request URI (without the query string, see %q and %Q)
; %R: remote IP address
; %s: status (response code)
; %t: server time the request was received
; it can accept a strftime(3) format:
; %d/%b/%Y:%H:%M:%S %z (default)
; %T: time the log has been written (the request has finished)
; it can accept a strftime(3) format:
; %d/%b/%Y:%H:%M:%S %z (default)
; %u: remote user
;
; Default: "%R - %u %t \"%m %r\" %s"
;access.format = "%R - %u %t \"%m %r%Q%q\" %s %f %{mili}d %{kilo}M %C%%"
; The log file for slow requests
; Default Value: not set
; Note: slowlog is mandatory if request_slowlog_timeout is set
slowlog = /var/log/nginx/__NAMETOCHANGE__.slow.log
; The timeout for serving a single request after which a PHP backtrace will be
; dumped to the 'slowlog' file. A value of '0s' means 'off'.
; Available units: s(econds)(default), m(inutes), h(ours), or d(ays)
; Default Value: 0
request_slowlog_timeout = 5s
; The timeout for serving a single request after which the worker process will
; be killed. This option should be used when the 'max_execution_time' ini option
; does not stop script execution for some reason. A value of '0' means 'off'.
; Available units: s(econds)(default), m(inutes), h(ours), or d(ays)
; Default Value: 0
request_terminate_timeout = 1d
; Set open file descriptor rlimit.
; Default Value: system defined value
;rlimit_files = 1024
; Set max core size rlimit.
; Possible Values: 'unlimited' or an integer greater or equal to 0
; Default Value: system defined value
;rlimit_core = 0
; Chroot to this directory at the start. This value must be defined as an
; absolute path. When this value is not set, chroot is not used.
; Note: you can prefix with '$prefix' to chroot to the pool prefix or one
; of its subdirectories. If the pool prefix is not set, the global prefix
; will be used instead.
; Note: chrooting is a great security feature and should be used whenever
; possible. However, all PHP paths will be relative to the chroot
; (error_log, sessions.save_path, ...).
; Default Value: not set
;chroot =
; Chdir to this directory at the start.
; Note: relative path can be used.
; Default Value: current directory or / when chroot
chdir = /var/www/__NAMETOCHANGE__
; Redirect worker stdout and stderr into main error log. If not set, stdout and
; stderr will be redirected to /dev/null according to FastCGI specs.
; Note: on highloaded environement, this can cause some delay in the page
; process time (several ms).
; Default Value: no
catch_workers_output = yes
; Limits the extensions of the main script FPM will allow to parse. This can
; prevent configuration mistakes on the web server side. You should only limit
; FPM to .php extensions to prevent malicious users to use other extensions to
; exectute php code.
; Note: set an empty value to allow all extensions.
; Default Value: .php
;security.limit_extensions = .php .php3 .php4 .php5
; Pass environment variables like LD_LIBRARY_PATH. All $VARIABLEs are taken from
; the current environment.
; Default Value: clean env
;env[HOSTNAME] = $HOSTNAME
;env[PATH] = /usr/local/bin:/usr/bin:/bin
;env[TMP] = /tmp
;env[TMPDIR] = /tmp
;env[TEMP] = /tmp
; Additional php.ini defines, specific to this pool of workers. These settings
; overwrite the values previously defined in the php.ini. The directives are the
; same as the PHP SAPI:
; php_value/php_flag - you can set classic ini defines which can
; be overwritten from PHP call 'ini_set'.
; php_admin_value/php_admin_flag - these directives won't be overwritten by
; PHP call 'ini_set'
; For php_*flag, valid values are on, off, 1, 0, true, false, yes or no.
; Defining 'extension' will load the corresponding shared extension from
; extension_dir. Defining 'disable_functions' or 'disable_classes' will not
; overwrite previously defined php.ini values, but will append the new value
; instead.
; Note: path INI options can be relative and will be expanded with the prefix
; (pool, global or /usr)
; Default Value: nothing is defined by default except the values in php.ini and
; specified at startup with the -d argument
;php_admin_value[sendmail_path] = /usr/sbin/sendmail -t -i -f www@my.domain.com
;php_flag[display_errors] = off
;php_admin_value[error_log] = /var/log/fpm-php.www.log
;php_admin_flag[log_errors] = on
;php_admin_value[memory_limit] = 32M
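Once this pool file is installed as /etc/php5/fpm/pool.d/$app.conf, a minimal sanity check could look like this (assuming an instance named synapse):
```
php5-fpm -t                            # parse the FPM configuration, including the new pool
systemctl reload php5-fpm
ls -l /var/run/php5-fpm-synapse.sock   # the socket the nginx location's fastcgi_pass points at
```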


@@ -1,8 +1,15 @@
import json
import sys
domain = sys.argv[1]
server_name = sys.argv[2]
with open("/etc/ssowat/conf.json.persistent", "r", encoding='utf-8') as jsonFile:
data = json.load(jsonFile)
data["skipped_urls"].remove("/_matrix")
data["skipped_urls"].remove(domain + "/_matrix")
data["skipped_urls"].remove(server_name + "/.well-known/matrix/")
data["protected_urls"].remove(domain + "/_matrix/cas_server.php/login")
with open("/etc/ssowat/conf.json.persistent", "w", encoding='utf-8') as jsonFile:
jsonFile.write(json.dumps(data, indent=4, sort_keys=True))
jsonFile.write(json.dumps(data, indent=4, sort_keys=True))


@@ -6,7 +6,7 @@
"en": "Instant messaging server who use matrix",
"fr": "Un serveur de messagerie instantané basé sur matrix"
},
"version": "1.11.0~ynh1",
"version": "1.12.3~ynh1",
"url": "http://matrix.org",
"license": "Apache-2.0",
"maintainer": {
@@ -14,7 +14,7 @@
"email": "josue@tille.ch"
},
"requirements": {
"yunohost": ">= 3.6"
"yunohost": ">= 3.7.0.4"
},
"multi_instance": true,
"services": [


@@ -36,6 +36,7 @@ synapse_user="matrix-$app"
synapse_db_name="matrix_$app"
synapse_db_user="matrix_$app"
upstream_version=$(ynh_app_upstream_version)
final_www_path="/var/www/$app"
#=================================================
# STANDARD BACKUP STEPS
@@ -45,12 +46,16 @@ upstream_version=$(ynh_app_upstream_version)
ynh_script_progression --message="Backing up the main app directory..." --weight=1
ynh_backup --src_path="$final_path"
ynh_backup --src_path="$final_www_path"
#=================================================
# BACKUP THE NGINX CONFIGURATION
#=================================================
ynh_script_progression --message="Backing up nginx web server configuration..." --weight=1
# BACKUP THE PHP-FPM CONFIGURATION
ynh_backup --src_path "/etc/php5/fpm/pool.d/$app.conf"
ynh_backup --src_path="/etc/nginx/conf.d/$domain.d/$app.conf"
if yunohost --output-as plain domain list | grep -q "^$server_name$"
then


@@ -114,7 +114,7 @@ ynh_replace_special_string --match_string=__SYNAPSE_DB_PWD__ --replace_string=$s
ynh_replace_special_string --match_string=__TURNPWD__ --replace_string=$turnserver_pwd --target_file="$homeserver_config_path"
ynh_replace_special_string --match_string=__REGISTRATION_SECRET__ --replace_string="$registration_shared_secret" --target_file="$homeserver_config_path"
ynh_replace_special_string --match_string=__FORM_SECRET__ --replace_string="$form_secret" --target_file="$homeserver_config_path"
if [ -n $macaroon_secret_key ]; then
if [ -z $macaroon_secret_key ]; then
# Well, in this package this value was not managed because it was not needed, synapse is able to generate this with some other secret in the config file but after some vulnerability was found with this practice.
# For more detail about this issue you can see : https://matrix.org/blog/2019/01/15/further-details-on-critical-security-update-in-synapse-affecting-all-versions-prior-to-0-34-1-cve-2019-5885/
# The problem is that we can't just say generate a new value if the package has not already defined a value. The reason is that changing this value logout all user. And in case of a user has enabled the encryption, the user might lost all conversation !!
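The change from `-n` to `-z` above is the actual fix: because the variable is unquoted, the old test collapsed to the one-argument form of `test` and was effectively always true, so this branch ran even when a secret was already stored. A minimal illustration:
```
unset macaroon_secret_key
[ -n $macaroon_secret_key ] && echo "old test: true even with an empty variable"   # expands to [ -n ]
[ -z $macaroon_secret_key ] && echo "new test: true with an empty variable"        # expands to [ -z ]
macaroon_secret_key="already-set"
[ -z $macaroon_secret_key ] || echo "new test: false once a value exists"
```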
@@ -137,6 +137,13 @@ fi
ynh_store_file_checksum --file="$homeserver_config_path"
ynh_store_file_checksum --file="/etc/matrix-$app/log.yaml"
#=================================================
# SETUP SSOWAT
#=================================================
python3 ../conf/remove_sso_conf.py $domain $server_name
python3 ../conf/add_sso_conf.py $domain $server_name || ynh_die --message="Your file /etc/ssowat/conf.json.persistent doesn't respect the json syntax. Please fix the syntax to install this app. For more information see here: https://github.com/YunoHost-Apps/synapse_ynh/issues/32"
#=================================================
# RELOAD SERVICES
#=================================================


@@ -51,6 +51,7 @@ server_name=$YNH_APP_ARG_SERVER_NAME
is_public=$YNH_APP_ARG_IS_PUBLIC
path_url="/_matrix"
final_path="/opt/yunohost/matrix-$app"
final_www_path="/var/www/$app"
if [[ "$server_name" == "$default_domain_value" ]]; then
server_name=$domain
@@ -181,6 +182,13 @@ mkdir -p $final_path
if [ -n "$(uname -m | grep arm)" ]
then
ynh_setup_source --dest_dir=$final_path/ --source_id="armv7_$(lsb_release --codename --short)"
# Fix multi-instance support
for f in $(ls $final_path/bin); do
if ! [[ $f =~ "__" ]]; then
ynh_replace_special_string --match_string='#!/opt/yunohost/matrix-synapse' --replace_string='#!'$final_path --target_file=$final_path/bin/$f
fi
done
else
# Install virtualenv if it don't exist
test -e $final_path/bin/python3 || python3 -m venv $final_path
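About the ARM branch just above: the prebuilt virtualenv is unpacked as-is and its launcher scripts keep the absolute shebang of the build machine, so the new loop rewrites them to this instance's own final_path (the same loop is added to the upgrade script). Roughly, for a hypothetical second instance (file name and paths are illustrative):
```
head -n1 /opt/yunohost/matrix-synapse__2/bin/synctl
# before the loop: #!/opt/yunohost/matrix-synapse/bin/python3
# after the loop:  #!/opt/yunohost/matrix-synapse__2/bin/python3
```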
@@ -200,9 +208,21 @@ else
deactivate
fi
#=================================================
# CREATE SMALL CAS SERVER
#=================================================
# WARNING : theses command are used in INSTALL, UPGRADE
# For any update do it in all files
mkdir -p $final_www_path
cp ../sources/cas_server.php $final_www_path/
chmod u=rwX,g=rX,o= -R $final_www_path
chown $synapse_user:root -R $final_www_path
#=================================================
# CREATE SYNAPSE CONFIG
#=================================================
ynh_script_progression --message="Creating synapse config..." --weight=3
# Go in virtualenvironnement
@@ -244,7 +264,11 @@ ynh_add_systemd_config --service=coturn-$app --template=coturn-synapse.service
#=================================================
ynh_script_progression --message="Configuring nginx web server..." --weight=2
ynh_add_nginx_config
# Create a dedicated php-fpm config
ynh_script_progression --message="Configuring application..."
ynh_add_fpm_config
ynh_add_nginx_config app
# Create .well-known redirection for access by federation
if yunohost --output-as plain domain list | grep -q "^$server_name$"
@@ -373,7 +397,8 @@ ynh_script_progression --message="Configuring SSOwat..." --weight=1
# Open access to server without a button the home
# The script "add_sso_conf.py" will just add en entry for the path "/_matrix" in the sso conf.json.persistent file in the cathegory "skipped_urls".
python3 ../conf/add_sso_conf.py || ynh_die --message="Your file /etc/ssowat/conf.json.persistent doesn't respect the json syntax. Please fix the syntax to install this app. For more information see here: https://github.com/YunoHost-Apps/synapse_ynh/issues/32"
python3 ../conf/add_sso_conf.py $domain $server_name || ynh_die --message="Your file /etc/ssowat/conf.json.persistent doesn't respect the json syntax. Please fix the syntax to install this app. For more information see here: https://github.com/YunoHost-Apps/synapse_ynh/issues/32"
ynh_permission_url --permission main --url $domain/_matrix/cas_server.php/login
#=================================================
# UPDATE HOOKS


@@ -32,6 +32,7 @@ synapse_user="matrix-$app"
synapse_db_name="matrix_$app"
synapse_db_user="matrix_$app"
upstream_version=$(ynh_app_upstream_version)
final_www_path="/var/www/$app"
#=================================================
# STANDARD REMOVE
@@ -79,7 +80,7 @@ ynh_remove_app_dependencies
#=================================================
# Remove the skipped url
python3 ../conf/remove_sso_conf.py
python3 ../conf/remove_sso_conf.py $domain $server_name
#=================================================
# REMOVE APP MAIN DIR
@@ -87,6 +88,7 @@ python3 ../conf/remove_sso_conf.py
ynh_script_progression --message="Removing app main directory" --weight=2
ynh_secure_remove --file=$final_path
ynh_secure_remove --file=$final_www_path
ynh_secure_remove --file=/var/lib/matrix-$app
ynh_secure_remove --file=/var/log/matrix-$app
ynh_secure_remove --file=/etc/matrix-$app
@@ -102,6 +104,9 @@ ynh_script_progression --message="Removing nginx web server configuration" --wei
# Remove the dedicated nginx config
ynh_remove_nginx_config
# Remove the dedicated php-fpm config
ynh_remove_fpm_config
#=================================================
# REMOVE LOGROTATE CONFIGURATION
#=================================================


@@ -48,6 +48,7 @@ synapse_user="matrix-$app"
synapse_db_name="matrix_$app"
synapse_db_user="matrix_$app"
upstream_version=$(ynh_app_upstream_version)
final_www_path="/var/www/$app"
#=================================================
# CHECK IF THE APP CAN BE RESTORED
@@ -214,7 +215,7 @@ ynh_script_progression --message="Configuring SSOwat..."
# Open access to server without a button the home
# The script "add_sso_conf.py" will just add en entry for the path "/_matrix" in the sso conf.json.persistent file in the cathegory "skipped_urls".
python3 ../settings/conf/add_sso_conf.py || ynh_die --message="Your file /etc/ssowat/conf.json.persistent doesn't respect the json syntax. Please fix the syntax to install this app. For more information see here: https://github.com/YunoHost-Apps/synapse_ynh/issues/32"
python3 ../settings/conf/add_sso_conf.py $domain $server_name || ynh_die --message="Your file /etc/ssowat/conf.json.persistent doesn't respect the json syntax. Please fix the syntax to install this app. For more information see here: https://github.com/YunoHost-Apps/synapse_ynh/issues/32"
#=================================================
# SETUP LOGROTATE
@@ -240,6 +241,8 @@ chmod u=rwX,g=rX,o= -R /etc/matrix-$app
chmod 600 /etc/matrix-$app/$server_name.signing.key
setfacl -R -m user:turnserver:rX /etc/matrix-$app
setfacl -R -m user:turnserver:rwX /var/log/matrix-$app
chmod u=rwX,g=rX,o= -R $final_www_path
chown $synapse_user:root -R $final_www_path
#=================================================
# RELOAD NGINX, SYNAPSE AND COTURN
@@ -275,6 +278,7 @@ ynh_send_readme_to_admin --app_message="mail_to_send" --type="restore"
#=================================================
ynh_script_progression --message="Reloading nginx web server..."
systemctl reload php5-fpm
ynh_systemd_action --service_name=nginx --action=reload
#=================================================


@@ -46,6 +46,7 @@ synapse_user="matrix-$app"
synapse_db_name="matrix_$app"
synapse_db_user="matrix_$app"
upstream_version=$(ynh_app_upstream_version)
final_www_path="/var/www/$app"
#=================================================
# CHECK VERSION
@@ -141,7 +142,15 @@ then
ynh_secure_remove --file=$final_path/lib
ynh_secure_remove --file=$final_path/include
ynh_secure_remove --file=$final_path/share
ynh_setup_source --dest_dir=$final_path/ --source_id="armv7_$(lsb_release --codename --short)"
# Fix multi-instance support
for f in $(ls $final_path/bin); do
if ! [[ $f =~ "__" ]]; then
ynh_replace_special_string --match_string='#!/opt/yunohost/matrix-synapse' --replace_string='#!'$final_path --target_file=$final_path/bin/$f
fi
done
else
# Install virtualenv if it don't exist
test -e $final_path/bin/python3 || python3 -m venv $final_path
@@ -154,13 +163,24 @@ then
source $final_path/bin/activate
pip3 install --upgrade setuptools wheel
pip3 install --upgrade cffi ndg-httpsclient psycopg2 lxml jinja2
pip3 install --upgrade matrix-synapse==$upstream_version matrix-synapse-ldap3
pip3 install --upgrade 'Twisted>=20.3.0' matrix-synapse==$upstream_version matrix-synapse-ldap3
# This function was defined when we called "source $final_path/bin/activate". With this function we undo what "$final_path/bin/activate" does
deactivate
fi
fi
#=================================================
# CREATE SMALL CAS SERVER
#=================================================
# WARNING : theses command are used in INSTALL, UPGRADE
# For any update do it in all files
mkdir -p $final_www_path
cp ../sources/cas_server.php $final_www_path/
chmod u=rwX,g=rX,o= -R $final_www_path
chown $synapse_user:root -R $final_www_path
#=================================================
# MIGRATION 1 : GENERATE SYNAPSE SECRET
#=================================================
@@ -235,7 +255,7 @@ ynh_replace_special_string --match_string=__SYNAPSE_DB_PWD__ --replace_string=$s
ynh_replace_special_string --match_string=__TURNPWD__ --replace_string=$turnserver_pwd --target_file="$homeserver_config_path"
ynh_replace_special_string --match_string=__REGISTRATION_SECRET__ --replace_string="$registration_shared_secret" --target_file="$homeserver_config_path"
ynh_replace_special_string --match_string=__FORM_SECRET__ --replace_string="$form_secret" --target_file="$homeserver_config_path"
if [ -n $macaroon_secret_key ]; then
if [ -z $macaroon_secret_key ]; then
# Well, in this package this value was not managed because it was not needed, synapse is able to generate this with some other secret in the config file but after some vulnerability was found with this practice.
# For more detail about this issue you can see : https://matrix.org/blog/2019/01/15/further-details-on-critical-security-update-in-synapse-affecting-all-versions-prior-to-0-34-1-cve-2019-5885/
# The problem is that we can't just say generate a new value if the package has not already defined a value. The reason is that changing this value logout all user. And in case of a user has enabled the encryption, the user might lost all conversation !!
@@ -330,8 +350,12 @@ fi
#=================================================
ynh_script_progression --message="Upgrading nginx web server configuration..." --weight=2
# Create a dedicated php-fpm config
ynh_script_progression --message="Configuring application..."
ynh_add_fpm_config
# Create a dedicated nginx config
ynh_add_nginx_config
ynh_add_nginx_config app
# Create .well-known redirection for access by federation
if yunohost --output-as plain domain list | grep -q "^$server_name$"
@@ -420,6 +444,16 @@ ynh_add_fail2ban_config --use_template
#=================================================
# GENERIC FINALIZATION
#=================================================
# SETUP SSOWAT
#=================================================
ynh_script_progression --message="Configuring SSOwat..." --weight=1
# Open access to server without a button the home
# The script "add_sso_conf.py" will just add en entry for the path "/_matrix" in the sso conf.json.persistent file in the cathegory "skipped_urls".
python3 ../conf/add_sso_conf.py $domain $server_name || ynh_die --message="Your file /etc/ssowat/conf.json.persistent doesn't respect the json syntax. Please fix the syntax to install this app. For more information see here: https://github.com/YunoHost-Apps/synapse_ynh/issues/32"
ynh_permission_url --permission main --url $domain/_matrix/cas_server.php/login
#=================================================
# SECURE FILES AND DIRECTORIES
#=================================================

sources/cas_server.php (new file, 80 lines)

@@ -0,0 +1,80 @@
<?php
/*
This is simple implementation of a CAS server to provide a SSO with synapse and Riot
The authentication mecanisme is documented here: https://matrix.org/docs/spec/client_server/latest#sso-client-login
Note that it's not a full implementation of a CAS server, but just the minimum to work with synapse and Riot.
Mainly this CAS server will:
1. Authenticate the user from the authentication header from ssowat
2. Save the user authentication data in a php session
3. Redirect the user to the homeserver (synapse)
4. Answer to the homeserver if the user with a specific ticket number is authenticated and give his username.
*/
// Get the URL of the request
$base_url = "/_matrix/cas_server.php";
$url = explode('?', $_SERVER['REQUEST_URI'], 2)[0];
switch ($url) {
// Request from the homeserver (synapse)
case $base_url . "/proxyValidate":
// Get the session created by the client request
session_id($_GET['ticket']);
session_start();
// Check if this user was cleanly authenticated
if ($_SESSION['user_authenticated']) {
// Give the authentication information to the server
?>
<cas:serviceResponse xmlns:cas="http://www.yale.edu/tp/cas">
<cas:authenticationSuccess>
<cas:user><?php echo $_SESSION['user']; ?></cas:user>
<cas:proxyGrantingTicket>PGTIOU-84678-8a9d...</cas:proxyGrantingTicket>
</cas:authenticationSuccess>
</cas:serviceResponse>
<?php
} else {
?>
<cas:serviceResponse xmlns:cas='http://www.yale.edu/tp/cas'>
<cas:authenticationFailure code="INVALID_TICKET">
ticket PT-1856376-1HMgO86Z2ZKeByc5XdYD not recognized
</cas:authenticationFailure>
</cas:serviceResponse>
<?php
}
break;
// First request from the client
case $base_url . "/login":
// Generate a random number ticket which will be used by the client to authenticate to the server
$ticket = bin2hex(random_bytes(50));
// Use the Ticket number as the session ID.
// This give the possiblity in the next request from the server to to find this session and the information related to.
session_id($ticket);
session_start();
// If the user is authenticated by ssowat save the username and set it as cleanly authenticated
if (array_key_exists('REMOTE_USER', $_SERVER) && strlen($_SERVER['REMOTE_USER']) > 0) {
$_SESSION['user_authenticated'] = true;
$_SESSION['user'] = $_SERVER['REMOTE_USER'];
header('Status: 302 Moved Temporarily', false, 302);
header('Location: ' . $_GET['service'] . '&ticket=' . $ticket);
} else {
echo "Authentication Fail.";
}
session_commit();
break;
case $base_url:
header('Status: 302 Moved Temporarily', false, 302);
header('Location: ' . $_GET['redirectUrl']);
break;
default:
echo "Bad URL";
}
?>
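Putting the pieces together, the round-trip implemented above looks roughly like this; all values are placeholders, and the first request only succeeds from a browser session already signed in to the YunoHost SSO (so that nginx can pass REMOTE_USER):
```
# 1. Riot opens the CAS login URL with synapse's callback passed as "service":
#    GET https://example.com/_matrix/cas_server.php/login?service=<urlencoded callback>
#    -> 302 with "Location: <callback>&ticket=<100 hex characters>"
#
# 2. Synapse then validates the ticket server-side and reads the username from the XML:
curl -k 'https://example.com/_matrix/cas_server.php/proxyValidate?ticket=<100 hex characters>'
#    -> <cas:authenticationSuccess><cas:user>alice</cas:user>...
```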


@@ -1,975 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer, reactor
from twisted.enterprise import adbapi
from synapse.storage._base import LoggingTransaction, SQLBaseStore
from synapse.storage.engines import create_engine
from synapse.storage.prepare_database import prepare_database
import argparse
import curses
import logging
import sys
import time
import traceback
import yaml
logger = logging.getLogger("synapse_port_db")
BOOLEAN_COLUMNS = {
"events": ["processed", "outlier", "contains_url"],
"rooms": ["is_public"],
"event_edges": ["is_state"],
"presence_list": ["accepted"],
"presence_stream": ["currently_active"],
"public_room_list_stream": ["visibility"],
"device_lists_outbound_pokes": ["sent"],
"users_who_share_rooms": ["share_private"],
"groups": ["is_public"],
"group_rooms": ["is_public"],
"group_users": ["is_public", "is_admin"],
"group_summary_rooms": ["is_public"],
"group_room_categories": ["is_public"],
"group_summary_users": ["is_public"],
"group_roles": ["is_public"],
"local_group_membership": ["is_publicised", "is_admin"],
}
APPEND_ONLY_TABLES = [
"event_content_hashes",
"event_reference_hashes",
"event_signatures",
"event_edge_hashes",
"events",
"event_json",
"state_events",
"room_memberships",
"feedback",
"topics",
"room_names",
"rooms",
"local_media_repository",
"local_media_repository_thumbnails",
"remote_media_cache",
"remote_media_cache_thumbnails",
"redactions",
"event_edges",
"event_auth",
"received_transactions",
"sent_transactions",
"transaction_id_to_pdu",
"users",
"state_groups",
"state_groups_state",
"event_to_state_groups",
"rejections",
"event_search",
"presence_stream",
"push_rules_stream",
"current_state_resets",
"ex_outlier_stream",
"cache_invalidation_stream",
"public_room_list_stream",
"state_group_edges",
"stream_ordering_to_exterm",
]
end_error_exec_info = None
class Store(object):
"""This object is used to pull out some of the convenience API from the
Storage layer.
*All* database interactions should go through this object.
"""
def __init__(self, db_pool, engine):
self.db_pool = db_pool
self.database_engine = engine
_simple_insert_txn = SQLBaseStore.__dict__["_simple_insert_txn"]
_simple_insert = SQLBaseStore.__dict__["_simple_insert"]
_simple_select_onecol_txn = SQLBaseStore.__dict__["_simple_select_onecol_txn"]
_simple_select_onecol = SQLBaseStore.__dict__["_simple_select_onecol"]
_simple_select_one = SQLBaseStore.__dict__["_simple_select_one"]
_simple_select_one_txn = SQLBaseStore.__dict__["_simple_select_one_txn"]
_simple_select_one_onecol = SQLBaseStore.__dict__["_simple_select_one_onecol"]
_simple_select_one_onecol_txn = SQLBaseStore.__dict__[
"_simple_select_one_onecol_txn"
]
_simple_update_one = SQLBaseStore.__dict__["_simple_update_one"]
_simple_update_one_txn = SQLBaseStore.__dict__["_simple_update_one_txn"]
_simple_update_txn = SQLBaseStore.__dict__["_simple_update_txn"]
def runInteraction(self, desc, func, *args, **kwargs):
def r(conn):
try:
i = 0
N = 5
while True:
try:
txn = conn.cursor()
return func(
LoggingTransaction(txn, desc, self.database_engine, [], []),
*args, **kwargs
)
except self.database_engine.module.DatabaseError as e:
if self.database_engine.is_deadlock(e):
logger.warn("[TXN DEADLOCK] {%s} %d/%d", desc, i, N)
if i < N:
i += 1
conn.rollback()
continue
raise
except Exception as e:
logger.debug("[TXN FAIL] {%s} %s", desc, e)
raise
return self.db_pool.runWithConnection(r)
def execute(self, f, *args, **kwargs):
return self.runInteraction(f.__name__, f, *args, **kwargs)
def execute_sql(self, sql, *args):
def r(txn):
txn.execute(sql, args)
return txn.fetchall()
return self.runInteraction("execute_sql", r)
def insert_many_txn(self, txn, table, headers, rows):
sql = "INSERT INTO %s (%s) VALUES (%s)" % (
table,
", ".join(k for k in headers),
", ".join("%s" for _ in headers)
)
try:
txn.executemany(sql, rows)
except:
logger.exception(
"Failed to insert: %s",
table,
)
raise
class Porter(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
@defer.inlineCallbacks
def setup_table(self, table):
if table in APPEND_ONLY_TABLES:
# It's safe to just carry on inserting.
row = yield self.postgres_store._simple_select_one(
table="port_from_sqlite3",
keyvalues={"table_name": table},
retcols=("forward_rowid", "backward_rowid"),
allow_none=True,
)
total_to_port = None
if row is None:
if table == "sent_transactions":
forward_chunk, already_ported, total_to_port = (
yield self._setup_sent_transactions()
)
backward_chunk = 0
else:
yield self.postgres_store._simple_insert(
table="port_from_sqlite3",
values={
"table_name": table,
"forward_rowid": 1,
"backward_rowid": 0,
}
)
forward_chunk = 1
backward_chunk = 0
already_ported = 0
else:
forward_chunk = row["forward_rowid"]
backward_chunk = row["backward_rowid"]
if total_to_port is None:
already_ported, total_to_port = yield self._get_total_count_to_port(
table, forward_chunk, backward_chunk
)
else:
def delete_all(txn):
txn.execute(
"DELETE FROM port_from_sqlite3 WHERE table_name = %s",
(table,)
)
txn.execute("TRUNCATE %s CASCADE" % (table,))
yield self.postgres_store.execute(delete_all)
yield self.postgres_store._simple_insert(
table="port_from_sqlite3",
values={
"table_name": table,
"forward_rowid": 1,
"backward_rowid": 0,
}
)
forward_chunk = 1
backward_chunk = 0
already_ported, total_to_port = yield self._get_total_count_to_port(
table, forward_chunk, backward_chunk
)
defer.returnValue(
(table, already_ported, total_to_port, forward_chunk, backward_chunk)
)
@defer.inlineCallbacks
def handle_table(self, table, postgres_size, table_size, forward_chunk,
backward_chunk):
if not table_size:
return
self.progress.add_table(table, postgres_size, table_size)
if table == "event_search":
yield self.handle_search_table(
postgres_size, table_size, forward_chunk, backward_chunk
)
return
if table in (
"user_directory", "user_directory_search", "users_who_share_rooms",
"users_in_pubic_room",
):
# We don't port these tables, as they're a faff and we can regenreate
# them anyway.
self.progress.update(table, table_size) # Mark table as done
return
if table == "user_directory_stream_pos":
# We need to make sure there is a single row, `(X, null), as that is
# what synapse expects to be there.
yield self.postgres_store._simple_insert(
table=table,
values={"stream_id": None},
)
self.progress.update(table, table_size) # Mark table as done
return
forward_select = (
"SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?"
% (table,)
)
backward_select = (
"SELECT rowid, * FROM %s WHERE rowid <= ? ORDER BY rowid LIMIT ?"
% (table,)
)
do_forward = [True]
do_backward = [True]
while True:
def r(txn):
forward_rows = []
backward_rows = []
if do_forward[0]:
txn.execute(forward_select, (forward_chunk, self.batch_size,))
forward_rows = txn.fetchall()
if not forward_rows:
do_forward[0] = False
if do_backward[0]:
txn.execute(backward_select, (backward_chunk, self.batch_size,))
backward_rows = txn.fetchall()
if not backward_rows:
do_backward[0] = False
if forward_rows or backward_rows:
headers = [column[0] for column in txn.description]
else:
headers = None
return headers, forward_rows, backward_rows
headers, frows, brows = yield self.sqlite_store.runInteraction(
"select", r
)
if frows or brows:
if frows:
forward_chunk = max(row[0] for row in frows) + 1
if brows:
backward_chunk = min(row[0] for row in brows) - 1
rows = frows + brows
rows = self._convert_rows(table, headers, rows)
def insert(txn):
self.postgres_store.insert_many_txn(
txn, table, headers[1:], rows
)
self.postgres_store._simple_update_one_txn(
txn,
table="port_from_sqlite3",
keyvalues={"table_name": table},
updatevalues={
"forward_rowid": forward_chunk,
"backward_rowid": backward_chunk,
},
)
yield self.postgres_store.execute(insert)
postgres_size += len(rows)
self.progress.update(table, postgres_size)
else:
return
@defer.inlineCallbacks
def handle_search_table(self, postgres_size, table_size, forward_chunk,
backward_chunk):
select = (
"SELECT es.rowid, es.*, e.origin_server_ts, e.stream_ordering"
" FROM event_search as es"
" INNER JOIN events AS e USING (event_id, room_id)"
" WHERE es.rowid >= ?"
" ORDER BY es.rowid LIMIT ?"
)
while True:
def r(txn):
txn.execute(select, (forward_chunk, self.batch_size,))
rows = txn.fetchall()
headers = [column[0] for column in txn.description]
return headers, rows
headers, rows = yield self.sqlite_store.runInteraction("select", r)
if rows:
forward_chunk = rows[-1][0] + 1
# We have to treat event_search differently since it has a
# different structure in the two different databases.
def insert(txn):
sql = (
"INSERT INTO event_search (event_id, room_id, key,"
" sender, vector, origin_server_ts, stream_ordering)"
" VALUES (?,?,?,?,to_tsvector('english', ?),?,?)"
)
rows_dict = []
for row in rows:
d = dict(zip(headers, row))
if "\0" in d['value']:
logger.warn('dropping search row %s', d)
else:
rows_dict.append(d)
txn.executemany(sql, [
(
row["event_id"],
row["room_id"],
row["key"],
row["sender"],
row["value"],
row["origin_server_ts"],
row["stream_ordering"],
)
for row in rows_dict
])
self.postgres_store._simple_update_one_txn(
txn,
table="port_from_sqlite3",
keyvalues={"table_name": "event_search"},
updatevalues={
"forward_rowid": forward_chunk,
"backward_rowid": backward_chunk,
},
)
yield self.postgres_store.execute(insert)
postgres_size += len(rows)
self.progress.update("event_search", postgres_size)
else:
return
def setup_db(self, db_config, database_engine):
db_conn = database_engine.module.connect(
**{
k: v for k, v in db_config.get("args", {}).items()
if not k.startswith("cp_")
}
)
prepare_database(db_conn, database_engine, config=None)
db_conn.commit()
@defer.inlineCallbacks
def run(self):
try:
sqlite_db_pool = adbapi.ConnectionPool(
self.sqlite_config["name"],
**self.sqlite_config["args"]
)
postgres_db_pool = adbapi.ConnectionPool(
self.postgres_config["name"],
**self.postgres_config["args"]
)
sqlite_engine = create_engine(sqlite_config)
postgres_engine = create_engine(postgres_config)
self.sqlite_store = Store(sqlite_db_pool, sqlite_engine)
self.postgres_store = Store(postgres_db_pool, postgres_engine)
yield self.postgres_store.execute(
postgres_engine.check_database
)
# Step 1. Set up databases.
self.progress.set_state("Preparing SQLite3")
self.setup_db(sqlite_config, sqlite_engine)
self.progress.set_state("Preparing PostgreSQL")
self.setup_db(postgres_config, postgres_engine)
# Step 2. Get tables.
self.progress.set_state("Fetching tables")
sqlite_tables = yield self.sqlite_store._simple_select_onecol(
table="sqlite_master",
keyvalues={
"type": "table",
},
retcol="name",
)
postgres_tables = yield self.postgres_store._simple_select_onecol(
table="information_schema.tables",
keyvalues={},
retcol="distinct table_name",
)
tables = set(sqlite_tables) & set(postgres_tables)
self.progress.set_state("Creating tables")
logger.info("Found %d tables", len(tables))
def create_port_table(txn):
txn.execute(
"CREATE TABLE port_from_sqlite3 ("
" table_name varchar(100) NOT NULL UNIQUE,"
" forward_rowid bigint NOT NULL,"
" backward_rowid bigint NOT NULL"
")"
)
# The old port script created a table with just a "rowid" column.
# We want people to be able to rerun this script from an old port
# so that they can pick up any missing events that were not
# ported across.
def alter_table(txn):
txn.execute(
"ALTER TABLE IF EXISTS port_from_sqlite3"
" RENAME rowid TO forward_rowid"
)
txn.execute(
"ALTER TABLE IF EXISTS port_from_sqlite3"
" ADD backward_rowid bigint NOT NULL DEFAULT 0"
)
try:
yield self.postgres_store.runInteraction(
"alter_table", alter_table
)
except Exception as e:
logger.info("Failed to create port table: %s", e)
try:
yield self.postgres_store.runInteraction(
"create_port_table", create_port_table
)
except Exception as e:
logger.info("Failed to create port table: %s", e)
self.progress.set_state("Setting up")
# Set up tables.
setup_res = yield defer.gatherResults(
[
self.setup_table(table)
for table in tables
if table not in ["schema_version", "applied_schema_deltas"]
and not table.startswith("sqlite_")
],
consumeErrors=True,
)
# Process tables.
yield defer.gatherResults(
[
self.handle_table(*res)
for res in setup_res
],
consumeErrors=True,
)
self.progress.done()
except:
global end_error_exec_info
end_error_exec_info = sys.exc_info()
logger.exception("")
finally:
reactor.stop()
def _convert_rows(self, table, headers, rows):
bool_col_names = BOOLEAN_COLUMNS.get(table, [])
bool_cols = [
i for i, h in enumerate(headers) if h in bool_col_names
]
class BadValueException(Exception):
pass
def conv(j, col):
if j in bool_cols:
return bool(col)
elif isinstance(col, basestring) and "\0" in col:
logger.warn("DROPPING ROW: NUL value in table %s col %s: %r", table, headers[j], col)
raise BadValueException();
return col
outrows = []
for i, row in enumerate(rows):
try:
outrows.append(tuple(
conv(j, col)
for j, col in enumerate(row)
if j > 0
))
except BadValueException:
pass
return outrows
@defer.inlineCallbacks
def _setup_sent_transactions(self):
# Only save things from the last day
yesterday = int(time.time() * 1000) - 86400000
# And save the max transaction id from each destination
select = (
"SELECT rowid, * FROM sent_transactions WHERE rowid IN ("
"SELECT max(rowid) FROM sent_transactions"
" GROUP BY destination"
")"
)
def r(txn):
txn.execute(select)
rows = txn.fetchall()
headers = [column[0] for column in txn.description]
ts_ind = headers.index('ts')
return headers, [r for r in rows if r[ts_ind] < yesterday]
headers, rows = yield self.sqlite_store.runInteraction(
"select", r,
)
rows = self._convert_rows("sent_transactions", headers, rows)
inserted_rows = len(rows)
if inserted_rows:
max_inserted_rowid = max(r[0] for r in rows)
def insert(txn):
self.postgres_store.insert_many_txn(
txn, "sent_transactions", headers[1:], rows
)
yield self.postgres_store.execute(insert)
else:
max_inserted_rowid = 0
def get_start_id(txn):
txn.execute(
"SELECT rowid FROM sent_transactions WHERE ts >= ?"
" ORDER BY rowid ASC LIMIT 1",
(yesterday,)
)
rows = txn.fetchall()
if rows:
return rows[0][0]
else:
return 1
next_chunk = yield self.sqlite_store.execute(get_start_id)
next_chunk = max(max_inserted_rowid + 1, next_chunk)
yield self.postgres_store._simple_insert(
table="port_from_sqlite3",
values={
"table_name": "sent_transactions",
"forward_rowid": next_chunk,
"backward_rowid": 0,
}
)
def get_sent_table_size(txn):
txn.execute(
"SELECT count(*) FROM sent_transactions"
" WHERE ts >= ?",
(yesterday,)
)
size, = txn.fetchone()
return int(size)
remaining_count = yield self.sqlite_store.execute(
get_sent_table_size
)
total_count = remaining_count + inserted_rows
defer.returnValue((next_chunk, inserted_rows, total_count))
@defer.inlineCallbacks
def _get_remaining_count_to_port(self, table, forward_chunk, backward_chunk):
frows = yield self.sqlite_store.execute_sql(
"SELECT count(*) FROM %s WHERE rowid >= ?" % (table,),
forward_chunk,
)
brows = yield self.sqlite_store.execute_sql(
"SELECT count(*) FROM %s WHERE rowid <= ?" % (table,),
backward_chunk,
)
defer.returnValue(frows[0][0] + brows[0][0])
@defer.inlineCallbacks
def _get_already_ported_count(self, table):
rows = yield self.postgres_store.execute_sql(
"SELECT count(*) FROM %s" % (table,),
)
defer.returnValue(rows[0][0])
@defer.inlineCallbacks
def _get_total_count_to_port(self, table, forward_chunk, backward_chunk):
remaining, done = yield defer.gatherResults(
[
self._get_remaining_count_to_port(table, forward_chunk, backward_chunk),
self._get_already_ported_count(table),
],
consumeErrors=True,
)
remaining = int(remaining) if remaining else 0
done = int(done) if done else 0
defer.returnValue((done, remaining + done))
##############################################
###### The following is simply UI stuff ######
##############################################
class Progress(object):
"""Used to report progress of the port
"""
def __init__(self):
self.tables = {}
self.start_time = int(time.time())
def add_table(self, table, cur, size):
self.tables[table] = {
"start": cur,
"num_done": cur,
"total": size,
"perc": int(cur * 100 / size),
}
def update(self, table, num_done):
data = self.tables[table]
data["num_done"] = num_done
data["perc"] = int(num_done * 100 / data["total"])
def done(self):
pass
class CursesProgress(Progress):
"""Reports progress to a curses window
"""
def __init__(self, stdscr):
self.stdscr = stdscr
curses.use_default_colors()
curses.curs_set(0)
curses.init_pair(1, curses.COLOR_RED, -1)
curses.init_pair(2, curses.COLOR_GREEN, -1)
self.last_update = 0
self.finished = False
self.total_processed = 0
self.total_remaining = 0
super(CursesProgress, self).__init__()
def update(self, table, num_done):
super(CursesProgress, self).update(table, num_done)
self.total_processed = 0
self.total_remaining = 0
for table, data in self.tables.items():
self.total_processed += data["num_done"] - data["start"]
self.total_remaining += data["total"] - data["num_done"]
self.render()
def render(self, force=False):
now = time.time()
if not force and now - self.last_update < 0.2:
# reactor.callLater(1, self.render)
return
self.stdscr.clear()
rows, cols = self.stdscr.getmaxyx()
duration = int(now) - int(self.start_time)
minutes, seconds = divmod(duration, 60)
duration_str = '%02dm %02ds' % (minutes, seconds,)
if self.finished:
status = "Time spent: %s (Done!)" % (duration_str,)
else:
if self.total_processed > 0:
left = float(self.total_remaining) / self.total_processed
est_remaining = (int(now) - self.start_time) * left
est_remaining_str = '%02dm %02ds remaining' % divmod(est_remaining, 60)
else:
est_remaining_str = "Unknown"
status = (
"Time spent: %s (est. remaining: %s)"
% (duration_str, est_remaining_str,)
)
self.stdscr.addstr(
0, 0,
status,
curses.A_BOLD,
)
max_len = max([len(t) for t in self.tables.keys()])
left_margin = 5
middle_space = 1
items = self.tables.items()
items.sort(
key=lambda i: (i[1]["perc"], i[0]),
)
for i, (table, data) in enumerate(items):
if i + 2 >= rows:
break
perc = data["perc"]
color = curses.color_pair(2) if perc == 100 else curses.color_pair(1)
self.stdscr.addstr(
i + 2, left_margin + max_len - len(table),
table,
curses.A_BOLD | color,
)
size = 20
progress = "[%s%s]" % (
"#" * int(perc * size / 100),
" " * (size - int(perc * size / 100)),
)
self.stdscr.addstr(
i + 2, left_margin + max_len + middle_space,
"%s %3d%% (%d/%d)" % (progress, perc, data["num_done"], data["total"]),
)
if self.finished:
self.stdscr.addstr(
rows - 1, 0,
"Press any key to exit...",
)
self.stdscr.refresh()
self.last_update = time.time()
def done(self):
self.finished = True
self.render(True)
self.stdscr.getch()
def set_state(self, state):
self.stdscr.clear()
self.stdscr.addstr(
0, 0,
state + "...",
curses.A_BOLD,
)
self.stdscr.refresh()
class TerminalProgress(Progress):
"""Just prints progress to the terminal
"""
def update(self, table, num_done):
super(TerminalProgress, self).update(table, num_done)
data = self.tables[table]
print "%s: %d%% (%d/%d)" % (
table, data["perc"],
data["num_done"], data["total"],
)
def set_state(self, state):
print state + "..."
##############################################
##############################################
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="A script to port an existing synapse SQLite database to"
" a new PostgreSQL database."
)
parser.add_argument("-v", action='store_true')
parser.add_argument(
"--sqlite-database", required=True,
help="The snapshot of the SQLite database file. This must not be"
" currently used by a running synapse server"
)
parser.add_argument(
"--postgres-config", type=argparse.FileType('r'), required=True,
help="The database config file for the PostgreSQL database"
)
parser.add_argument(
"--curses", action='store_true',
help="display a curses based progress UI"
)
parser.add_argument(
"--batch-size", type=int, default=1000,
help="The number of rows to select from the SQLite table each"
" iteration [default=1000]",
)
args = parser.parse_args()
logging_config = {
"level": logging.DEBUG if args.v else logging.INFO,
"format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s"
}
if args.curses:
logging_config["filename"] = "port-synapse.log"
logging.basicConfig(**logging_config)
sqlite_config = {
"name": "sqlite3",
"args": {
"database": args.sqlite_database,
"cp_min": 1,
"cp_max": 1,
"check_same_thread": False,
},
}
postgres_config = yaml.safe_load(args.postgres_config)
if "database" in postgres_config:
postgres_config = postgres_config["database"]
if "name" not in postgres_config:
sys.stderr.write("Malformed database config: no 'name'")
sys.exit(2)
if postgres_config["name"] != "psycopg2":
sys.stderr.write("Database must use 'psycopg2' connector.")
sys.exit(3)
def start(stdscr=None):
if stdscr:
progress = CursesProgress(stdscr)
else:
progress = TerminalProgress()
porter = Porter(
sqlite_config=sqlite_config,
postgres_config=postgres_config,
progress=progress,
batch_size=args.batch_size,
)
reactor.callWhenRunning(porter.run)
reactor.run()
if args.curses:
curses.wrapper(start)
else:
start()
if end_error_exec_info:
exc_type, exc_value, exc_traceback = end_error_exec_info
traceback.print_exception(exc_type, exc_value, exc_traceback)