mirror of https://github.com/YunoHost/dynette.git
synced 2024-09-03 20:06:17 +02:00

Merge pull request #9 from YunoHost/queue_worker_to_avoid_useless_ios

[enh] queue/worker pattern to avoid hammering the server with I/Os

commit 65f83fb20d
2 changed files with 132 additions and 82 deletions
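In short, instead of signalling the cron script through a file in /tmp, dynette.rb now records pending work in a jobqueues table and dynette.cron.py drains that queue on each run, collapsing repeated requests into at most one zone rewrite and one bind9 cache flush. Below is a condensed, self-contained sketch of the consumer side (not the literal script: the DSN is the placeholder value from the configuration, and only the queue handling is shown):

# Condensed sketch of the queue/worker consumer introduced by this commit.
# "jobqueues", "conf_rewrite" and "bind9_cache_flush" are the names used in
# the diff below; the DSN is a placeholder.
import sys
import psycopg2

postgresql_dsn = "dbname=dynette user=dynette password=myPassword"

need_rewrite = False
need_bind9_cache_flush = False

with psycopg2.connect(postgresql_dsn) as connection:
    with connection.cursor() as cursor:
        # Read every queued task at once so that N pending requests
        # trigger at most one rewrite and one flush.
        cursor.execute("SELECT task FROM jobqueues ORDER BY id ASC;")
        for (task,) in cursor.fetchall():
            if task == "conf_rewrite":
                need_rewrite = True
            elif task == "bind9_cache_flush":
                need_bind9_cache_flush = True

        if not need_rewrite and not need_bind9_cache_flush:
            sys.exit(0)  # nothing to do on this run

        # Drop the consumed tasks within the same transaction.
        cursor.execute("DELETE FROM jobqueues;")

# ... then rewrite the bind9 configuration and/or flush its cache,
# exactly as dynette.cron.py does below.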
dynette.cron.py

@@ -2,6 +2,7 @@
 
 ### Configuration ###
 
+postgresql_dsn = "dbname=dynette user=dynette password=myPassword"
 conf_file = '/etc/bind/named.conf.local' # Include this filename in '/etc/bind/named.conf'
 zone_dir = '/var/lib/bind/' # Do not forget the trailing '/'
 subs_urls = ['https://dyndns.yunohost.org'] # 127.0.0.1 if you install subscribe server locally
@@ -25,9 +26,12 @@ allowed_operations = {
 
 ### Script ###
 
 import os
+import sys
 import json
+import psycopg2
 from urllib import urlopen
+
 
 # Get master key
 master_key_path = os.path.join(os.path.dirname(__file__), 'master.key')
 master_key = open(master_key_path).read().rstrip()
@@ -35,8 +39,35 @@ master_key = open(master_key_path).read().rstrip()
 
 # Bind configuration
 lines = ['// Generated by Dynette CRON']
 
-# Loop through Dynette servers
-for url in subs_urls:
+with psycopg2.connect(postgresql_dsn) as postgresql_connection:
+    with postgresql_connection.cursor() as psql:
+        # look in the job queue if we have tasks to handle
+        need_rewrite = False
+        need_bind9_cache_flush = False
+
+        # DataMapper converts table names to lower case and adds an "s" at
+        # the end
+        # consume all available tasks at once to merge them and avoid doing
+        # useless jobs
+        psql.execute("SELECT task FROM jobqueues ORDER BY id ASC;")
+
+        for task in psql.fetchall():
+            task = task[0]
+            if task == "conf_rewrite":
+                need_rewrite = True
+            elif task == "bind9_cache_flush":
+                need_bind9_cache_flush = True
+
+        if not need_rewrite and not need_bind9_cache_flush:
+            sys.exit(0)
+
+        # we have consumed all the jobs, flush the queue; because we are in
+        # a SQL transaction we won't end up in a situation where a job is
+        # added just after we read them all
+        psql.execute("DELETE FROM jobqueues;")
+
+# Loop through Dynette servers
+for url in subs_urls:
     lines.extend([
         'key dynette. {',
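The queue handling above leans on psycopg2's context-manager behaviour: leaving the "with connection:" block normally commits the transaction, while an exception raised inside it (including the SystemExit raised by sys.exit) triggers a rollback instead, so the DELETE is only ever committed by a run that actually read the tasks. A minimal illustration, reusing the placeholder DSN:

import psycopg2

postgresql_dsn = "dbname=dynette user=dynette password=myPassword"  # placeholder

with psycopg2.connect(postgresql_dsn) as connection:
    with connection.cursor() as cursor:
        # Every statement here belongs to one transaction; it is committed
        # when the "with" block exits without an exception, rolled back otherwise.
        cursor.execute("SELECT count(*) FROM jobqueues;")
        print(cursor.fetchone()[0])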
@@ -100,28 +131,27 @@ for url in subs_urls:
         '};',
     ])
 
-# Backup old Bind configuration file.
-os.system('cp '+ conf_file +' '+ conf_file +'.back')
+# update bind9 zone
+if need_rewrite:
+    # Backup old Bind configuration file.
+    os.system('cp '+ conf_file +' '+ conf_file +'.back')
 
-# Write Bind configuration file.
-with open(conf_file, 'w') as zone:
-    zone.write('\n'.join(lines) + '\n')
+    # Write Bind configuration file.
+    with open(conf_file, 'w') as zone:
+        zone.write('\n'.join(lines) + '\n')
 
-# Restore ownership
-os.system('chown -R bind:bind '+ zone_dir +' '+ conf_file)
+    # Restore ownership
+    os.system('chown -R bind:bind '+ zone_dir +' '+ conf_file)
 
-# Reload Bind
-if os.system('/usr/sbin/rndc reload') == 0:
-    exit(0)
-else:
-    os.system('cp '+ conf_file +' '+ conf_file +'.bad')
-    os.system('cp '+ conf_file +'.back '+ conf_file)
-    os.system('/usr/sbin/rndc reload')
-    print("An error occured ! Please check daemon.log and your conf.bad")
-    exit(1)
+    # Reload Bind
+    if os.system('/usr/sbin/rndc reload') != 0:
+        os.system('cp '+ conf_file +' '+ conf_file +'.bad')
+        os.system('cp '+ conf_file +'.back '+ conf_file)
+        os.system('/usr/sbin/rndc reload')
+        print("An error occured ! Please check daemon.log and your conf.bad")
+        sys.exit(1)
 
-# mein got this is so awful
-if os.path.exists('/tmp/dynette_flush_bind_cache'):
-    os.system('/usr/sbin/rndc flush')
-    os.system('/usr/sbin/rndc reload')
-    os.system('rm /tmp/dynette_flush_bind_cache')
+# flush bind9 cache (mostly because of the hmac-sha512 key migration)
+if need_bind9_cache_flush:
+    os.system('/usr/sbin/rndc flush')
+    os.system('/usr/sbin/rndc reload')
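On the producer side, dynette.rb (next file) enqueues work with DataMapper's Jobqueue.create, which boils down to inserting one row naming the task. For illustration only, here is the same enqueue done directly from Python; the schedule_task helper is made up for this sketch and is not part of dynette:

import psycopg2

def schedule_task(dsn, task):
    # Hypothetical helper: enqueue one task for the next run of the cron worker.
    with psycopg2.connect(dsn) as connection:
        with connection.cursor() as cursor:
            cursor.execute("INSERT INTO jobqueues (task) VALUES (%s);", (task,))

# e.g. schedule_task("dbname=dynette user=dynette password=myPassword", "conf_rewrite")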
dynette.rb (30 lines changed)
@@ -68,6 +68,25 @@ class Ipban
     property :ip_addr, String, :key => true
 end
 
+################
+### JobQueue ###
+################
+
+# JobQueue to communicate with the conf updater
+class Jobqueue
+    include DataMapper::Resource
+
+    property :id, Serial, :key => true
+    property :task, String
+end
+
+def schedule_conf_rewrite()
+    Jobqueue.create(:task => "conf_rewrite")
+end
+
+def schedule_bind9_cache_flush()
+    Jobqueue.create(:task => "bind9_cache_flush")
+end
 
 ################
 ### Handlers ###
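The comment in dynette.cron.py above relies on DataMapper's naming convention: the Jobqueue model is stored in a lower-cased, pluralised jobqueues table. Dynette lets DataMapper create that table automatically; the sketch below spells out the minimal schema the worker expects, with column types as an assumption (DataMapper's default String length is 50):

import psycopg2

# Assumed minimal schema for the queue table; dynette itself lets DataMapper
# create it, so the exact types may differ.
with psycopg2.connect("dbname=dynette user=dynette password=myPassword") as connection:
    with connection.cursor() as cursor:
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS jobqueues (
                id   SERIAL PRIMARY KEY,
                task VARCHAR(50)
            );
        """)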
@@ -198,6 +217,7 @@ post '/key/:public_key' do
     entry.ips << Ip.create(:ip_addr => request.ip)
 
     if entry.save
+        schedule_conf_rewrite
         halt 201, { :public_key => entry.public_key, :subdomain => entry.subdomain, :current_ip => entry.current_ip }.to_json
     else
         halt 412, { :error => "A problem occured during DNS registration" }.to_json
@@ -237,11 +257,8 @@ put '/migrate_key_to_sha512/' do
         halt 412, { :error => "A problem occured during key algo migration" }.to_json
     end
 
-    # I don't have any other way of communicating with this dynette.cron.py
-    # this is awful
-    File.open("/tmp/dynette_flush_bind_cache", "w").close
-    # let's try flusing here, hope that could help ... (this design is so awful)
-    `/usr/sbin/rndc flush`
+    schedule_conf_rewrite
+    schedule_bind9_cache_flush
 
     halt 201, { :public_key => entry.public_key, :subdomain => entry.subdomain, :current_ip => entry.current_ip }.to_json
 end
@@ -255,6 +272,7 @@ put '/key/:public_key' do
     end
     entry.current_ip = request.ip
     if entry.save
+        schedule_conf_rewrite
         halt 201, { :public_key => entry.public_key, :subdomain => entry.subdomain, :current_ip => entry.current_ip }.to_json
     else
         halt 412, { :error => "A problem occured during DNS update" }.to_json
@@ -270,6 +288,7 @@ delete '/key/:public_key' do
     if entry = Entry.first(:public_key => params[:public_key])
         Ip.first(:entry_id => entry.id).destroy
         if entry.destroy
+            schedule_conf_rewrite
             halt 200, "OK".to_json
         else
             halt 412, { :error => "A problem occured during DNS deletion" }.to_json
@@ -296,6 +315,7 @@ delete '/domains/:subdomain' do
 
         Ip.first(:entry_id => entry.id).destroy
         if entry.destroy
+            schedule_conf_rewrite
             halt 200, "OK".to_json
         else
             halt 412, { :error => "A problem occured during DNS deletion" }.to_json