Merge pull request #9 from YunoHost/queue_worker_to_avoid_useless_ios

[enh] queue/worker pattern to avoid hammering the server with ios
This commit is contained in:
Bram 2018-04-19 22:29:22 +02:00 committed by GitHub
commit 65f83fb20d
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
2 changed files with 132 additions and 82 deletions

View file

@ -2,6 +2,7 @@
### Configuration ###
postgresql_dsn = "dbname=dynette user=dynette password=myPassword"
conf_file = '/etc/bind/named.conf.local' # Include this filename in '/etc/bind/named.conf'
zone_dir = '/var/lib/bind/' # Do not forget the trailing '/'
subs_urls = ['https://dyndns.yunohost.org'] # 127.0.0.1 if you install subscribe server locally
@ -25,9 +26,12 @@ allowed_operations = {
### Script ###
import os
import sys
import json
import psycopg2
from urllib import urlopen
# Get master key
master_key_path = os.path.join(os.path.dirname(__file__), 'master.key')
master_key = open(master_key_path).read().rstrip()
@ -35,93 +39,119 @@ master_key = open(master_key_path).read().rstrip()
# Bind configuration
lines = ['// Generated by Dynette CRON']
# Loop through Dynette servers
for url in subs_urls:
with psycopg2.connect(postgresql_dsn) as postgresql_connection:
with postgresql_connection.cursor() as psql:
# look in the job queue if we have tasks to handle
need_rewrite = False
need_bind9_cache_flush = False
lines.extend([
'key dynette. {',
' algorithm hmac-md5;',
' secret "'+ master_key +'";',
'};',
])
# DataMapper convert table names to lower cases and add a "s" at the
# end
# consume all available tasks at once to merge them and avoid doing
# useless jobs
psql.execute("SELECT task FROM jobqueues ORDER BY id ASC;")
# Get available DynDNS domains
domains = json.loads(str(urlopen(url +'/domains').read()))
for domain in domains:
for task in psql.fetchall():
task = task[0]
if task == "conf_rewrite":
need_rewrite = True
elif task == "bind9_cache_flush":
need_bind9_cache_flush = True
# Create zone database if not present
if not os.path.exists(zone_dir + domain +'.db'):
db_lines = [
'$ORIGIN .',
'$TTL 10 ; 10 seconds',
domain+'. IN SOA '+ ns0 +'. '+ rname +'. (',
' 18 ; serial',
' 10800 ; refresh (3 hours)',
' 3600 ; retry (1 hour)',
' 604800 ; expire (1 week)',
' 10 ; minimum (10 seconds)',
' )',
'$TTL 3600 ; 1 hour',
' NS '+ ns0 +'.',
' NS '+ ns1 +'.',
'',
'$ORIGIN '+ domain +'.',
]
with open(zone_dir + domain +'.db', 'w') as zone:
for line in db_lines:
zone.write(line + '\n')
if not need_rewrite and not need_bind9_cache_flush:
sys.exit(0)
lines.extend([
'zone "'+ domain +'" {',
' type master;',
' file "'+ zone_dir + domain +'.db"; ',
' update-policy {',
' grant dynette. wildcard *.'+ domain +'. ANY;',
])
# we have consumed all the jobs, flush the queue
# because we are in a SQL transaction we won't have a situation where a
# job could be added just after we read them all
psql.execute("DELETE FROM jobqueues;")
# Get registered sub-domains
result = json.loads(str(urlopen(url +'/all/'+ domain).read()))
for entry in result:
for subd, type in allowed_operations.items():
if subd == '.': subd = ''
lines.append(' grant '+ entry['subdomain'] +'. name '+ subd + entry['subdomain'] +'. ' + ' '.join(type) +';')
# Loop through Dynette servers
for url in subs_urls:
lines.extend([
' };',
'};'
'',
])
for entry in result:
lines.extend([
'key '+ entry['subdomain'] +'. {',
' algorithm ' + entry['key_algo'] + ';',
' secret "'+ entry['public_key'] +'";',
'};',
'key dynette. {',
' algorithm hmac-md5;',
' secret "'+ master_key +'";',
'};',
])
# Backup old Bind configuration file.
os.system('cp '+ conf_file +' '+ conf_file +'.back')
# Get available DynDNS domains
domains = json.loads(str(urlopen(url +'/domains').read()))
for domain in domains:
# Write Bind configuration file.
with open(conf_file, 'w') as zone:
zone.write('\n'.join(lines) + '\n')
# Create zone database if not present
if not os.path.exists(zone_dir + domain +'.db'):
db_lines = [
'$ORIGIN .',
'$TTL 10 ; 10 seconds',
domain+'. IN SOA '+ ns0 +'. '+ rname +'. (',
' 18 ; serial',
' 10800 ; refresh (3 hours)',
' 3600 ; retry (1 hour)',
' 604800 ; expire (1 week)',
' 10 ; minimum (10 seconds)',
' )',
'$TTL 3600 ; 1 hour',
' NS '+ ns0 +'.',
' NS '+ ns1 +'.',
'',
'$ORIGIN '+ domain +'.',
]
with open(zone_dir + domain +'.db', 'w') as zone:
for line in db_lines:
zone.write(line + '\n')
# Restore ownership
os.system('chown -R bind:bind '+ zone_dir +' '+ conf_file)
lines.extend([
'zone "'+ domain +'" {',
' type master;',
' file "'+ zone_dir + domain +'.db"; ',
' update-policy {',
' grant dynette. wildcard *.'+ domain +'. ANY;',
])
# Reload Bind
if os.system('/usr/sbin/rndc reload') == 0:
exit(0)
else:
os.system('cp '+ conf_file +' '+ conf_file +'.bad')
os.system('cp '+ conf_file +'.back '+ conf_file)
os.system('/usr/sbin/rndc reload')
print("An error occured ! Please check daemon.log and your conf.bad")
exit(1)
# Get registered sub-domains
result = json.loads(str(urlopen(url +'/all/'+ domain).read()))
for entry in result:
for subd, type in allowed_operations.items():
if subd == '.': subd = ''
lines.append(' grant '+ entry['subdomain'] +'. name '+ subd + entry['subdomain'] +'. ' + ' '.join(type) +';')
# mein Gott, this is so awful
if os.path.exists('/tmp/dynette_flush_bind_cache'):
os.system('/usr/sbin/rndc flush')
os.system('/usr/sbin/rndc reload')
os.system('rm /tmp/dynette_flush_bind_cache')
lines.extend([
' };',
'};'
'',
])
for entry in result:
lines.extend([
'key '+ entry['subdomain'] +'. {',
' algorithm ' + entry['key_algo'] + ';',
' secret "'+ entry['public_key'] +'";',
'};',
])
# update bind9 zone
if need_rewrite:
# Backup old Bind configuration file.
os.system('cp '+ conf_file +' '+ conf_file +'.back')
# Write Bind configuration file.
with open(conf_file, 'w') as zone:
zone.write('\n'.join(lines) + '\n')
# Restore ownership
os.system('chown -R bind:bind '+ zone_dir +' '+ conf_file)
# Reload Bind
if os.system('/usr/sbin/rndc reload') != 0:
os.system('cp '+ conf_file +' '+ conf_file +'.bad')
os.system('cp '+ conf_file +'.back '+ conf_file)
os.system('/usr/sbin/rndc reload')
print("An error occured ! Please check daemon.log and your conf.bad")
sys.exit(1)
# flush bind9 cache (mostly because of the hmac-sha512 key migration)
if need_bind9_cache_flush:
os.system('/usr/sbin/rndc flush')
os.system('/usr/sbin/rndc reload')

View file

@ -68,6 +68,25 @@ class Ipban
property :ip_addr, String, :key => true
end
################
### JobQueue ###
################
# JobQueue to communicate with the conf updater
# Persistent work queue (DataMapper model) consumed by the cron-side
# updater: rows are plain task-name strings which the consumer reads in
# id order, merges, acts on once, then deletes in the same transaction
# (see the `jobqueues` SELECT/DELETE in the cron script).
class Jobqueue
include DataMapper::Resource
# Auto-incrementing primary key; also gives the consumer a stable read order.
property :id, Serial, :key => true
# Task identifier; known values: "conf_rewrite", "bind9_cache_flush".
property :task, String
end
# Enqueue a request to regenerate the Bind configuration file.
def schedule_conf_rewrite()
Jobqueue.create(:task => "conf_rewrite")
end
# Enqueue a request to flush the Bind9 cache (used after key migrations).
def schedule_bind9_cache_flush()
Jobqueue.create(:task => "bind9_cache_flush")
end
################
### Handlers ###
@ -198,6 +217,7 @@ post '/key/:public_key' do
entry.ips << Ip.create(:ip_addr => request.ip)
if entry.save
schedule_conf_rewrite
halt 201, { :public_key => entry.public_key, :subdomain => entry.subdomain, :current_ip => entry.current_ip }.to_json
else
halt 412, { :error => "A problem occured during DNS registration" }.to_json
@ -237,11 +257,8 @@ put '/migrate_key_to_sha512/' do
halt 412, { :error => "A problem occured during key algo migration" }.to_json
end
# I don't have any other way of communicating with this dynette.cron.py
# this is awful
File.open("/tmp/dynette_flush_bind_cache", "w").close
# let's try flushing here, hope that could help ... (this design is so awful)
`/usr/sbin/rndc flush`
schedule_conf_rewrite
schedule_bind9_cache_flush
halt 201, { :public_key => entry.public_key, :subdomain => entry.subdomain, :current_ip => entry.current_ip }.to_json
end
@ -255,6 +272,7 @@ put '/key/:public_key' do
end
entry.current_ip = request.ip
if entry.save
schedule_conf_rewrite
halt 201, { :public_key => entry.public_key, :subdomain => entry.subdomain, :current_ip => entry.current_ip }.to_json
else
halt 412, { :error => "A problem occured during DNS update" }.to_json
@ -270,6 +288,7 @@ delete '/key/:public_key' do
if entry = Entry.first(:public_key => params[:public_key])
Ip.first(:entry_id => entry.id).destroy
if entry.destroy
schedule_conf_rewrite
halt 200, "OK".to_json
else
halt 412, { :error => "A problem occured during DNS deletion" }.to_json
@ -296,6 +315,7 @@ delete '/domains/:subdomain' do
Ip.first(:entry_id => entry.id).destroy
if entry.destroy
schedule_conf_rewrite
halt 200, "OK".to_json
else
halt 412, { :error => "A problem occured during DNS deletion" }.to_json