Mirror of https://github.com/YunoHost/dynette.git, synced 2024-09-03 20:06:17 +02:00.
Compare commits
No commits in common. "v1.0.0" and "master" have entirely different histories.
17 changed files with 440 additions and 523 deletions

.github/workflows/autoblack.yml | 35 (vendored, new file)
@@ -0,0 +1,35 @@
name: Check / auto apply Black
on:
  push:
    branches:
      - master
jobs:
  black:
    name: Check / auto apply black
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Check files using the black formatter
        uses: psf/black@stable
        id: black
        with:
          options: "."
        continue-on-error: true
      - shell: pwsh
        id: check_files_changed
        run: |
          # Diff HEAD with the previous commit
          $diff = git diff
          $HasDiff = $diff.Length -gt 0
          Write-Host "::set-output name=files_changed::$HasDiff"
      - name: Create Pull Request
        if: steps.check_files_changed.outputs.files_changed == 'true'
        uses: peter-evans/create-pull-request@v6
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          title: "Format Python code with Black"
          commit-message: ":art: Format Python code with Black"
          body: |
            This pull request uses the [psf/black](https://github.com/psf/black) formatter.
          base: ${{ github.head_ref }} # Creates pull request onto pull request or commit branch
          branch: actions/black

.gitignore | 3 (vendored)
@@ -1 +1,2 @@
*.swp
venv
__pycache__

Gemfile | 8 (deleted)
@@ -1,8 +0,0 @@
source 'https://rubygems.org'

gem 'sinatra'
gem 'thin'
gem 'json'
gem 'data_mapper'
gem 'dm-postgres-adapter'
gem 'pg'

Gemfile.lock | 85 (deleted)
@@ -1,85 +0,0 @@
GEM
  remote: https://rubygems.org/
  specs:
    addressable (2.3.4)
    bcrypt-ruby (3.0.1)
    daemons (1.1.9)
    data_mapper (1.2.0)
      dm-aggregates (~> 1.2.0)
      dm-constraints (~> 1.2.0)
      dm-core (~> 1.2.0)
      dm-migrations (~> 1.2.0)
      dm-serializer (~> 1.2.0)
      dm-timestamps (~> 1.2.0)
      dm-transactions (~> 1.2.0)
      dm-types (~> 1.2.0)
      dm-validations (~> 1.2.0)
    data_objects (0.10.13)
      addressable (~> 2.1)
    dm-aggregates (1.2.0)
      dm-core (~> 1.2.0)
    dm-constraints (1.2.0)
      dm-core (~> 1.2.0)
    dm-core (1.2.1)
      addressable (~> 2.3)
    dm-do-adapter (1.2.0)
      data_objects (~> 0.10.6)
      dm-core (~> 1.2.0)
    dm-migrations (1.2.0)
      dm-core (~> 1.2.0)
    dm-postgres-adapter (1.2.0)
      dm-do-adapter (~> 1.2.0)
      do_postgres (~> 0.10.6)
    dm-serializer (1.2.2)
      dm-core (~> 1.2.0)
      fastercsv (~> 1.5)
      json (~> 1.6)
      json_pure (~> 1.6)
      multi_json (~> 1.0)
    dm-timestamps (1.2.0)
      dm-core (~> 1.2.0)
    dm-transactions (1.2.0)
      dm-core (~> 1.2.0)
    dm-types (1.2.2)
      bcrypt-ruby (~> 3.0)
      dm-core (~> 1.2.0)
      fastercsv (~> 1.5)
      json (~> 1.6)
      multi_json (~> 1.0)
      stringex (~> 1.4)
      uuidtools (~> 2.1)
    dm-validations (1.2.0)
      dm-core (~> 1.2.0)
    do_postgres (0.10.13)
      data_objects (= 0.10.13)
    eventmachine (1.0.3)
    fastercsv (1.5.5)
    json (1.8.0)
    json_pure (1.8.0)
    multi_json (1.7.7)
    pg (0.15.1)
    rack (1.5.2)
    rack-protection (1.5.0)
      rack
    sinatra (1.4.3)
      rack (~> 1.4)
      rack-protection (~> 1.4)
      tilt (~> 1.3, >= 1.3.4)
    stringex (1.5.1)
    thin (1.5.1)
      daemons (>= 1.0.9)
      eventmachine (>= 0.12.6)
      rack (>= 1.0.0)
    tilt (1.4.1)
    uuidtools (2.1.4)

PLATFORMS
  ruby

DEPENDENCIES
  data_mapper
  dm-postgres-adapter
  json
  pg
  sinatra
  thin

Procfile | 1 (deleted)
@@ -1 +0,0 @@
web: bundle exec ruby dynette.rb -p $PORT

README.md | 185
@@ -1,122 +1,111 @@
YunoHost DynDNS Server
======================

**Note: Tested on Debian wheezy (should work on Ubuntu)**

### Setup

```bash
python3 -m venv venv
source venv/bin/activate
pip3 install -r requirements.txt
```
git clone https://github.com/YunoHost/dynette

### Dev

```bash
FLASK_APP=app.py flask run
```

Web subscribe server deployment
-------------------------------
```
apt-get install postgresql ruby thin libpq-dev bundler apache2
```
### Production

In the dynette repository:
```
bundle install
```
- You should also install bind9
- Include `/etc/bind/named.conf.local` in `/etc/bind/named.conf`
- Install the following services

Thin configuration:
```
thin config -C /etc/thin1.9.1/dynette.yml -c /path/to/dynette/ --servers 3 -p 5000 -e production
```

Apache configuration:
```
vim /etc/apache2/sites-available/dynette
```

Paste the configuration below and change the server name:
```
<VirtualHost *:80>
    ServerName dyndns.yunohost.org

    RewriteEngine On

    <Proxy balancer://thinservers>
        BalancerMember http://127.0.0.1:5000
        BalancerMember http://127.0.0.1:5001
        BalancerMember http://127.0.0.1:5002
    </Proxy>

    # Redirect all non-static requests to thin
    RewriteCond %{DOCUMENT_ROOT}/%{REQUEST_FILENAME} !-f
    RewriteRule ^/(.*)$ balancer://thinservers%{REQUEST_URI} [P,QSA,L]

    ProxyPass / balancer://thinservers/
    ProxyPassReverse / balancer://thinservers/
    ProxyPreserveHost on

    <Proxy *>
        Order deny,allow
        Allow from all
    </Proxy>

    # Custom log file locations
    ErrorLog /var/log/apache2/dynette-error.log
    CustomLog /var/log/apache2/dynette-access.log combined

</VirtualHost>
```

PostgreSQL configuration:
```
# adduser dynette
# passwd dynette
# su - postgres
$ psql template1
template1=# CREATE USER dynette WITH PASSWORD 'myPassword';
template1=# CREATE DATABASE dynette;
template1=# GRANT ALL PRIVILEGES ON DATABASE dynette to dynette;
template1=# \q
```

Edit dynette.rb and change the PostgreSQL password and the handled domains (lines 11-12):
```
DataMapper.setup(:default, ENV['DATABASE_URL'] || "postgres://dynette:myPassword@localhost/dynette")
DOMAINS = ["nohost.me", "noho.st"]
```

Enable apache2 sites & modules:
```
a2enmod proxy
a2enmod rewrite
a2ensite dynette
service thin start
service apache2 restart
```

DNS configuration
-----------------
##### `dynette.service`

```
apt-get install bind9 python
# Systemd config
[Unit]
Description=Dynette gunicorn daemon
After=network.target

[Service]
PIDFile=/run/gunicorn/dynette-pid
User=dynette
Group=dynette
WorkingDirectory=/var/www/dynette
ExecStart=/var/www/dynette/venv/bin/gunicorn -c /var/www/dynette/gunicorn.py wsgi:app
ExecReload=/bin/kill -s HUP $MAINPID
ExecStop=/bin/kill -s TERM $MAINPID
PrivateTmp=true

[Install]
WantedBy=multi-user.target
```

Edit dynette.cron.py and change the settings:
##### `dynette-regen-named-conf.service`

```
subs_urls = ['http://dyndns.yunohost.org']
ns1 = 'dynhost.yunohost.org'
ns2 = 'hostmaster.yunohost.org'
[Unit]
Description=Dynette named.conf regen
After=network.target
StartLimitIntervalSec=10
StartLimitBurst=5

[Service]
Type=oneshot
WorkingDirectory=/var/www/dynette
ExecStart=/var/www/dynette/venv/bin/python3 /var/www/dynette/regen_named_conf.py
User=root
Group=root

[Install]
WantedBy=multi-user.target
```

Create the dynette log file:
##### `dynette-regen-named-conf.path`

```
touch /var/log/dynette.log
[Path]
Unit=dynette-regen-named-conf.service
PathChanged=/var/dynette/db/

[Install]
WantedBy=multi-user.target
```

Enable the cronjob for dynette (`crontab -e`):
##### NGINX conf snippet

```
* * * * * /path/to/dynette/dynette.cron.py >> /var/log/dynette.log 2>&1
location / {
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
    proxy_redirect off;
    proxy_pass http://unix:/var/www/dynette/sock;
    proxy_read_timeout 210s;
}
```

### If we ever decide to add another base domain

Troubleshooting
---------------
We should initialize `/var/lib/bind/BASE_DOMAIN.db` (replace `BASE_DOMAIN` with e.g. nohost.me) with:

If you run into trouble running the DNS server, check the permissions on /var/lib/bind and check that bind listens on 0.0.0.0 (in /etc/bind/named.conf.options).
```text
$ORIGIN .
$TTL 10 ; 10 seconds
BASE_DOMAIN IN SOA ns0.yunohost.org. hostmaster.yunohost.org. (
        1006380 ; serial
        10800   ; refresh (3 hours)
        3600    ; retry (1 hour)
        604800  ; expire (1 week)
        10      ; minimum (10 seconds)
        )
$TTL 3600 ; 1 hour
        NS ns0.yunohost.org.
        NS ns1.yunohost.org.
$ORIGIN BASE_DOMAIN.
```

__init__.py | 0 (new, empty file)

app.py | 224 (new file)
@@ -0,0 +1,224 @@
import hmac
import base64
import os
import re
import yaml
import bcrypt

from flask import Flask, jsonify, request
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
from werkzeug.middleware.proxy_fix import ProxyFix

DOMAIN_REGEX = re.compile(
    r"^([a-z0-9]{1}([a-z0-9\-]*[a-z0-9])*)(\.[a-z0-9]{1}([a-z0-9\-]*[a-z0-9])*)*(\.[a-z]{1}([a-z0-9\-]*[a-z0-9])*)$"
)


def trusted_ip():
    # This is, for example, the CI or developers testing new developments
    return request.environ.get("HTTP_X_FORWARDED_HOST") in app.config.get("LIMIT_EXEMPTED_IPS", [])


app = Flask(__name__)
app.config.from_file("config.yml", load=yaml.safe_load)
# cf. https://flask-limiter.readthedocs.io/en/stable/recipes.html#deploying-an-application-behind-a-proxy
app.wsgi_app = ProxyFix(app.wsgi_app, x_for=1)
limiter = Limiter(
    get_remote_address,
    app=app,
    default_limits=["50 per hour"],
    # storage_uri="memory://",  # <- For development
    storage_uri="redis://localhost:6379",
    storage_options={"socket_connect_timeout": 30},
    strategy="fixed-window",  # or "moving-window"
    application_limits_exempt_when=trusted_ip,
    default_limits_exempt_when=trusted_ip,
)

assert os.path.isdir(
    app.config["DB_FOLDER"]
), "You should create the DB folder declared in the config"


def _validate_domain(domain):

    if not DOMAIN_REGEX.match(domain):
        return {"error": f"This is not a valid domain: {domain}"}, 400

    if (
        len(domain.split(".")) != 3
        or domain.split(".", 1)[-1] not in app.config["DOMAINS"]
    ):
        return {"error": "This subdomain is not handled by this dynette server."}, 400


def _is_available(domain):

    return not os.path.exists(f"{app.config['DB_FOLDER']}/{domain}.key")


@app.route("/")
@limiter.exempt
def home():
    return "Wanna play the dynette?"


@app.route("/domains")
@limiter.exempt
def domains():
    return jsonify(app.config["DOMAINS"]), 200


@app.route("/test/<domain>")
@limiter.limit("50 per hour", exempt_when=trusted_ip)
def availability(domain):

    error = _validate_domain(domain)
    if error:
        return error

    if _is_available(domain):
        return f'"Domain {domain} is available"', 200
    else:
        return {"error": f"Subdomain already taken: {domain}"}, 409


@app.route("/key/<key>", methods=["POST"])
@limiter.limit("5 per hour", exempt_when=trusted_ip)
def register(key):

    try:
        key = base64.b64decode(key).decode()
    except Exception as e:
        return {"error": "Key format is invalid"}, 400
    else:
        if len(key) != 89:
            return {"error": "Key format is invalid"}, 400

    try:
        data = dict(request.form)  # get_json(force=True)
        subdomain = data.get("subdomain")
        assert isinstance(subdomain, str)
    except Exception as e:
        return {"error": f"Invalid request: {str(request.form)}"}, 400

    error = _validate_domain(subdomain)
    if error:
        return error

    if not _is_available(subdomain):
        return {"error": f"Subdomain already taken: {subdomain}"}, 409

    recovery_password = data.get("recovery_password")
    if recovery_password and isinstance(recovery_password, str):
        if len(recovery_password) < 8:
            return {"error": "Recovery password too short"}, 409
        if len(recovery_password) > 1024:
            return {"error": "Recovery password too long"}, 409

        recovery_password = bcrypt.hashpw(
            password=recovery_password.encode(), salt=bcrypt.gensalt(14)
        )
        recovery_password = base64.b64encode(recovery_password).decode()

    with open(f"{app.config['DB_FOLDER']}/{subdomain}.key", "w") as f:
        f.write(key)

    if recovery_password:
        with open(f"{app.config['DB_FOLDER']}/{subdomain}.recovery_password", "w") as f:
            f.write(recovery_password)

    return '"OK"', 201


@app.route("/domains/<subdomain>", methods=["DELETE"])
@limiter.limit("5 per hour", exempt_when=trusted_ip)
def delete_using_recovery_password_or_key(subdomain):

    try:
        assert isinstance(subdomain, str)
        data = dict(request.form)  # get_json(force=True)
        recovery_password = data.get("recovery_password")
        key = data.get("key")
        assert (recovery_password and isinstance(recovery_password, str)) or (
            key and isinstance(key, str)
        )
        if key:
            key = base64.b64decode(key).decode()
    except Exception:
        return {"error": "Invalid request"}, 400

    error = _validate_domain(subdomain)
    if error:
        return error

    if _is_available(subdomain):
        return {"error": "Subdomain already deleted"}, 409

    if key:
        with open(f"{app.config['DB_FOLDER']}/{subdomain}.key") as f:
            if not hmac.compare_digest(key, f.read()):
                return '"Access denied"', 403
    elif recovery_password:
        if not os.path.exists(
            f"{app.config['DB_FOLDER']}/{subdomain}.recovery_password"
        ):
            return '"Access denied"', 403
        with open(f"{app.config['DB_FOLDER']}/{subdomain}.recovery_password") as f:
            hashed = base64.b64decode(f.read())

        if not bcrypt.checkpw(recovery_password.encode(), hashed):
            return '"Access denied"', 403
    # Shouldn't happen, this is checked before
    else:
        return '"Access denied"', 403

    if os.path.exists(f"{app.config['DB_FOLDER']}/{subdomain}.key"):
        os.remove(f"{app.config['DB_FOLDER']}/{subdomain}.key")
    if os.path.exists(f"{app.config['DB_FOLDER']}/{subdomain}.recovery_password"):
        os.remove(f"{app.config['DB_FOLDER']}/{subdomain}.recovery_password")

    return '"OK"', 200


@app.route("/domains/<subdomain>/recovery_password", methods=["PUT"])
@limiter.limit("5 per hour", exempt_when=trusted_ip)
def set_recovery_password_using_key(subdomain):

    try:
        assert isinstance(subdomain, str)
        data = dict(request.form)  # get_json(force=True)
        recovery_password = data.get("recovery_password")
        key = data.get("key")
        assert (recovery_password and isinstance(recovery_password, str)) and (
            key and isinstance(key, str)
        )
        if key:
            key = base64.b64decode(key).decode()
    except Exception:
        return {"error": "Invalid request"}, 400

    error = _validate_domain(subdomain)
    if error:
        return error

    if _is_available(subdomain):
        return {"error": "Subdomain not registered"}, 404

    with open(f"{app.config['DB_FOLDER']}/{subdomain}.key") as f:
        if not hmac.compare_digest(key, f.read()):
            return '"Access denied"', 403

    if len(recovery_password) < 8:
        return {"error": "Recovery password too short"}, 409
    if len(recovery_password) > 1024:
        return {"error": "Recovery password too long"}, 409

    recovery_password = bcrypt.hashpw(
        password=recovery_password.encode(), salt=bcrypt.gensalt(14)
    )
    recovery_password = base64.b64encode(recovery_password).decode()

    with open(f"{app.config['DB_FOLDER']}/{subdomain}.recovery_password", "w") as f:
        f.write(recovery_password)

    return '"OK"', 200
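
For reference, here is a minimal, hypothetical client-side sketch of the HTTP API that app.py exposes. It is not part of the repository: it assumes the third-party `requests` library, a placeholder `BASE_URL`, and dummy key/password values, purely to illustrate the request shapes the routes expect.

```python
# Hypothetical client sketch for the dynette API (not part of the repo).
# Assumptions: `requests` is installed, BASE_URL points at a dynette instance,
# and KEY is dummy 89-character key material used only for illustration.
import base64
import requests

BASE_URL = "https://dyndns.example.org"           # assumption: your dynette instance
KEY = "x" * 89                                    # dummy: register() requires len(key) == 89 after decoding
B64_KEY = base64.b64encode(KEY.encode()).decode()

# 1. Check availability (GET /test/<domain>): 200 if free, 409 if taken
r = requests.get(f"{BASE_URL}/test/example.nohost.me")
print(r.status_code, r.text)

# 2. Register the subdomain (POST /key/<base64-key>), optionally with a recovery password
r = requests.post(
    f"{BASE_URL}/key/{B64_KEY}",
    data={"subdomain": "example.nohost.me", "recovery_password": "s3cret-recovery"},
)
print(r.status_code, r.text)                      # 201 '"OK"' on success

# 3. Delete it later (DELETE /domains/<subdomain>), authenticating with the same key
r = requests.delete(
    f"{BASE_URL}/domains/example.nohost.me",
    data={"key": B64_KEY},
)
print(r.status_code, r.text)                      # 200 '"OK"' on success
```

The subdomain must be a direct subdomain of one of the configured DOMAINS (the `len(domain.split(".")) != 3` check), and all payloads are sent as form data because the routes read `request.form`.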

config.ru | 2 (deleted)
@@ -1,2 +0,0 @@
require './dynette.rb'
run Sinatra::Application.run!

config.yml.example | 3 (new file)
@@ -0,0 +1,3 @@
DOMAINS: [nohost.me, noho.st, ynh.fr]
DB_FOLDER: /var/dynette/db/
LIMIT_EXEMPTED_IPS: []

dynette.cron.py | 93 (deleted)
@@ -1,93 +0,0 @@
#!/usr/bin/python

### Configuration ###

conf_file = '/etc/bind/named.conf.local'  # Include this filename in '/etc/bind/named.conf'
zone_dir = '/var/lib/bind/'  # Do not forget the trailing '/'
subs_urls = ['http://dyndns.yunohost.org']  # 127.0.0.1 if you install subscribe server locally
ns1 = 'dynhost.yunohost.org'  # Name servers
ns2 = 'hostmaster.yunohost.org'

allowed_operations = {
    '.' : ['A', 'AAAA', 'TXT', 'MX'],
    'pubsub.' : ['A', 'AAAA'],
    'muc.' : ['A', 'AAAA'],
    'vjud.' : ['A', 'AAAA'],
    '_xmpp-client._tcp.' : ['SRV'],
    '_xmpp-server._tcp.' : ['SRV']
}


### Script ###

import os
import sys
import json
from urllib import urlopen

lines = ['// Generated by Dynette CRON']
for url in subs_urls:
    domains = json.loads(str(urlopen(url +'/domains').read()))

    for domain in domains:
        result = json.loads(str(urlopen(url +'/all/'+ domain).read()))
        if not os.path.exists(zone_dir + domain +'.db'):
            db_lines = [
                '$ORIGIN .',
                '$TTL 10 ; 10 seconds',
                domain+'. IN SOA '+ ns1 +'. '+ ns2 +'. (',
                ' 18 ; serial',
                ' 10800 ; refresh (3 hours)',
                ' 3600 ; retry (1 hour)',
                ' 604800 ; expire (1 week)',
                ' 10 ; minimum (10 seconds)',
                ' )',
                '$TTL 3600 ; 1 hour',
                ' NS '+ ns1 +'.',
                ' NS '+ ns2 +'.',
                '',
                '$ORIGIN '+ domain +'.',
            ]
            with open(zone_dir + domain +'.db', 'w') as zone:
                for line in db_lines:
                    zone.write(line + '\n')
        lines.extend([
            'zone "'+ domain +'" {',
            ' type master;',
            ' file "'+ zone_dir + domain +'.db"; ',
            ' update-policy {',
        ])

        for entry in result:
            for subd, type in allowed_operations.items():
                if subd == '.': subd = ''
                lines.append(' grant '+ entry['subdomain'] +'. name '+ subd + entry['subdomain'] +'. ' + ' '.join(type) +';')

        lines.extend([
            ' };',
            '};',
        ])

        for entry in result:
            lines.extend([
                'key '+ entry['subdomain'] +'. {',
                ' algorithm hmac-md5;',
                ' secret "'+ entry['public_key'] +'";',
                '};',
            ])

os.system('cp '+ conf_file +' '+ conf_file +'.back')

with open(conf_file, 'w') as zone:
    for line in lines:
        zone.write(line + '\n')

os.system('chown -R bind:bind '+ zone_dir +' '+ conf_file)
if os.system('/usr/sbin/rndc reload') == 0:
    exit(0)
else:
    os.system('cp '+ conf_file +' '+ conf_file +'.bad')
    os.system('cp '+ conf_file +'.back '+ conf_file)
    os.system('/usr/sbin/rndc reload')
    print("An error occured ! Please check daemon.log and your conf.bad")
    exit(1)

dynette.rb | 235 (deleted)
@@ -1,235 +0,0 @@
#!/usr/bin/ruby

require 'rubygems'
require 'sinatra'
require 'data_mapper'
require 'json'
require 'base64'

set :port, 5000
DataMapper.setup(:default, ENV['DATABASE_URL'] || "postgres://dynette:myPassword@localhost/dynette")
DOMAINS = ["nohost.me", "noho.st"]
ALLOWED_IP = ["127.0.0.1"]

class Entry
    include DataMapper::Resource

    property :id, Serial
    property :public_key, String
    property :subdomain, String
    property :current_ip, String
    property :created_at, DateTime

    has n, :ips
end

class Ip
    include DataMapper::Resource

    property :id, Serial
    property :ip_addr, String

    belongs_to :entry
end

class Iplog
    include DataMapper::Resource

    property :ip_addr, String, :key => true
    property :visited_at, DateTime
end

class Ipban
    include DataMapper::Resource

    property :ip_addr, String, :key => true
end

not_found do
    content_type :json
    halt 404, { :error => "Not found" }.to_json
end

before do
    if Ipban.first(:ip_addr => request.ip)
        halt 410, "Your ip is banned from the service"
    end
    unless %w[domains test all ban unban].include? request.path_info.split('/')[1]
        if iplog = Iplog.last(:ip_addr => request.ip)
            if iplog.visited_at.to_time > Time.now - 30
                halt 410, "Please wait 30sec\n"
            else
                iplog.update(:visited_at => Time.now)
            end
        else
            Iplog.create(:ip_addr => request.ip, :visited_at => Time.now)
        end
    end
    content_type :json
end

# Check params
['/test/:subdomain', '/key/:public_key', '/ips/:public_key', '/ban/:ip', '/unban/:ip' ].each do |path|
    before path do
        if params.has_key?("public_key")
            public_key = Base64.decode64(params[:public_key].encode('ascii-8bit'))
            unless public_key.length == 24
                halt 400, { :error => "Key is invalid: #{public_key.to_s.encode('UTF-8', {:invalid => :replace, :undef => :replace, :replace => '?'})}" }.to_json
            end
        end
        if params.has_key?("subdomain")
            unless params[:subdomain].match /^([a-z0-9]{1}([a-z0-9\-]*[a-z0-9])*)(\.[a-z0-9]{1}([a-z0-9\-]*[a-z0-9])*)*(\.[a-z]{1}([a-z0-9\-]*[a-z0-9])*)$/
                halt 400, { :error => "Subdomain is invalid: #{params[:subdomain]}" }.to_json
            end
            unless DOMAINS.include? params[:subdomain].gsub(params[:subdomain].split('.')[0]+'.', '')
                halt 400, { :error => "Subdomain #{params[:subdomain]} is not part of available domains: #{DOMAINS.join(', ')}" }.to_json
            end
        end
        if params.has_key?("ip")
            unless params[:ip].match /^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$/
                halt 400, { :error => "IP is invalid: #{params[:ip]}" }.to_json
            end
        end
    end
end

get '/' do
    "Wanna play the dynette ?"
end

get '/domains' do
    headers['Access-Control-Allow-Origin'] = '*'
    DOMAINS.to_json
end

get '/test/:subdomain' do
    headers['Access-Control-Allow-Origin'] = '*'
    if entry = Entry.first(:subdomain => params[:subdomain])
        halt 409, { :error => "Subdomain already taken: #{entry.subdomain}" }.to_json
    else
        halt 200, "Domain #{params[:subdomain]} is available".to_json
    end
end


post '/key/:public_key' do
    params[:public_key] = Base64.decode64(params[:public_key].encode('ascii-8bit'))
    # Check params
    halt 400, { :error => "Please indicate a subdomain" }.to_json unless params.has_key?("subdomain")

    # If already exists
    if entry = Entry.first(:subdomain => params[:subdomain])
        halt 409, { :error => "Subdomain already taken: #{entry.subdomain}" }.to_json
    end
    if entry = Entry.first(:public_key => params[:public_key])
        halt 409, { :error => "Key already exists for domain #{entry.subdomain}" }.to_json
    end

    # Process
    entry = Entry.new(:public_key => params[:public_key], :subdomain => params[:subdomain], :current_ip => request.ip, :created_at => Time.now)
    entry.ips << Ip.create(:ip_addr => request.ip)
    if entry.save
        halt 201, { :public_key => entry.public_key, :subdomain => entry.subdomain, :current_ip => entry.current_ip }.to_json
    else
        halt 412, { :error => "A problem occured during DNS registration" }.to_json
    end
end

put '/key/:public_key' do
    params[:public_key] = Base64.decode64(params[:public_key].encode('ascii-8bit'))
    entry = Entry.first(:public_key => params[:public_key])
    unless request.ip == entry.current_ip
        entry.ips << Ip.create(:ip_addr => request.ip)
    end
    entry.current_ip = request.ip
    if entry.save
        halt 201, { :public_key => entry.public_key, :subdomain => entry.subdomain, :current_ip => entry.current_ip }.to_json
    else
        halt 412, { :error => "A problem occured during DNS update" }.to_json
    end
end

delete '/key/:public_key' do
    unless ALLOWED_IP.include? request.ip
        status 403
        return "Access denied"
    end
    params[:public_key] = Base64.decode64(params[:public_key].encode('ascii-8bit'))
    if entry = Entry.first(:public_key => params[:public_key])
        Ip.first(:entry_id => entry.id).destroy
        if entry.destroy
            halt 200, "OK".to_json
        else
            halt 412, { :error => "A problem occured during DNS deletion" }.to_json
        end
    end
end

delete '/domains/:subdomain' do
    unless ALLOWED_IP.include? request.ip
        status 403
        return "Access denied"
    end
    if entry = Entry.first(:subdomain => params[:subdomain])
        Ip.first(:entry_id => entry.id).destroy
        if entry.destroy
            halt 200, "OK".to_json
        else
            halt 412, { :error => "A problem occured during DNS deletion" }.to_json
        end
    end
end

get '/all' do
    unless ALLOWED_IP.include? request.ip
        status 403
        return "Access denied"
    end
    Entry.all.to_json
end

get '/all/:domain' do
    unless ALLOWED_IP.include? request.ip
        status 403
        return "Access denied"
    end
    result = []
    Entry.all.each do |entry|
        result.push(entry) if params[:domain] == entry.subdomain.gsub(entry.subdomain.split('.')[0]+'.', '')
    end
    halt 200, result.to_json
end

get '/ips/:public_key' do
    params[:public_key] = Base64.decode64(params[:public_key].encode('ascii-8bit'))
    unless ALLOWED_IP.include? request.ip
        status 403
        return "Access denied"
    end
    ips = []
    Entry.first(:public_key => params[:public_key]).ips.all.each do |ip|
        ips.push(ip.ip_addr)
    end
    ips.to_json
end

get '/ban/:ip' do
    unless ALLOWED_IP.include? request.ip
        status 403
        return "Access denied"
    end
    Ipban.create(:ip_addr => params[:ip])
    Ipban.all.to_json
end

get '/unban/:ip' do
    unless ALLOWED_IP.include? request.ip
        status 403
        return "Access denied"
    end
    Ipban.first(:ip_addr => params[:ip]).destroy
    Ipban.all.to_json
end

#DataMapper.auto_migrate! # Destroy db content
DataMapper.auto_upgrade!

gunicorn.py | 11 (new file)
@@ -0,0 +1,11 @@
command = "/var/www/dynette/venv/bin/gunicorn"
pythonpath = "/var/www/dynette"
workers = 4
user = "dynette"
bind = ['0.0.0.0:9876']
pid = "/run/gunicorn/dynette-pid"
errorlog = "/var/log/dynette/error.log"
accesslog = "/var/log/dynette/access.log"
access_log_format = '%({X-Real-IP}i)s %({X-Forwarded-For}i)s %(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
loglevel = "warning"
capture_output = True

regen_named_conf.py | 24 (new file)
@@ -0,0 +1,24 @@
import os
import yaml
import glob
import jinja2

config = yaml.safe_load(open("config.yml").read())

domains = [{"name": domain, "subdomains": []} for domain in config["DOMAINS"]]

for infos in domains:
    domain = infos["name"]
    for f in glob.glob(config["DB_FOLDER"] + f"*.{domain}.key"):
        key = open(f).read()
        subdomain = f.split("/")[-1].rsplit(".", 1)[0]
        infos["subdomains"].append({"name": subdomain, "key": key})

templateLoader = jinja2.FileSystemLoader(searchpath="./templates/")
templateEnv = jinja2.Environment(loader=templateLoader)
template = templateEnv.get_template("named.conf.j2")
named_conf = template.render(domains=domains)

open('/etc/bind/named.conf.local', 'w').write(named_conf)
os.system('chown -R bind:bind /etc/bind/named.conf.local /var/lib/bind/')
os.system('/usr/sbin/rndc reload')
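
To make the conventions in regen_named_conf.py explicit, here is a small sketch (with made-up paths and key material) of the `<subdomain>.key` naming it globs for under `DB_FOLDER`, and of the `domains` structure it hands to templates/named.conf.j2:

```python
# Sketch only: illustrates the key-file naming and the data structure rendered
# by templates/named.conf.j2. Paths and key material below are made up.
import os

db_folder = "/var/dynette/db/"                    # DB_FOLDER from config.yml.example
key_file = os.path.join(db_folder, "example.nohost.me.key")

# regen_named_conf.py recovers the subdomain by stripping the ".key" suffix,
# i.e. f.split("/")[-1].rsplit(".", 1)[0]:
subdomain = os.path.basename(key_file).rsplit(".", 1)[0]
assert subdomain == "example.nohost.me"

# Structure passed to the Jinja2 template as `domains=`:
domains = [
    {
        "name": "nohost.me",
        "subdomains": [
            {"name": "example.nohost.me", "key": "dummy-base64-tsig-key"},
        ],
    },
]
print(domains)
```

Each subdomain entry becomes one `key ... { algorithm hmac-sha512; ... }` stanza and one set of `grant` rules in the generated named.conf.local.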

requirements.txt | 23 (new file)
@@ -0,0 +1,23 @@
async-timeout==4.0.2
bcrypt==4.0.1
click==8.1.3
commonmark==0.9.1
Deprecated==1.2.13
Flask==2.2.2
Flask-Limiter==3.5.0
gunicorn==20.1.0
importlib-metadata==6.0.0
itsdangerous==2.1.2
Jinja2==3.1.2
limits==3.1.6
MarkupSafe==2.1.2
ordered-set==4.1.0
packaging==23.0
Pygments==2.14.0
PyYAML==6.0
redis==4.5.4
rich==12.6.0
typing-extensions==4.4.0
Werkzeug==2.2.3
wrapt==1.14.1
zipp==3.11.0

templates/named.conf.j2 | 27 (new file)
@@ -0,0 +1,27 @@
{% for domain in domains %}
zone "{{ domain.name }}" {
    type master;
    file "/var/lib/bind/{{ domain.name }}.db";
    update-policy {
{% for subdomain in domain.subdomains %}
        grant {{ subdomain.name }}. name {{ subdomain.name }}. A AAAA TXT MX CAA;
        grant {{ subdomain.name }}. name *.{{ subdomain.name }}. A AAAA;
        grant {{ subdomain.name }}. name mail._domainkey.{{ subdomain.name }}. TXT;
        grant {{ subdomain.name }}. name _dmarc.{{ subdomain.name }}. TXT;
        grant {{ subdomain.name }}. name _xmpp-client._tcp.{{ subdomain.name }}. SRV;
        grant {{ subdomain.name }}. name _xmpp-server._tcp.{{ subdomain.name }}. SRV;
        grant {{ subdomain.name }}. name xmpp-upload.{{ subdomain.name }}. A AAAA CNAME;
        grant {{ subdomain.name }}. name muc.{{ subdomain.name }}. A AAAA CNAME;
        grant {{ subdomain.name }}. name vjud.{{ subdomain.name }}. A AAAA CNAME;
        grant {{ subdomain.name }}. name pubsub.{{ subdomain.name }}. A AAAA CNAME;
{% endfor %}
    };
};

{% for subdomain in domain.subdomains %}
key {{ subdomain.name }}. {
    algorithm hmac-sha512;
    secret "{{ subdomain.key }}";
};
{% endfor %}
{% endfor %}

wsgi.py | 4 (new file)
@@ -0,0 +1,4 @@
from app import app

if __name__ == "__main__":
    app.run()