Merge branch 'unstable' into testing

Commit 388f2fa8be
22 changed files with 359 additions and 185 deletions
.travis.yml (Normal file, 5 additions)
@@ -0,0 +1,5 @@
+language: python
+install: "pip install pytest pyyaml"
+python:
+- "2.7"
+script: "py.test tests"
CONTRIBUTORS.md (Normal file, 88 additions)
@@ -0,0 +1,88 @@
+YunoHost core contributors
+==========================
+
+YunoHost is built and maintained by the YunoHost project community.
+Everyone is encouraged to submit issues and changes, and to contribute in other ways -- see https://yunohost.org/contribute to find out how.
+
+--
+
+Initial YunoHost core was built by Kload & beudbeud, for YunoHost v2.
+
+Most of code was written by Kload and jerome, with help of numerous contributors.
+
+Translation is made by a bunch of lovely people all over the world.
+
+We would like to thank anyone who ever helped the YunoHost project <3
+
+
+YunoHost core Contributors
+--------------------------
+
+- Jérôme Lebleu
+- Kload
+- Laurent 'Bram' Peuch
+- Julien 'ju' Malik
+- opi
+- Aleks
+- Adrien 'beudbeud' Beudin
+- M5oul
+- Valentin 'zamentur' / 'ljf' Grimaud
+- Jocelyn Delalande
+- infertux
+- Taziden
+- ZeHiro
+- Josue-T
+- nahoj
+- a1ex
+- JimboJoe
+- vetetix
+- jellium
+- Sebastien 'sebian' Badia
+- lmangani
+- Julien Vaubourg
+
+
+YunoHost core Translators
+-------------------------
+
+If you want to help translation, please visit https://translate.yunohost.org/projects/yunohost/yunohost/
+
+
+### Dutch
+
+- DUBWiSE
+- marut
+
+### English
+
+- Bugsbane
+
+### French
+
+- aoz roon
+- Genma
+- Jean-Baptiste Holcroft
+- Jérôme Lebleu
+
+### German
+
+- david.bartke
+- Felix Bartels
+- Philip Gatzka
+
+### Hindi
+
+- Anmol
+
+### Italian
+
+- Thomas Bille
+
+### Portuguese
+
+- Deleted User
+
+### Spanish
+
+- Juanu
+
@@ -9,7 +9,9 @@ do_pre_regen() {

  install -D -m 644 rmilter.conf \
    "${pending_dir}/etc/rmilter.conf"
- install -D -m 644 rmilter.socket \
+ # Remove old socket file (we stopped using it, since rspamd 1.3.1)
+ # Regen-conf system need an empty file to delete it
+ install -D -m 644 /dev/null \
    "${pending_dir}/etc/systemd/system/rmilter.socket"
 }

@@ -37,17 +39,19 @@ do_post_regen() {
  sudo chown _rmilter /etc/dkim/*.mail.key
  sudo chmod 400 /etc/dkim/*.mail.key

+ # fix rmilter socket permission (postfix is chrooted in /var/spool/postfix )
+ sudo mkdir -p /var/spool/postfix/run/rmilter
+ sudo chown -R postfix:_rmilter /var/spool/postfix/run/rmilter
+ sudo chmod g+w /var/spool/postfix/run/rmilter
+
  [ -z "$regen_conf_files" ] && exit 0

  # reload systemd daemon
- [[ "$regen_conf_files" =~ rmilter\.socket ]] && {
  sudo systemctl -q daemon-reload
- }

- # ensure that the socket is listening and stop the service - it will be
- # started again by the socket as needed
- sudo systemctl -q start rmilter.socket
- sudo systemctl -q stop rmilter.service 2>&1 || true
+ # Restart rmilter due to the rspamd update
+ # https://rspamd.com/announce/2016/08/01/rspamd-1.3.1.html
+ sudo systemctl -q restart rmilter.service
 }

 FORCE=${2:-0}

@@ -25,10 +25,9 @@ do_post_regen() {
  sudo systemctl restart dovecot
 }

- # ensure that the socket is listening and stop the service - it will be
- # started again by the socket as needed
- sudo systemctl -q start rspamd.socket
- sudo systemctl -q stop rspamd.service 2>&1 || true
+ # Restart rspamd due to the upgrade
+ # https://rspamd.com/announce/2016/08/01/rspamd-1.3.1.html
+ sudo systemctl -q restart rspamd.service
 }

 FORCE=${2:-0}
@@ -14,7 +14,7 @@
 # (?:::f{4,6}:)?(?P<host>[\w\-.^_]+)
 # Values: TEXT
 #
-failregex = access.lua:[1-9]+: authenticate\(\): Connection failed for: .*, client: <HOST>
+failregex = helpers.lua:[1-9]+: authenticate\(\): Connection failed for: .*, client: <HOST>
            ^<HOST> -.*\"POST /yunohost/api/login HTTP/1.1\" 401 22

 # Option: ignoreregex
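Note on the failregex change above: only the Lua source file name changes (access.lua becomes helpers.lua); the rest of the pattern is untouched. Below is a minimal way to sanity-check the new pattern outside fail2ban; the sample log line and the substitution of <HOST> by a plain capture group are assumptions for illustration only, since fail2ban expands <HOST> with its own host-matching expression.

    import re

    # <HOST> is replaced by a simple capture group here; fail2ban normally
    # substitutes its own host-matching expression for it.
    failregex = re.compile(
        r"helpers\.lua:[1-9]+: authenticate\(\): Connection failed for: .*, client: (?P<host>\S+)"
    )

    # Hypothetical nginx error-log excerpt, for illustration only.
    sample = ("2016/09/01 12:00:00 [error] 1234#0: helpers.lua:142: "
              "authenticate(): Connection failed for: admin, client: 203.0.113.5")

    match = failregex.search(sample)
    print(match.group("host") if match else "no match")  # expected: 203.0.113.5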
@@ -141,7 +141,7 @@ smtp_reply_filter = pcre:/etc/postfix/smtp_reply_filter
 # Rmilter
 milter_mail_macros = i {mail_addr} {client_addr} {client_name} {auth_authen}
 milter_protocol = 6
-smtpd_milters = inet:localhost:11000
+smtpd_milters = unix:/run/rmilter/rmilter.sock

 # Skip email without checking if milter has died
 milter_default_action = accept
@@ -5,8 +5,7 @@
 # pidfile - path to pid file
 pidfile = /run/rmilter/rmilter.pid;

-# rmilter is socket-activated under systemd
-bind_socket = fd:3;
+bind_socket = unix:/var/spool/postfix/run/rmilter/rmilter.sock;

 # DKIM signing
 dkim {
@@ -1,5 +0,0 @@
-.include /lib/systemd/system/rmilter.socket
-
-[Socket]
-ListenStream=
-ListenStream=127.0.0.1:11000
@@ -253,6 +253,7 @@
     "certmanager_domain_http_not_working": "It seems that the domain {domain:s} cannot be accessed through HTTP. Please check your DNS and nginx configuration is okay",
     "certmanager_error_no_A_record": "No DNS 'A' record found for {domain:s}. You need to make your domain name point to your machine to be able to install a Let's Encrypt certificate! (If you know what you are doing, use --no-checks to disable those checks.)",
     "certmanager_domain_dns_ip_differs_from_public_ip": "The DNS 'A' record for domain {domain:s} is different from this server IP. If you recently modified your A record, please wait for it to propagate (some DNS propagation checkers are available online). (If you know what you are doing, use --no-checks to disable those checks.)",
+    "certmanager_domain_not_resolved_locally": "The domain {domain:s} cannot be resolved from inside your Yunohost server. This might happen if you recently modified your DNS record. If so, please wait a few hours for it to propagate. If the issue persists, consider adding {domain:s} to /etc/hosts. (If you know what you are doing, use --no-checks to disable those checks.)",
     "certmanager_cannot_read_cert": "Something wrong happened when trying to open current certificate for domain {domain:s} (file: {file:s}), reason: {reason:s}",
     "certmanager_cert_install_success_selfsigned": "Successfully installed a self-signed certificate for domain {domain:s}!",
     "certmanager_cert_install_success": "Successfully installed Let's Encrypt certificate for domain {domain:s}!",

@@ -264,5 +265,6 @@
     "certmanager_conflicting_nginx_file": "Unable to prepare domain for ACME challenge: the nginx configuration file {filepath:s} is conflicting and should be removed first",
     "domain_cannot_remove_main": "Cannot remove main domain. Set a new main domain first",
     "certmanager_self_ca_conf_file_not_found": "Configuration file not found for self-signing authority (file: {file:s})",
+    "certmanager_acme_not_configured_for_domain": "Certificate for domain {domain:s} does not appear to be correctly installed. Please run cert-install for this domain first.",
     "certmanager_unable_to_parse_self_CA_name": "Unable to parse name of self-signing authority (file: {file:s})"
 }
@@ -99,7 +99,7 @@ def app_fetchlist(url=None, name=None):
             m18n.n('custom_appslist_name_required'))

     list_file = '%s/%s.json' % (repo_path, name)
-    if os.system('wget "%s" -O "%s.tmp"' % (url, list_file)) != 0:
+    if os.system('wget --timeout=30 "%s" -O "%s.tmp"' % (url, list_file)) != 0:
         os.remove('%s.tmp' % list_file)
         raise MoulinetteError(errno.EBADR, m18n.n('appslist_retrieve_error'))

@@ -313,8 +313,6 @@ def backup_create(name=None, description=None, output_directory=None,
         link = "%s/%s.tar.gz" % (archives_path, name)
         os.symlink(archive_file, link)

-
-
     # Clean temporary directory
     if tmp_dir != output_directory:
         _clean_tmp_dir()

@@ -323,7 +321,7 @@ def backup_create(name=None, description=None, output_directory=None,

     # Return backup info
     info['name'] = name
-    return { 'archive': info }
+    return {'archive': info}


 def backup_restore(auth, name, hooks=[], ignore_hooks=False,

@@ -601,7 +599,7 @@ def backup_list(with_info=False, human_readable=False):
             d[a] = backup_info(a, human_readable=human_readable)
         result = d

-    return { 'archives': result }
+    return {'archives': result}


 def backup_info(name, with_details=False, human_readable=False):

@@ -645,7 +643,7 @@ def backup_info(name, with_details=False, human_readable=False):
     size = info.get('size', 0)
     if not size:
         tar = tarfile.open(archive_file, "r:gz")
-        size = reduce(lambda x,y: getattr(x, 'size', x)+getattr(y, 'size', y),
+        size = reduce(lambda x, y: getattr(x, 'size', x) + getattr(y, 'size', y),
                       tar.getmembers())
         tar.close()
     if human_readable:
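Aside on the reformatted reduce(...) call above: the getattr dance is needed because after the first step the accumulator is already an int rather than a TarInfo object. An equivalent and arguably clearer formulation is sketched below, purely for illustration; the archive path is a placeholder, not something taken from the commit.

    import tarfile

    archive_file = "/home/yunohost.backup/archives/example.tar.gz"  # placeholder path

    tar = tarfile.open(archive_file, "r:gz")
    # Sum member sizes directly instead of reducing with getattr()
    size = sum(member.size for member in tar.getmembers())
    tar.close()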
@@ -678,7 +676,7 @@ def backup_delete(name):
     archive_file = '%s/%s.tar.gz' % (archives_path, name)

     info_file = "%s/%s.info.json" % (archives_path, name)
-    for backup_file in [archive_file,info_file]:
+    for backup_file in [archive_file, info_file]:
         if not os.path.isfile(backup_file):
             raise MoulinetteError(errno.EIO,
                 m18n.n('backup_archive_name_unknown', name=backup_file))

@@ -31,6 +31,7 @@ import grp
 import smtplib
 import requests
 import subprocess
+import socket
 import dns.resolver
 import glob


@@ -323,7 +324,15 @@ def certificate_renew(auth, domain_list, force=False, no_checks=False, email=Fal
                 continue

             # Does it expire soon?
-            if force or status["validity"] <= VALIDITY_LIMIT:
+            if status["validity"] > VALIDITY_LIMIT and not force:
+                continue
+
+            # Check ACME challenge configured for given domain
+            if not _check_acme_challenge_configuration(domain):
+                logger.warning(m18n.n(
+                    'certmanager_acme_not_configured_for_domain', domain=domain))
+                continue
+
             domain_list.append(domain)

     if len(domain_list) == 0:
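The hunk above inverts the old "if force or status["validity"] <= VALIDITY_LIMIT:" guard into an early continue, which is logically equivalent but keeps the loop body flat and makes room for the new ACME-challenge check. A small sketch of the same early-exit pattern, using placeholder values rather than the module's real constants:

    VALIDITY_LIMIT = 15  # days; placeholder value for illustration

    def should_renew(validity_days, force=False, acme_ready=True):
        # Skip certificates that are still valid, unless renewal is forced
        if validity_days > VALIDITY_LIMIT and not force:
            return False
        # Skip domains whose ACME challenge is not configured
        if not acme_ready:
            return False
        return True

    print(should_renew(90))                   # False: still valid
    print(should_renew(90, force=True))       # True: forced renewal
    print(should_renew(5, acme_ready=False))  # False: ACME challenge missing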
@@ -341,7 +350,7 @@ def certificate_renew(auth, domain_list, force=False, no_checks=False, email=Fal
             status = _get_status(domain)

             # Does it expire soon?
-            if not force or status["validity"] <= VALIDITY_LIMIT:
+            if status["validity"] > VALIDITY_LIMIT and not force:
                 raise MoulinetteError(errno.EINVAL, m18n.n(
                     'certmanager_attempt_to_renew_valid_cert', domain=domain))


@@ -350,6 +359,11 @@ def certificate_renew(auth, domain_list, force=False, no_checks=False, email=Fal
                 raise MoulinetteError(errno.EINVAL, m18n.n(
                     'certmanager_attempt_to_renew_nonLE_cert', domain=domain))

+            # Check ACME challenge configured for given domain
+            if not _check_acme_challenge_configuration(domain):
+                raise MoulinetteError(errno.EINVAL, m18n.n(
+                    'certmanager_acme_not_configured_for_domain', domain=domain))
+
     if staging:
         logger.warning(
             "Please note that you used the --staging option, and that no new certificate will actually be enabled !")

@@ -362,6 +376,7 @@ def certificate_renew(auth, domain_list, force=False, no_checks=False, email=Fal
         try:
             if not no_checks:
                 _check_domain_is_ready_for_ACME(domain)
+
             _fetch_and_enable_new_certificate(domain, staging)

             logger.success(

@@ -487,6 +502,17 @@ location '/.well-known/acme-challenge'
     app_ssowatconf(auth)


+def _check_acme_challenge_configuration(domain):
+    # Check nginx conf file exists
+    nginx_conf_folder = "/etc/nginx/conf.d/%s.d" % domain
+    nginx_conf_file = "%s/000-acmechallenge.conf" % nginx_conf_folder
+
+    if not os.path.exists(nginx_conf_file):
+        return False
+    else:
+        return True
+
+
 def _fetch_and_enable_new_certificate(domain, staging=False):
     # Make sure tmp folder exists
     logger.debug("Making sure tmp folders exists...")
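The new _check_acme_challenge_configuration helper above only tests whether the per-domain nginx snippet exists, so the if/else could be collapsed into a single return. A sketch of that equivalent form, shown for illustration and not part of the commit:

    import os

    def _check_acme_challenge_configuration(domain):
        """Return True if the per-domain ACME challenge nginx snippet exists."""
        nginx_conf_file = "/etc/nginx/conf.d/%s.d/000-acmechallenge.conf" % domain
        return os.path.exists(nginx_conf_file)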
@@ -562,7 +588,9 @@ def _fetch_and_enable_new_certificate(domain, staging=False):
     _set_permissions(new_cert_folder, "root", "root", 0655)

     # Move the private key
-    shutil.move(domain_key_file, os.path.join(new_cert_folder, "key.pem"))
+    domain_key_file_finaldest = os.path.join(new_cert_folder, "key.pem")
+    shutil.move(domain_key_file, domain_key_file_finaldest)
+    _set_permissions(domain_key_file_finaldest, "root", "metronome", 0640)

     # Write the cert
     domain_cert_file = os.path.join(new_cert_folder, "crt.pem")

@@ -785,6 +813,13 @@ def _check_domain_is_ready_for_ACME(domain):
         raise MoulinetteError(errno.EINVAL, m18n.n(
             'certmanager_domain_http_not_working', domain=domain))

+    # Check if domain is resolved locally (Might happen despite the previous
+    # checks because of dns propagation ?... Acme-tiny won't work in that case,
+    # because it explicitly requests() the domain.)
+    if not _domain_is_resolved_locally(public_ip, domain):
+        raise MoulinetteError(errno.EINVAL, m18n.n(
+            'certmanager_domain_not_resolved_locally', domain=domain))
+

 def _dns_ip_match_public_ip(public_ip, domain):
     try:

@@ -803,12 +838,24 @@ def _dns_ip_match_public_ip(public_ip, domain):
 def _domain_is_accessible_through_HTTP(ip, domain):
     try:
         requests.head("http://" + ip, headers={"Host": domain})
-    except Exception:
+    except Exception as e:
+        logger.debug("Couldn't reach domain '%s' by requesting this ip '%s' because: %s" % (domain, ip, e))
         return False

     return True


+def _domain_is_resolved_locally(public_ip, domain):
+    try:
+        ip = socket.gethostbyname(domain)
+    except socket.error as e:
+        logger.debug("Couldn't get domain '%s' ip because: %s" % (domain, e))
+        return False
+
+    logger.debug("Domain '%s' ip is %s, expected it to be 127.0.0.1 or %s" % (domain, ip, public_ip))
+    return ip in ["127.0.0.1", public_ip]
+
+
 def _name_self_CA():
     ca_conf = os.path.join(SSL_DIR, "openssl.ca.cnf")

@@ -66,10 +66,10 @@ def domain_list(auth, filter=None, limit=None, offset=None):
     result = auth.search('ou=domains,dc=yunohost,dc=org', filter, ['virtualdomain'])

     if len(result) > offset and limit > 0:
-        for domain in result[offset:offset+limit]:
+        for domain in result[offset:offset + limit]:
             result_list.append(domain['virtualdomain'][0])

-    return { 'domains': result_list }
+    return {'domains': result_list}


 def domain_add(auth, domain, dyndns=False):

@@ -83,7 +83,7 @@ def domain_add(auth, domain, dyndns=False):
     """
     from yunohost.hook import hook_callback

-    attr_dict = { 'objectClass' : ['mailDomain', 'top'] }
+    attr_dict = {'objectClass': ['mailDomain', 'top']}

     now = datetime.datetime.now()
     timestamp = str(now.year) + str(now.month) + str(now.day)

@@ -113,7 +113,6 @@ def domain_add(auth, domain, dyndns=False):
             raise MoulinetteError(errno.EINVAL,
                 m18n.n('domain_dyndns_root_unknown'))

-
     try:
         yunohost.certificate._certificate_install_selfsigned([domain], False)


@@ -122,7 +121,6 @@ def domain_add(auth, domain, dyndns=False):
     except MoulinetteError:
         raise MoulinetteError(errno.EEXIST, m18n.n('domain_exists'))

-
     attr_dict['virtualdomain'] = domain

     if not auth.add('virtualdomain=%s,ou=domains' % domain, attr_dict):

@@ -133,11 +131,14 @@ def domain_add(auth, domain, dyndns=False):
         service_regen_conf(names=[
             'nginx', 'metronome', 'dnsmasq', 'rmilter'])
         os.system('yunohost app ssowatconf > /dev/null 2>&1')
-    except IOError: pass
+    except IOError:
+        pass
     except:
         # Force domain removal silently
-        try: domain_remove(auth, domain, True)
-        except: pass
+        try:
+            domain_remove(auth, domain, True)
+        except:
+            pass
         raise

     hook_callback('post_domain_add', args=[domain])

@@ -165,7 +166,7 @@ def domain_remove(auth, domain, force=False):

     # Check if apps are installed on the domain
     for app in os.listdir('/etc/yunohost/apps/'):
-        with open('/etc/yunohost/apps/' + app +'/settings.yml') as f:
+        with open('/etc/yunohost/apps/' + app + '/settings.yml') as f:
             try:
                 app_domain = yaml.load(f)['domain']
             except:

@@ -246,7 +247,7 @@ def domain_dns_conf(domain, ttl=None):
         r'^(?P<host>[a-z_\-\.]+)[\s]+([0-9]+[\s]+)?IN[\s]+TXT[\s]+[^"]*'
         '(?=.*(;[\s]*|")v=(?P<v>[^";]+))'
         '(?=.*(;[\s]*|")k=(?P<k>[^";]+))'
-        '(?=.*(;[\s]*|")p=(?P<p>[^";]+))'), dkim_content, re.M|re.S
+        '(?=.*(;[\s]*|")p=(?P<p>[^";]+))'), dkim_content, re.M | re.S
     )
     if dkim:
         result += '\n{host}. {ttl} IN TXT "v={v}; k={k}; p={p}"'.format(

@@ -296,6 +297,7 @@ def _get_maindomain():
         maindomain = f.readline().rstrip()
     return maindomain

+
 def _set_maindomain(domain):
     with open('/etc/yunohost/current_host', 'w') as f:
         f.write(domain)
@@ -94,7 +94,7 @@ def dyndns_subscribe(subscribe_host="dyndns.yunohost.org", domain=None, key=None

     logger.info(m18n.n('dyndns_key_generating'))

-    os.system('cd /etc/yunohost/dyndns && ' \
+    os.system('cd /etc/yunohost/dyndns && '
         'dnssec-keygen -a hmac-md5 -b 128 -r /dev/urandom -n USER %s' % domain)
     os.system('chmod 600 /etc/yunohost/dyndns/*.key /etc/yunohost/dyndns/*.private')


@@ -104,12 +104,14 @@ def dyndns_subscribe(subscribe_host="dyndns.yunohost.org", domain=None, key=None

     # Send subscription
     try:
-        r = requests.post('https://%s/key/%s' % (subscribe_host, base64.b64encode(key)), data={ 'subdomain': domain })
+        r = requests.post('https://%s/key/%s' % (subscribe_host, base64.b64encode(key)), data={'subdomain': domain})
     except requests.ConnectionError:
         raise MoulinetteError(errno.ENETUNREACH, m18n.n('no_internet_connection'))
     if r.status_code != 201:
-        try: error = json.loads(r.text)['error']
-        except: error = "Server error"
+        try:
+            error = json.loads(r.text)['error']
+        except:
+            error = "Server error"
         raise MoulinetteError(errno.EPERM,
                               m18n.n('dyndns_registration_failed', error=error))

@@ -67,14 +67,14 @@ def firewall_allow(protocol, port, ipv4_only=False, ipv6_only=False,
     # Validate protocols
     protocols = ['TCP', 'UDP']
     if protocol != 'Both' and protocol in protocols:
-        protocols = [protocol,]
+        protocols = [protocol, ]

     # Validate IP versions
     ipvs = ['ipv4', 'ipv6']
     if ipv4_only and not ipv6_only:
-        ipvs = ['ipv4',]
+        ipvs = ['ipv4', ]
     elif ipv6_only and not ipv4_only:
-        ipvs = ['ipv6',]
+        ipvs = ['ipv6', ]

     for p in protocols:
         # Iterate over IP versions to add port

@@ -117,7 +117,7 @@ def firewall_disallow(protocol, port, ipv4_only=False, ipv6_only=False,
     # Validate protocols
     protocols = ['TCP', 'UDP']
     if protocol != 'Both' and protocol in protocols:
-        protocols = [protocol,]
+        protocols = [protocol, ]

     # Validate IP versions and UPnP
     ipvs = ['ipv4', 'ipv6']

@@ -125,10 +125,10 @@
     if ipv4_only and ipv6_only:
         upnp = True  # automatically disallow UPnP
     elif ipv4_only:
-        ipvs = ['ipv4',]
+        ipvs = ['ipv4', ]
         upnp = upnp_only
     elif ipv6_only:
-        ipvs = ['ipv6',]
+        ipvs = ['ipv6', ]
         upnp = upnp_only
     elif upnp_only:
         ipvs = []

@@ -178,7 +178,7 @@ def firewall_list(raw=False, by_ip_version=False, list_forwarded=False):
         ports = sorted(set(ports['ipv4']) | set(ports['ipv6']))

     # Format returned dict
-    ret = { "opened_ports": ports }
+    ret = {"opened_ports": ports}
     if list_forwarded:
         # Combine TCP and UDP forwarded ports
         ret['forwarded_ports'] = sorted(

@@ -224,7 +224,7 @@ def firewall_reload(skip_upnp=False):
     # Iterate over ports and add rule
     for protocol in ['TCP', 'UDP']:
         for port in firewall['ipv4'][protocol]:
-            rules.append("iptables -w -A INPUT -p %s --dport %s -j ACCEPT" \
+            rules.append("iptables -w -A INPUT -p %s --dport %s -j ACCEPT"
                          % (protocol, process.quote(str(port))))
     rules += [
         "iptables -w -A INPUT -i lo -j ACCEPT",

@@ -253,7 +253,7 @@
     # Iterate over ports and add rule
     for protocol in ['TCP', 'UDP']:
         for port in firewall['ipv6'][protocol]:
-            rules.append("ip6tables -w -A INPUT -p %s --dport %s -j ACCEPT" \
+            rules.append("ip6tables -w -A INPUT -p %s --dport %s -j ACCEPT"
                          % (protocol, process.quote(str(port))))
     rules += [
         "ip6tables -w -A INPUT -i lo -j ACCEPT",

@@ -308,13 +308,14 @@ def firewall_upnp(action='status', no_refresh=False):
        try:
            # Remove old cron job
            os.remove('/etc/cron.d/yunohost-firewall')
-       except: pass
+       except:
+           pass
        action = 'status'
        no_refresh = False

    if action == 'status' and no_refresh:
        # Only return current state
-       return { 'enabled': enabled }
+       return {'enabled': enabled}
    elif action == 'enable' or (enabled and action == 'status'):
        # Add cron job
        with open(upnp_cron_job, 'w+') as f:

@@ -330,7 +331,8 @@
        try:
            # Remove cron job
            os.remove(upnp_cron_job)
-       except: pass
+       except:
+           pass
        enabled = False
        if action == 'status':
            no_refresh = True

@@ -364,7 +366,8 @@
            if upnpc.getspecificportmapping(port, protocol):
                try:
                    upnpc.deleteportmapping(port, protocol)
-               except: pass
+               except:
+                   pass
            if not enabled:
                continue
            try:

@@ -403,7 +406,7 @@

    if action == 'enable' and not enabled:
        raise MoulinetteError(errno.ENXIO, m18n.n('upnp_port_open_failed'))
-   return { 'enabled': enabled }
+   return {'enabled': enabled}


 def firewall_stop():

@@ -444,12 +447,14 @@ def _get_ssh_port(default=22):
         pass
     return default

+
 def _update_firewall_file(rules):
     """Make a backup and write new rules to firewall file"""
     os.system("cp {0} {0}.old".format(firewall_file))
     with open(firewall_file, 'w') as f:
         yaml.safe_dump(rules, f, default_flow_style=False)

+
 def _on_rule_command_error(returncode, cmd, output):
     """Callback for rules commands error"""
     # Log error and continue commands execution
@@ -87,13 +87,13 @@ def monitor_disk(units=None, mountpoint=None, human_readable=False):
     # Retrieve monitoring for unit(s)
     for u in units:
         if u == 'io':
-            ## Define setter
+            # Define setter
             if len(units) > 1:
                 def _set(dn, dvalue):
                     try:
                         result[dn][u] = dvalue
                     except KeyError:
-                        result[dn] = { u: dvalue }
+                        result[dn] = {u: dvalue}
             else:
                 def _set(dn, dvalue):
                     result[dn] = dvalue

@@ -111,13 +111,13 @@ def monitor_disk(units=None, mountpoint=None, human_readable=False):
             for dname in devices_names:
                 _set(dname, 'not-available')
         elif u == 'filesystem':
-            ## Define setter
+            # Define setter
             if len(units) > 1:
                 def _set(dn, dvalue):
                     try:
                         result[dn][u] = dvalue
                     except KeyError:
-                        result[dn] = { u: dvalue }
+                        result[dn] = {u: dvalue}
             else:
                 def _set(dn, dvalue):
                     result[dn] = dvalue

@@ -183,11 +183,11 @@ def monitor_network(units=None, human_readable=False):
                 smtp_check = m18n.n('network_check_smtp_ko')

             try:
-                answers = dns.resolver.query(domain,'MX')
+                answers = dns.resolver.query(domain, 'MX')
                 mx_check = {}
                 i = 0
                 for server in answers:
-                    mx_id = 'mx%s' %i
+                    mx_id = 'mx%s' % i
                     mx_check[mx_id] = server
                     i = i + 1
             except:

@@ -307,7 +307,7 @@ def monitor_update_stats(period):

     stats = _retrieve_stats(period)
     if not stats:
-        stats = { 'disk': {}, 'network': {}, 'system': {}, 'timestamp': [] }
+        stats = {'disk': {}, 'network': {}, 'system': {}, 'timestamp': []}

     monitor = None
     # Get monitoring stats

@@ -357,7 +357,7 @@ def monitor_update_stats(period):
         if 'usage' in stats['network'] and iname in stats['network']['usage']:
             curr = stats['network']['usage'][iname]
         net_usage[iname] = _append_to_stats(curr, values, 'time_since_update')
-    stats['network'] = { 'usage': net_usage, 'infos': monitor['network']['infos'] }
+    stats['network'] = {'usage': net_usage, 'infos': monitor['network']['infos']}

     # Append system stats
     for unit, values in monitor['system'].items():

@@ -530,7 +530,7 @@ def binary_to_human(n, customary=False):
     symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
     prefix = {}
     for i, s in enumerate(symbols):
-        prefix[s] = 1 << (i+1)*10
+        prefix[s] = 1 << (i + 1) * 10
     for s in reversed(symbols):
         if n >= prefix[s]:
             value = float(n) / prefix[s]
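For context on the reformatted shift above: 1 << (i + 1) * 10 builds successive powers of 1024, so the prefix table maps 'K' to 1024, 'M' to 1048576, and so on. A quick check, shown only for illustration:

    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    prefix = {s: 1 << (i + 1) * 10 for i, s in enumerate(symbols)}

    print(prefix['K'])  # 1024
    print(prefix['M'])  # 1048576
    print(prefix['G'])  # 1073741824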
@@ -590,7 +590,7 @@ def _save_stats(stats, period, date=None):
     # Limit stats
     if date is None:
         t = stats['timestamp']
-        limit = { 'day': 86400, 'week': 604800, 'month': 2419200 }
+        limit = {'day': 86400, 'week': 604800, 'month': 2419200}
         if (t[len(t) - 1] - t[0]) > limit[period]:
             begin = t[len(t) - 1] - limit[period]
             stats = _filter_stats(stats, begin)

@@ -612,7 +612,7 @@ def _monitor_all(period=None, since=None):
         since -- Timestamp of the stats beginning

     """
-    result = { 'disk': {}, 'network': {}, 'system': {} }
+    result = {'disk': {}, 'network': {}, 'system': {}}

     # Real-time stats
     if period == 'day' and since is None:

@@ -697,7 +697,7 @@ def _calculate_stats_mean(stats):
                 s[k] = _mean(v, t, ts)
             elif isinstance(v, list):
                 try:
-                    nums = [ float(x * t[i]) for i, x in enumerate(v) ]
+                    nums = [float(x * t[i]) for i, x in enumerate(v)]
                 except:
                     pass
                 else:
@@ -60,9 +60,9 @@ def service_add(name, status=None, log=None, runlevel=None):
     services = _get_services()

     if not status:
-        services[name] = { 'status': 'service' }
+        services[name] = {'status': 'service'}
     else:
-        services[name] = { 'status': status }
+        services[name] = {'status': status}

     if log is not None:
         services[name]['log'] = log

@@ -211,7 +211,7 @@ def service_status(names=[]):
         if 'runlevel' in services[name].keys():
             runlevel = int(services[name]['runlevel'])

-        result[name] = { 'status': 'unknown', 'loaded': 'unknown' }
+        result[name] = {'status': 'unknown', 'loaded': 'unknown'}

         # Retrieve service status
         try:

@@ -261,7 +261,7 @@ def service_log(name, number=50):

     for log_path in log_list:
         if os.path.isdir(log_path):
-            for log in [ f for f in os.listdir(log_path) if os.path.isfile(os.path.join(log_path, f)) and f[-4:] == '.log' ]:
+            for log in [f for f in os.listdir(log_path) if os.path.isfile(os.path.join(log_path, f)) and f[-4:] == '.log']:
                 result[os.path.join(log_path, log)] = _tail(os.path.join(log_path, log), int(number))
         else:
             result[log_path] = _tail(log_path, int(number))

@@ -314,13 +314,14 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False,
     common_args = [1 if force else 0, 1 if dry_run else 0]

     # Execute hooks for pre-regen
-    pre_args = ['pre',] + common_args
+    pre_args = ['pre', ] + common_args
+
     def _pre_call(name, priority, path, args):
         # create the pending conf directory for the service
         service_pending_path = os.path.join(pending_conf_dir, name)
         filesystem.mkdir(service_pending_path, 0755, True, uid='admin')
         # return the arguments to pass to the script
-        return pre_args + [service_pending_path,]
+        return pre_args + [service_pending_path, ]
     pre_result = hook_callback('conf_regen', names, pre_callback=_pre_call)

     # Update the services name

@@ -336,7 +337,7 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False,
     # Iterate over services and process pending conf
     for service, conf_files in _get_pending_conf(names).items():
         logger.info(m18n.n(
-            'service_regenconf_pending_applying' if not dry_run else \
+            'service_regenconf_pending_applying' if not dry_run else
            'service_regenconf_dry_pending_applying',
            service=service))


@@ -444,7 +445,7 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False,
             continue
         elif not failed_regen:
             logger.success(m18n.n(
-                'service_conf_updated' if not dry_run else \
+                'service_conf_updated' if not dry_run else
                'service_conf_would_be_updated',
                service=service))
         if succeed_regen and not dry_run:

@@ -461,14 +462,15 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False,
         return result

     # Execute hooks for post-regen
-    post_args = ['post',] + common_args
+    post_args = ['post', ] + common_args
+
     def _pre_call(name, priority, path, args):
         # append coma-separated applied changes for the service
         if name in result and result[name]['applied']:
             regen_conf_files = ','.join(result[name]['applied'].keys())
         else:
             regen_conf_files = ''
-        return post_args + [regen_conf_files,]
+        return post_args + [regen_conf_files, ]
     hook_callback('conf_regen', names, pre_callback=_pre_call)

     return result

@@ -556,7 +558,8 @@ def _tail(file, n, offset=None):
                 return lines[-to_read:offset and -offset or None]
             avg_line_length *= 1.3

-    except IOError: return []
+    except IOError:
+        return []


 def _get_files_diff(orig_file, new_file, as_string=False, skip_header=True):
@@ -178,7 +178,8 @@ def tools_postinstall(domain, password, ignore_dyndns=False):

     Keyword argument:
         domain -- YunoHost main domain
-        ignore_dyndns -- Do not subscribe domain to a DynDNS service
+        ignore_dyndns -- Do not subscribe domain to a DynDNS service (only
+                         needed for nohost.me, noho.st domains)
         password -- YunoHost admin password

     """

@@ -203,6 +204,10 @@ def tools_postinstall(domain, password, ignore_dyndns=False):
             else:
                 raise MoulinetteError(errno.EEXIST,
                                       m18n.n('dyndns_unavailable'))
+        else:
+            dyndns = False
+    else:
+        dyndns = False

     logger.info(m18n.n('yunohost_installing'))


@@ -296,7 +301,6 @@ def tools_postinstall(domain, password, ignore_dyndns=False):
     os.system('service yunohost-firewall start')

     service_regen_conf(force=True)

     logger.success(m18n.n('yunohost_configured'))

-

@@ -416,7 +420,7 @@ def tools_upgrade(auth, ignore_apps=False, ignore_packages=False):
                 apt.progress.base.InstallProgress())
         except Exception as e:
             failure = True
-            logging.warning('unable to upgrade packages: %s' % str(e))
+            logger.warning('unable to upgrade packages: %s' % str(e))
             logger.error(m18n.n('packages_upgrade_failed'))
         else:
             logger.info(m18n.n('done'))

@@ -428,7 +432,7 @@ def tools_upgrade(auth, ignore_apps=False, ignore_packages=False):
             app_upgrade(auth)
         except Exception as e:
             failure = True
-            logging.warning('unable to upgrade apps: %s' % str(e))
+            logger.warning('unable to upgrade apps: %s' % str(e))
             logger.error(m18n.n('app_upgrade_failed'))

     if not failure:
@ -50,12 +50,12 @@ def user_list(auth, fields=None, filter=None, limit=None, offset=None):
|
||||||
fields -- fields to fetch
|
fields -- fields to fetch
|
||||||
|
|
||||||
"""
|
"""
|
||||||
user_attrs = { 'uid': 'username',
|
user_attrs = {'uid': 'username',
|
||||||
'cn': 'fullname',
|
'cn': 'fullname',
|
||||||
'mail': 'mail',
|
'mail': 'mail',
|
||||||
'maildrop': 'mail-forward',
|
'maildrop': 'mail-forward',
|
||||||
'mailuserquota': 'mailbox-quota' }
|
'mailuserquota': 'mailbox-quota'}
|
||||||
attrs = [ 'uid' ]
|
attrs = ['uid']
|
||||||
users = {}
|
users = {}
|
||||||
|
|
||||||
# Set default arguments values
|
# Set default arguments values
|
||||||
|
@ -74,12 +74,12 @@ def user_list(auth, fields=None, filter=None, limit=None, offset=None):
|
||||||
raise MoulinetteError(errno.EINVAL,
|
raise MoulinetteError(errno.EINVAL,
|
||||||
m18n.n('field_invalid', attr))
|
m18n.n('field_invalid', attr))
|
||||||
else:
|
else:
|
||||||
attrs = [ 'uid', 'cn', 'mail', 'mailuserquota' ]
|
attrs = ['uid', 'cn', 'mail', 'mailuserquota']
|
||||||
|
|
||||||
result = auth.search('ou=users,dc=yunohost,dc=org', filter, attrs)
|
result = auth.search('ou=users,dc=yunohost,dc=org', filter, attrs)
|
||||||
|
|
||||||
if len(result) > offset and limit > 0:
|
if len(result) > offset and limit > 0:
|
||||||
for user in result[offset:offset+limit]:
|
for user in result[offset:offset + limit]:
|
||||||
entry = {}
|
entry = {}
|
||||||
for attr, values in user.items():
|
for attr, values in user.items():
|
||||||
try:
|
try:
|
||||||
|
@ -88,7 +88,7 @@ def user_list(auth, fields=None, filter=None, limit=None, offset=None):
|
||||||
pass
|
pass
|
||||||
uid = entry[user_attrs['uid']]
|
uid = entry[user_attrs['uid']]
|
||||||
users[uid] = entry
|
users[uid] = entry
|
||||||
return { 'users' : users }
|
return {'users': users}
|
||||||
|
|
||||||
|
|
||||||
def user_create(auth, username, firstname, lastname, mail, password,
|
def user_create(auth, username, firstname, lastname, mail, password,
|
||||||
|
@ -112,8 +112,8 @@ def user_create(auth, username, firstname, lastname, mail, password,
|
||||||
|
|
||||||
# Validate uniqueness of username and mail in LDAP
|
# Validate uniqueness of username and mail in LDAP
|
||||||
auth.validate_uniqueness({
|
auth.validate_uniqueness({
|
||||||
'uid' : username,
|
'uid': username,
|
||||||
'mail' : mail
|
'mail': mail
|
||||||
})
|
})
|
||||||
|
|
||||||
# Validate uniqueness of username in system users
|
# Validate uniqueness of username in system users
|
||||||
|
@ -125,10 +125,10 @@ def user_create(auth, username, firstname, lastname, mail, password,
|
||||||
raise MoulinetteError(errno.EEXIST, m18n.n('system_username_exists'))
|
raise MoulinetteError(errno.EEXIST, m18n.n('system_username_exists'))
|
||||||
|
|
||||||
# Check that the mail domain exists
|
# Check that the mail domain exists
|
||||||
if mail[mail.find('@')+1:] not in domain_list(auth)['domains']:
|
if mail[mail.find('@') + 1:] not in domain_list(auth)['domains']:
|
||||||
raise MoulinetteError(errno.EINVAL,
|
raise MoulinetteError(errno.EINVAL,
|
||||||
m18n.n('mail_domain_unknown',
|
m18n.n('mail_domain_unknown',
|
||||||
domain=mail[mail.find('@')+1:]))
|
domain=mail[mail.find('@') + 1:]))
|
||||||
|
|
||||||
# Get random UID/GID
|
# Get random UID/GID
|
||||||
uid_check = gid_check = 0
|
uid_check = gid_check = 0
|
||||||
|
@ -141,24 +141,24 @@ def user_create(auth, username, firstname, lastname, mail, password,
|
||||||
fullname = '%s %s' % (firstname, lastname)
|
fullname = '%s %s' % (firstname, lastname)
|
||||||
rdn = 'uid=%s,ou=users' % username
|
rdn = 'uid=%s,ou=users' % username
|
||||||
char_set = string.ascii_uppercase + string.digits
|
char_set = string.ascii_uppercase + string.digits
|
||||||
salt = ''.join(random.sample(char_set,8))
|
salt = ''.join(random.sample(char_set, 8))
|
||||||
salt = '$1$' + salt + '$'
|
salt = '$1$' + salt + '$'
|
||||||
user_pwd = '{CRYPT}' + crypt.crypt(str(password), salt)
|
user_pwd = '{CRYPT}' + crypt.crypt(str(password), salt)
|
||||||
attr_dict = {
|
attr_dict = {
|
||||||
'objectClass' : ['mailAccount', 'inetOrgPerson', 'posixAccount'],
|
'objectClass': ['mailAccount', 'inetOrgPerson', 'posixAccount'],
|
||||||
'givenName' : firstname,
|
'givenName': firstname,
|
||||||
'sn' : lastname,
|
'sn': lastname,
|
||||||
'displayName' : fullname,
|
'displayName': fullname,
|
||||||
'cn' : fullname,
|
'cn': fullname,
|
||||||
'uid' : username,
|
'uid': username,
|
||||||
'mail' : mail,
|
'mail': mail,
|
||||||
'maildrop' : username,
|
+        'maildrop': username,
-        'mailuserquota' : mailbox_quota,
+        'mailuserquota': mailbox_quota,
-        'userPassword' : user_pwd,
+        'userPassword': user_pwd,
-        'gidNumber' : uid,
+        'gidNumber': uid,
-        'uidNumber' : uid,
+        'uidNumber': uid,
-        'homeDirectory' : '/home/' + username,
+        'homeDirectory': '/home/' + username,
-        'loginShell' : '/bin/false'
+        'loginShell': '/bin/false'
     }

     # If it is the first user, add some aliases

@@ -166,12 +166,12 @@ def user_create(auth, username, firstname, lastname, mail, password,
         with open('/etc/yunohost/current_host') as f:
             main_domain = f.readline().rstrip()
         aliases = [
-            'root@'+ main_domain,
+            'root@' + main_domain,
-            'admin@'+ main_domain,
+            'admin@' + main_domain,
-            'webmaster@'+ main_domain,
+            'webmaster@' + main_domain,
-            'postmaster@'+ main_domain,
+            'postmaster@' + main_domain,
         ]
-        attr_dict['mail'] = [ attr_dict['mail'] ] + aliases
+        attr_dict['mail'] = [attr_dict['mail']] + aliases

         # If exists, remove the redirection from the SSO
         try:

@@ -192,7 +192,6 @@ def user_create(auth, username, firstname, lastname, mail, password,
             raise MoulinetteError(errno.EPERM,
                                   m18n.n('ssowat_persistent_conf_write_error', error=e.strerror))

-
     if auth.add(rdn, attr_dict):
         # Invalidate passwd to take user creation into account
         subprocess.call(['nscd', '-i', 'passwd'])

@@ -200,7 +199,7 @@ def user_create(auth, username, firstname, lastname, mail, password,
         # Update SFTP user group
         memberlist = auth.search(filter='cn=sftpusers', attrs=['memberUid'])[0]['memberUid']
         memberlist.append(username)
-        if auth.update('cn=sftpusers,ou=groups', { 'memberUid': memberlist }):
+        if auth.update('cn=sftpusers,ou=groups', {'memberUid': memberlist}):
             try:
                 # Attempt to create user home folder
                 subprocess.check_call(

@@ -210,12 +209,12 @@ def user_create(auth, username, firstname, lastname, mail, password,
                 logger.warning(m18n.n('user_home_creation_failed'),
                                exc_info=1)
             app_ssowatconf(auth)
-            #TODO: Send a welcome mail to user
+            # TODO: Send a welcome mail to user
             logger.success(m18n.n('user_created'))
             hook_callback('post_user_create',
                           args=[username, mail, password, firstname, lastname])

-            return { 'fullname' : fullname, 'username' : username, 'mail' : mail }
+            return {'fullname': fullname, 'username': username, 'mail': mail}

     raise MoulinetteError(169, m18n.n('user_creation_failed'))

@@ -238,9 +237,11 @@ def user_delete(auth, username, purge=False):

     # Update SFTP user group
     memberlist = auth.search(filter='cn=sftpusers', attrs=['memberUid'])[0]['memberUid']
-    try: memberlist.remove(username)
-    except: pass
-    if auth.update('cn=sftpusers,ou=groups', { 'memberUid': memberlist }):
+    try:
+        memberlist.remove(username)
+    except:
+        pass
+    if auth.update('cn=sftpusers,ou=groups', {'memberUid': memberlist}):
         if purge:
             subprocess.call(['rm', '-rf', '/home/{0}'.format(username)])
         else:
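The reformatted block above keeps the original bare `except:`. Since `list.remove()` only raises `ValueError` when the element is absent, a narrower variant would look like this (a standalone sketch, not something this commit changes):

    memberlist = ['alice', 'bob']
    username = 'charlie'
    try:
        memberlist.remove(username)
    except ValueError:
        # username was not in the SFTP group; nothing to remove
        pass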
@@ -298,34 +299,34 @@ def user_update(auth, username, firstname=None, lastname=None, mail=None,

     if change_password:
         char_set = string.ascii_uppercase + string.digits
-        salt = ''.join(random.sample(char_set,8))
+        salt = ''.join(random.sample(char_set, 8))
         salt = '$1$' + salt + '$'
         new_attr_dict['userPassword'] = '{CRYPT}' + crypt.crypt(str(change_password), salt)

     if mail:
-        auth.validate_uniqueness({ 'mail': mail })
+        auth.validate_uniqueness({'mail': mail})
-        if mail[mail.find('@')+1:] not in domains:
+        if mail[mail.find('@') + 1:] not in domains:
             raise MoulinetteError(errno.EINVAL,
                                   m18n.n('mail_domain_unknown',
-                                         domain=mail[mail.find('@')+1:]))
+                                         domain=mail[mail.find('@') + 1:]))
         del user['mail'][0]
         new_attr_dict['mail'] = [mail] + user['mail']

     if add_mailalias:
         if not isinstance(add_mailalias, list):
-            add_mailalias = [ add_mailalias ]
+            add_mailalias = [add_mailalias]
         for mail in add_mailalias:
-            auth.validate_uniqueness({ 'mail': mail })
+            auth.validate_uniqueness({'mail': mail})
-            if mail[mail.find('@')+1:] not in domains:
+            if mail[mail.find('@') + 1:] not in domains:
                 raise MoulinetteError(errno.EINVAL,
                                       m18n.n('mail_domain_unknown',
-                                             domain=mail[mail.find('@')+1:]))
+                                             domain=mail[mail.find('@') + 1:]))
             user['mail'].append(mail)
         new_attr_dict['mail'] = user['mail']

     if remove_mailalias:
         if not isinstance(remove_mailalias, list):
-            remove_mailalias = [ remove_mailalias ]
+            remove_mailalias = [remove_mailalias]
         for mail in remove_mailalias:
             if len(user['mail']) > 1 and mail in user['mail'][1:]:
                 user['mail'].remove(mail)
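A note on the password branch in the hunk above: a `$1$<salt>$` prefix makes `crypt.crypt()` produce an MD5-crypt hash, and the `{CRYPT}` prefix tells the LDAP server to verify it with the system crypt scheme. A minimal standalone sketch of the same idea (the literal password is illustrative only):

    import crypt
    import random
    import string

    char_set = string.ascii_uppercase + string.digits
    salt = '$1$' + ''.join(random.sample(char_set, 8)) + '$'
    # value of the kind stored in the userPassword attribute
    hashed = '{CRYPT}' + crypt.crypt('s3cret-password', salt)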
@@ -336,7 +337,7 @@ def user_update(auth, username, firstname=None, lastname=None, mail=None,

     if add_mailforward:
         if not isinstance(add_mailforward, list):
-            add_mailforward = [ add_mailforward ]
+            add_mailforward = [add_mailforward]
         for mail in add_mailforward:
             if mail in user['maildrop'][1:]:
                 continue

@@ -345,7 +346,7 @@ def user_update(auth, username, firstname=None, lastname=None, mail=None,

     if remove_mailforward:
         if not isinstance(remove_mailforward, list):
-            remove_mailforward = [ remove_mailforward ]
+            remove_mailforward = [remove_mailforward]
         for mail in remove_mailforward:
             if len(user['maildrop']) > 1 and mail in user['maildrop'][1:]:
                 user['maildrop'].remove(mail)

@@ -378,9 +379,9 @@ def user_info(auth, username):
     ]

     if len(username.split('@')) is 2:
-        filter = 'mail='+ username
+        filter = 'mail=' + username
     else:
-        filter = 'uid='+ username
+        filter = 'uid=' + username

     result = auth.search('ou=users,dc=yunohost,dc=org', filter, user_attrs)


@@ -436,8 +437,8 @@ def user_info(auth, username):
             storage_use += ' (%s%%)' % percentage

         result_dict['mailbox-quota'] = {
-            'limit' : userquota if is_limited else m18n.n('unlimit'),
+            'limit': userquota if is_limited else m18n.n('unlimit'),
-            'use' : storage_use
+            'use': storage_use
         }

     if result:

@@ -445,8 +446,9 @@ def user_info(auth, username):
     else:
         raise MoulinetteError(167, m18n.n('user_info_failed'))

+
 def _convertSize(num, suffix=''):
-    for unit in ['K','M','G','T','P','E','Z']:
+    for unit in ['K', 'M', 'G', 'T', 'P', 'E', 'Z']:
         if abs(num) < 1024.0:
             return "%3.1f%s%s" % (num, unit, suffix)
         num /= 1024.0

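The `_convertSize` helper shown above divides the value by 1024 until it drops below the next unit. Assuming the quota figures it receives are expressed in kilobytes (an assumption, not stated in the hunk), it behaves roughly like this:

    print(_convertSize(512))            # 512.0K
    print(_convertSize(2048))           # 2.0M
    print(_convertSize(3 * 1024 ** 2))  # 3.0G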
@@ -424,6 +424,7 @@ def get_installed_version(*pkgnames, **kwargs):
         return versions[pkgnames[0]]
     return versions

+
 def meets_version_specifier(pkgname, specifier):
     """Check if a package installed version meets specifier"""
     spec = SpecifierSet(specifier)
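`SpecifierSet` comes from the `packaging` library: it parses a PEP 440 version specifier and supports membership tests against version numbers, which is presumably how `meets_version_specifier` goes on to compare the installed version. A small sketch of that library API (not of the surrounding YunoHost code):

    from packaging.specifiers import SpecifierSet
    from packaging.version import Version

    spec = SpecifierSet(">=1.3.1")
    print(Version("1.3.2") in spec)  # True
    print("1.2.0" in spec)           # False -- plain strings are parsed as versions too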

20 src/yunohost/vendor/acme_tiny/acme_tiny.py (vendored)

@@ -1,5 +1,17 @@
 #!/usr/bin/env python
-import argparse, subprocess, json, os, sys, base64, binascii, time, hashlib, re, copy, textwrap, logging
+import argparse
+import subprocess
+import json
+import os
+import sys
+import base64
+import binascii
+import time
+import hashlib
+import re
+import copy
+import textwrap
+import logging
 try:
     from urllib.request import urlopen  # Python 3
 except ImportError:

@@ -12,6 +24,7 @@ LOGGER = logging.getLogger(__name__)
 LOGGER.addHandler(logging.StreamHandler())
 LOGGER.setLevel(logging.INFO)

+
 def get_crt(account_key, csr, acme_dir, log=LOGGER, CA=DEFAULT_CA):
     # helper function base64 encode for jose spec
     def _b64(b):

@@ -26,7 +39,7 @@ def get_crt(account_key, csr, acme_dir, log=LOGGER, CA=DEFAULT_CA):
         raise IOError("OpenSSL Error: {0}".format(err))
     pub_hex, pub_exp = re.search(
         r"modulus:\n\s+00:([a-f0-9\:\s]+?)\npublicExponent: ([0-9]+)",
-        out.decode('utf8'), re.MULTILINE|re.DOTALL).groups()
+        out.decode('utf8'), re.MULTILINE | re.DOTALL).groups()
     pub_exp = "{0:x}".format(int(pub_exp))
     pub_exp = "0{0}".format(pub_exp) if len(pub_exp) % 2 else pub_exp
     header = {

@@ -72,7 +85,7 @@ def get_crt(account_key, csr, acme_dir, log=LOGGER, CA=DEFAULT_CA):
     common_name = re.search(r"Subject:.*? CN=([^\s,;/]+)", out.decode('utf8'))
     if common_name is not None:
         domains.add(common_name.group(1))
-    subject_alt_names = re.search(r"X509v3 Subject Alternative Name: \n +([^\n]+)\n", out.decode('utf8'), re.MULTILINE|re.DOTALL)
+    subject_alt_names = re.search(r"X509v3 Subject Alternative Name: \n +([^\n]+)\n", out.decode('utf8'), re.MULTILINE | re.DOTALL)
     if subject_alt_names is not None:
         for san in subject_alt_names.group(1).split(", "):
             if san.startswith("DNS:"):

@@ -165,6 +178,7 @@ def get_crt(account_key, csr, acme_dir, log=LOGGER, CA=DEFAULT_CA):
     return """-----BEGIN CERTIFICATE-----\n{0}\n-----END CERTIFICATE-----\n""".format(
         "\n".join(textwrap.wrap(base64.b64encode(result).decode('utf8'), 64)))

+
 def main(argv):
     parser = argparse.ArgumentParser(
         formatter_class=argparse.RawDescriptionHelpFormatter,

4 tests/test_actionmap.py (Normal file)

@@ -0,0 +1,4 @@
+import yaml
+
+def test_yaml_syntax():
+    yaml.load(open("data/actionsmap/yunohost.yml"))
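The new test just checks that the actions map YAML parses under pytest. Newer PyYAML releases warn when `yaml.load()` is called without an explicit Loader; a safer equivalent sketch (an assumption about later PyYAML versions, not part of this commit) would be:

    import yaml

    def test_yaml_syntax():
        # safe_load still validates the syntax but refuses arbitrary
        # Python object construction
        with open("data/actionsmap/yunohost.yml") as f:
            yaml.safe_load(f)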