Merge branch 'stretch-unstable' into dump-app-debug-extract-from-the-core

This commit is contained in:
Alexandre Aubin 2019-11-15 16:38:14 +01:00 committed by GitHub
commit 39e109a5b6
44 changed files with 2597 additions and 2429 deletions


@ -441,6 +441,21 @@ domain:
- !!str ^[0-9]+$
- "pattern_positive_number"
### domain_maindomain()
main-domain:
action_help: Check the current main domain, or change it
deprecated_alias:
- maindomain
api:
- GET /domains/main
- PUT /domains/main
arguments:
-n:
full: --new-main-domain
help: Change the current main domain
extra:
pattern: *pattern_domain
### certificate_status()
cert-status:
action_help: List status of current certificates (all by default).
@ -528,38 +543,6 @@ app:
category_help: Manage apps
actions:
### app_fetchlist()
fetchlist:
action_help: Fetch application lists from app servers, or register a new one.
api: PUT /appslists
arguments:
-n:
full: --name
help: Name of the list to fetch (fetches all registered lists if empty)
extra:
pattern: &pattern_listname
- !!str ^[a-z0-9_]+$
- "pattern_listname"
-u:
full: --url
help: URL of a new application list to register. To be specified with -n.
### app_listlists()
listlists:
action_help: List registered application lists
api: GET /appslists
### app_removelist()
removelist:
action_help: Remove and forget about a given application list
api: DELETE /appslists
arguments:
name:
help: Name of the list to remove
extra:
ask: ask_list_to_remove
pattern: *pattern_listname
### app_list()
list:
action_help: List apps
@ -698,30 +681,6 @@ app:
help: Delete the key
action: store_true
### app_checkport()
checkport:
action_help: Check availability of a local port
api: GET /tools/checkport
deprecated: true
arguments:
port:
help: Port to check
extra:
pattern: &pattern_port
- !!str ^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
- "pattern_port"
### app_checkurl()
checkurl:
action_help: Check availability of a web path
api: GET /tools/checkurl
deprecated: True
arguments:
url:
help: Url to check
-a:
full: --app
help: Write domain & path to app settings for further checks
### app_register_url()
register-url:
@ -736,32 +695,6 @@ app:
help: The path to be registered (e.g. /coffee)
### app_initdb()
initdb:
action_help: Create database and initialize it with optionnal attached script
api: POST /tools/initdb
deprecated: true
arguments:
user:
help: Name of the DB user
-p:
full: --password
help: Password of the DB (generated unless set)
-d:
full: --db
help: DB name (user unless set)
-s:
full: --sql
help: Initial SQL file
### app_debug()
debug:
action_help: Display all debug informations for an application
api: GET /apps/<app>/debug
arguments:
app:
help: App name
### app_makedefault()
makedefault:
action_help: Redirect domain root to an app
@ -968,147 +901,6 @@ backup:
pattern: *pattern_backup_archive_name
#############################
# Monitor #
#############################
monitor:
category_help: Monitor the server
actions:
### monitor_disk()
disk:
action_help: Monitor disk space and usage
api: GET /monitor/disk
arguments:
-f:
full: --filesystem
help: Show filesystem disk space
action: append_const
const: filesystem
dest: units
-t:
full: --io
help: Show I/O throughput
action: append_const
const: io
dest: units
-m:
full: --mountpoint
help: Monitor only the device mounted on MOUNTPOINT
action: store
-H:
full: --human-readable
help: Print sizes in human readable format
action: store_true
### monitor_network()
network:
action_help: Monitor network interfaces
api: GET /monitor/network
arguments:
-u:
full: --usage
help: Show interfaces bit rates
action: append_const
const: usage
dest: units
-i:
full: --infos
help: Show network informations
action: append_const
const: infos
dest: units
-c:
full: --check
help: Check network configuration
action: append_const
const: check
dest: units
-H:
full: --human-readable
help: Print sizes in human readable format
action: store_true
### monitor_system()
system:
action_help: Monitor system informations and usage
api: GET /monitor/system
arguments:
-m:
full: --memory
help: Show memory usage
action: append_const
const: memory
dest: units
-c:
full: --cpu
help: Show CPU usage and load
action: append_const
const: cpu
dest: units
-p:
full: --process
help: Show processes summary
action: append_const
const: process
dest: units
-u:
full: --uptime
help: Show the system uptime
action: append_const
const: uptime
dest: units
-i:
full: --infos
help: Show system informations
action: append_const
const: infos
dest: units
-H:
full: --human-readable
help: Print sizes in human readable format
action: store_true
### monitor_updatestats()
update-stats:
action_help: Update monitoring statistics
api: POST /monitor/stats
arguments:
period:
help: Time period to update
choices:
- day
- week
- month
### monitor_showstats()
show-stats:
action_help: Show monitoring statistics
api: GET /monitor/stats
arguments:
period:
help: Time period to show
choices:
- day
- week
- month
### monitor_enable()
enable:
action_help: Enable server monitoring
api: PUT /monitor
arguments:
-s:
full: --with-stats
help: Enable monitoring statistics
action: store_true
### monitor_disable()
disable:
api: DELETE /monitor
action_help: Disable server monitoring
#############################
# Settings #
#############################
@ -1542,12 +1334,9 @@ tools:
### tools_maindomain()
maindomain:
action_help: Check the current main domain, or change it
api:
- GET /domains/main
- PUT /domains/main
arguments:
-n:
full: --new-domain
full: --new-main-domain
help: Change the current main domain
extra:
pattern: *pattern_domain
@ -1606,26 +1395,6 @@ tools:
help: Upgrade only the system packages
action: store_true
### tools_diagnosis()
diagnosis:
action_help: YunoHost diagnosis
api: GET /diagnosis
arguments:
-p:
full: --private
help: Show private data (domain, IP)
action: store_true
### tools_port_available()
port-available:
action_help: Check availability of a local port
api: GET /tools/portavailable
arguments:
port:
help: Port to check
extra:
pattern: *pattern_port
### tools_shell()
shell:
action_help: Launch a development shell
@ -1835,7 +1604,7 @@ log:
api: GET /logs
arguments:
category:
help: Log category to display (default operations), could be operation, history, package, system, access, service or app
nargs: "*"
-l:
full: --limit
@ -1861,3 +1630,59 @@ log:
--share:
help: Share the full log using yunopaste
action: store_true
#############################
# Diagnosis #
#############################
diagnosis:
category_help: Look for possible issues on the server
actions:
list:
action_help: List diagnosis categories
api: GET /diagnosis/list
show:
action_help: Show the most recent diagnosis results
api: GET /diagnosis/show
arguments:
categories:
help: Diagnosis categories to display (all by default)
nargs: "*"
--full:
help: Display additional information
action: store_true
--issues:
help: Only display issues
action: store_true
--share:
help: Share the logs using yunopaste
action: store_true
run:
action_help: Run diagnosis and collect fresh results
api: POST /diagnosis/run
arguments:
categories:
help: Diagnosis categories to run (all by default)
nargs: "*"
--force:
help: Ignore the cached report even if it is still 'fresh'
action: store_true
ignore:
action_help: Configure some diagnosis results to be ignored and therefore not considered as actual issues
api: POST /diagnosis/ignore
arguments:
--add-filter:
help: "Add a filter. The first element should be a diagnosis category, and other criterias can be provided using the infos from the 'meta' sections in 'yunohost diagnosis show'. For example: 'dnsrecords domain=yolo.test category=xmpp'"
nargs: "*"
metavar: CRITERIA
--remove-filter:
help: Remove a filter (it should be an existing filter as listed with --list)
nargs: "*"
metavar: CRITERIA
--list:
help: List active ignore filters
action: store_true
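
For reference, the actions above map onto CLI calls roughly as follows — a sketch only, since the exact output depends on the moulinette layer; the category names and filter criteria are taken from the help strings above.

# List the available diagnosis categories
yunohost diagnosis list
# Run all categories (or only some of them), bypassing the cached report
yunohost diagnosis run --force
yunohost diagnosis run ip dnsrecords
# Show the most recent results, only the issues, optionally shared via yunopaste
yunohost diagnosis show --issues --share
# Ignore a result using criteria from the 'meta' section of 'show', then list active filters
yunohost diagnosis ignore --add-filter dnsrecords domain=yolo.test category=xmpp
yunohost diagnosis ignore --list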


@ -17,13 +17,38 @@ ynh_find_port () {
ynh_handle_getopts_args "$@"
test -n "$port" || ynh_die --message="The argument of ynh_find_port must be a valid port."
while netcat -z 127.0.0.1 $port # Check if the port is free
while ss -nltu | grep -q -w :$port # Check if the port is free
do
port=$((port+1)) # Else, pass to next port
done
echo $port
}
# Test if a port is available
#
# example: ynh_port_available --port=1234 || ynh_die "Port 1234 needs to be available for this app"
#
# usage: ynh_port_available --port=XYZ
# | arg: -p, --port - port to check
#
# Requires YunoHost version 3.7.x or higher.
ynh_port_available () {
# Declare an array to define the options of this helper.
local legacy_args=p
declare -Ar args_array=( [p]=port= )
local port
# Manage arguments with getopts
ynh_handle_getopts_args "$@"
if ss -nltu | grep -q -w :$port
then
return 1
else
return 0
fi
}
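
A minimal usage sketch for the new helper, e.g. in an app install script; the port number 1234 is illustrative, and ynh_find_port is the companion helper defined above.

port=1234
if ! ynh_port_available --port=$port
then
    # Either abort here with ynh_die, or fall back to the next free port
    port=$(ynh_find_port --port=$((port+1)))
fi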
# Validate an IP address
#
# usage: ynh_validate_ip --family=family --ip_address=ip_address


@ -257,6 +257,7 @@ ynh_webpath_register () {
# re:/api/[A-Z]*$ -> domain.tld/app/api/[A-Z]*$
# re:domain.tld/app/api/[A-Z]*$ -> domain.tld/app/api/[A-Z]*$
#
# Requires YunoHost version 3.7.0 or higher.
ynh_permission_create() {
declare -Ar args_array=( [p]=permission= [u]=url= [a]=allowed= )
local permission
@ -273,7 +274,7 @@ ynh_permission_create() {
if [[ -n ${allowed:-} ]]; then
allowed=",allowed=['${allowed//';'/"','"}']"
fi
yunohost tools shell -c "from yunohost.permission import permission_create; permission_create('$app.$permission', url=$url ${allowed:-} , sync_perm=False)"
}
@ -284,6 +285,7 @@ ynh_permission_create() {
# usage: ynh_permission_delete --permission "permission"
# | arg: permission - the name for the permission (by default a permission named "main" is removed automatically when the app is removed)
#
# Requires YunoHost version 3.7.0 or higher.
ynh_permission_delete() {
declare -Ar args_array=( [p]=permission= )
local permission
@ -292,12 +294,27 @@ ynh_permission_delete() {
yunohost tools shell -c "from yunohost.permission import permission_delete; permission_delete('$app.$permission', sync_perm=False)"
}
# Check if a permission exists
#
# usage: ynh_permission_exists --permission=permission
# | arg: -p, --permission - the permission to check
#
# Requires YunoHost version 3.7.0 or higher.
ynh_permission_exists() {
declare -Ar args_array=( [p]=permission= )
local permission
ynh_handle_getopts_args "$@"
yunohost user permission list -s | grep -w -q "$app.$permission"
}
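
A usage sketch combining this helper with ynh_permission_create from above, e.g. to create an extra permission only once during an upgrade; the permission name "admin" and the URL "/admin" are illustrative.

if ! ynh_permission_exists --permission="admin"
then
    ynh_permission_create --permission="admin" --url="/admin"
fi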
# Redefine the url associated to a permission
#
# usage: ynh_permission_url --permission "permission" --url "url"
# | arg: permission - the name of the permission whose URL should be updated (e.g. "main")
# | arg: url - (optional) URL for which access will be allowed/forbidden
#
# Requires YunoHost version 3.7.0 or higher.
ynh_permission_url() {
declare -Ar args_array=([p]=permission= [u]=url=)
local permission
@ -322,6 +339,7 @@ ynh_permission_url() {
# | arg: remove - the list of group or users to remove from the permission
#
# example: ynh_permission_update --permission admin --add samdoe --remove all_users
# Requires YunoHost version 3.7.0 or higher.
ynh_permission_update() {
declare -Ar args_array=( [p]=permission= [a]=add= [r]=remove= )
local permission


@ -0,0 +1,60 @@
#!/usr/bin/env python
import os
from moulinette.utils.filesystem import read_file
from yunohost.diagnosis import Diagnoser
from yunohost.utils.packages import ynh_packages_version
class BaseSystemDiagnoser(Diagnoser):
id_ = os.path.splitext(os.path.basename(__file__))[0].split("-")[1]
cache_duration = 3600 * 24
dependencies = []
def run(self):
# Kernel version
kernel_version = read_file('/proc/sys/kernel/osrelease').strip()
yield dict(meta={"test": "kernel"},
status="INFO",
summary=("diagnosis_basesystem_kernel", {"kernel_version": kernel_version}))
# FIXME / TODO : add virt/vm technology using systemd-detect-virt and/or machine arch
# Debian release
debian_version = read_file("/etc/debian_version").strip()
yield dict(meta={"test": "host"},
status="INFO",
summary=("diagnosis_basesystem_host", {"debian_version": debian_version}))
# Yunohost packages versions
ynh_packages = ynh_packages_version()
# We check if versions are consistent (e.g. all on 3.6, and not 3 packages on 3.6 and the other one on 3.5)
# This is a classical issue for upgrades that failed in the middle
# (or people upgrading only half of the packages because they ran 'apt upgrade' instead of 'dist-upgrade')
# Here, ynh_core_version is for example "3.5.4.12", so [:3] is "3.5" and we check it's the same for all packages
ynh_core_version = ynh_packages["yunohost"]["version"]
consistent_versions = all(infos["version"][:3] == ynh_core_version[:3] for infos in ynh_packages.values())
ynh_version_details = [("diagnosis_basesystem_ynh_single_version", (package, infos["version"], infos["repo"]))
for package, infos in ynh_packages.items()]
if consistent_versions:
yield dict(meta={"test": "ynh_versions"},
data={"main_version": ynh_core_version, "repo": ynh_packages["yunohost"]["repo"]},
status="INFO",
summary=("diagnosis_basesystem_ynh_main_version",
{"main_version": ynh_core_version,
"repo": ynh_packages["yunohost"]["repo"]}),
details=ynh_version_details)
else:
yield dict(meta={"test": "ynh_versions"},
data={"main_version": ynh_core_version, "repo": ynh_packages["yunohost"]["repo"]},
status="ERROR",
summary=("diagnosis_basesystem_ynh_inconsistent_versions", {}),
details=ynh_version_details)
def main(args, env, loggers):
return BaseSystemDiagnoser(args, env, loggers).diagnose()


@ -0,0 +1,150 @@
#!/usr/bin/env python
import os
import random
from moulinette.utils.network import download_text
from moulinette.utils.process import check_output
from moulinette.utils.filesystem import read_file
from yunohost.diagnosis import Diagnoser
class IPDiagnoser(Diagnoser):
id_ = os.path.splitext(os.path.basename(__file__))[0].split("-")[1]
cache_duration = 60
dependencies = []
def run(self):
# ############################################################ #
# PING : Check that we can ping outside at least in ipv4 or v6 #
# ############################################################ #
can_ping_ipv4 = self.can_ping_outside(4)
can_ping_ipv6 = self.can_ping_outside(6)
if not can_ping_ipv4 and not can_ping_ipv6:
yield dict(meta={"test": "ping"},
status="ERROR",
summary=("diagnosis_ip_not_connected_at_all", {}))
# Not much else we can do if there's no internet at all
return
# ###################################################### #
# DNS RESOLUTION : Check that we can resolve domain name #
# (later needed to talk to ip. and ip6.yunohost.org) #
# ###################################################### #
can_resolve_dns = self.can_resolve_dns()
# In every case, we can check that resolvconf seems to be okay
# (symlink managed by resolvconf service + pointing to dnsmasq)
good_resolvconf = self.resolvconf_is_symlink() and self.resolvconf_points_to_localhost()
# If we can't resolve domain names at all, that's a pretty big issue ...
# If it turns out that at the same time, resolvconf is bad, that's probably
# the cause of this, so we use a different message in that case
if not can_resolve_dns:
yield dict(meta={"test": "dnsresolv"},
status="ERROR",
summary=("diagnosis_ip_broken_dnsresolution", {}) if good_resolvconf
else ("diagnosis_ip_broken_resolvconf", {}))
return
# Otherwise, if the resolv conf is bad but we were able to resolve domain name,
# still warn that we're using a weird resolv conf ...
elif not good_resolvconf:
yield dict(meta={"test": "dnsresolv"},
status="WARNING",
summary=("diagnosis_ip_weird_resolvconf", {}),
details=[("diagnosis_ip_weird_resolvconf_details", ())])
else:
yield dict(meta={"test": "dnsresolv"},
status="SUCCESS",
summary=("diagnosis_ip_dnsresolution_working", {}))
# ##################################################### #
# IP DIAGNOSIS : Check that we're actually able to talk #
# to a web server to fetch current IPv4 and v6 #
# ##################################################### #
ipv4 = self.get_public_ip(4) if can_ping_ipv4 else None
ipv6 = self.get_public_ip(6) if can_ping_ipv6 else None
yield dict(meta={"test": "ip", "version": 4},
data=ipv4,
status="SUCCESS" if ipv4 else "ERROR",
summary=("diagnosis_ip_connected_ipv4", {}) if ipv4
else ("diagnosis_ip_no_ipv4", {}))
yield dict(meta={"test": "ip", "version": 6},
data=ipv6,
status="SUCCESS" if ipv6 else "WARNING",
summary=("diagnosis_ip_connected_ipv6", {}) if ipv6
else ("diagnosis_ip_no_ipv6", {}))
# TODO / FIXME : add some attempt to detect ISP (using whois ?) ?
def can_ping_outside(self, protocol=4):
assert protocol in [4, 6], "Invalid protocol version, it should be either 4 or 6 and was '%s'" % repr(protocol)
# We can know directly that IPv6 is not available if this file does not exist
if protocol == 6 and not os.path.exists("/proc/net/if_inet6"):
return False
# If we are indeed connected in ipv4 or ipv6, we should find a default route
routes = check_output("ip -%s route" % protocol).split("\n")
if not [r for r in routes if r.startswith("default")]:
return False
# We use the resolver file as a list of well-known, trustable (ie not google ;)) IPs that we can ping
resolver_file = "/usr/share/yunohost/templates/dnsmasq/plain/resolv.dnsmasq.conf"
resolvers = [r.split(" ")[1] for r in read_file(resolver_file).split("\n") if r.startswith("nameserver")]
if protocol == 4:
resolvers = [r for r in resolvers if ":" not in r]
if protocol == 6:
resolvers = [r for r in resolvers if ":" in r]
assert resolvers != [], "Uhoh, need at least one IPv%s DNS resolver in %s ..." % (protocol, resolver_file)
# So let's try to ping the first 4~5 resolvers (shuffled)
# If we successfully ping any of them, we conclude that we are indeed connected
def ping(protocol, target):
return os.system("ping%s -c1 -W 3 %s >/dev/null 2>/dev/null" % ("" if protocol == 4 else "6", target)) == 0
random.shuffle(resolvers)
return any(ping(protocol, resolver) for resolver in resolvers[:5])
def can_resolve_dns(self):
return os.system("dig +short ip.yunohost.org >/dev/null 2>/dev/null") == 0
def resolvconf_is_symlink(self):
return os.path.realpath("/etc/resolv.conf") == "/run/resolvconf/resolv.conf"
def resolvconf_points_to_localhost(self):
file_ = "/etc/resolv.conf"
resolvers = [r.split(" ")[1] for r in read_file(file_).split("\n") if r.startswith("nameserver")]
return resolvers == ["127.0.0.1"]
def get_public_ip(self, protocol=4):
# FIXME - TODO : here we assume that DNS resolution for ip.yunohost.org is working
# but if we want to be able to diagnose DNS resolution issues independently from
# internet connectivity, we gotta rely on fixed IPs first....
assert protocol in [4, 6], "Invalid protocol version, it should be either 4 or 6 and was '%s'" % repr(protocol)
url = 'https://ip%s.yunohost.org' % ('6' if protocol == 6 else '')
try:
return download_text(url, timeout=30).strip()
except Exception as e:
self.logger_debug("Could not get public IPv%s : %s" % (str(protocol), str(e)))
return None
def main(args, env, loggers):
return IPDiagnoser(args, env, loggers).diagnose()
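
For debugging, the same checks the diagnoser performs can be reproduced by hand; a sketch where <resolver-ip> is one of the nameservers from resolv.dnsmasq.conf and curl stands in for moulinette's download_text.

ip -4 route | grep '^default'     # a default route must exist if IPv4 connectivity is up
ping -c1 -W 3 <resolver-ip>       # ping one of the well-known resolvers
dig +short ip.yunohost.org        # DNS resolution check
curl https://ip.yunohost.org      # fetch the public IPv4 (use ip6.yunohost.org for IPv6)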


@ -0,0 +1,89 @@
#!/usr/bin/env python
import os
from moulinette.utils.process import check_output
from moulinette.utils.filesystem import read_file
from yunohost.diagnosis import Diagnoser
from yunohost.domain import domain_list, _build_dns_conf, _get_maindomain
class DNSRecordsDiagnoser(Diagnoser):
id_ = os.path.splitext(os.path.basename(__file__))[0].split("-")[1]
cache_duration = 3600 * 24
dependencies = ["ip"]
def run(self):
resolvers = read_file("/etc/resolv.dnsmasq.conf").split("\n")
ipv4_resolvers = [r.split(" ")[1] for r in resolvers if r.startswith("nameserver") and ":" not in r]
# FIXME some day ... handle ipv4-only and ipv6-only servers. For now we assume we have at least ipv4
assert ipv4_resolvers != [], "Uhoh, need at least one IPv4 DNS resolver ..."
self.resolver = ipv4_resolvers[0]
main_domain = _get_maindomain()
all_domains = domain_list()["domains"]
for domain in all_domains:
self.logger_debug("Diagnosing DNS conf for %s" % domain)
for report in self.check_domain(domain, domain == main_domain):
yield report
# FIXME : somewhere, should implement a check for reverse DNS ...
# FIXME / TODO : somewhere, could also implement a check for domain expiring soon
def check_domain(self, domain, is_main_domain):
expected_configuration = _build_dns_conf(domain)
# Here, if there is no AAAA record, we should add something to expect "no" AAAA record
# to properly diagnose situations where people have an AAAA record but no IPv6
for category, records in expected_configuration.items():
discrepancies = []
for r in records:
current_value = self.get_current_record(domain, r["name"], r["type"]) or "None"
expected_value = r["value"] if r["value"] != "@" else domain + "."
if current_value == "None":
discrepancies.append(("diagnosis_dns_missing_record", (r["type"], r["name"], expected_value)))
elif current_value != expected_value:
discrepancies.append(("diagnosis_dns_discrepancy", (r["type"], r["name"], expected_value, current_value)))
if discrepancies:
status = "ERROR" if (category == "basic" or (is_main_domain and category != "extra")) else "WARNING"
summary = ("diagnosis_dns_bad_conf", {"domain": domain, "category": category})
else:
status = "SUCCESS"
summary = ("diagnosis_dns_good_conf", {"domain": domain, "category": category})
output = dict(meta={"domain": domain, "category": category},
status=status,
summary=summary)
if discrepancies:
output["details"] = discrepancies
yield output
def get_current_record(self, domain, name, type_):
if name == "@":
command = "dig +short @%s %s %s" % (self.resolver, type_, domain)
else:
command = "dig +short @%s %s %s.%s" % (self.resolver, type_, name, domain)
# FIXME : gotta handle the case where this command fails ...
# e.g. no internet connectivity (dependency mechanism on a good result from the 'ip' diagnosis ?)
# or the resolver is unavailable for some reason
output = check_output(command).strip()
if output.startswith('"') and output.endswith('"'):
output = '"' + ' '.join(output.replace('"', ' ').split()) + '"'
return output
def main(args, env, loggers):
return DNSRecordsDiagnoser(args, env, loggers).diagnose()
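
For a single record, the comparison in check_domain() boils down to the dig queries below; <resolver-ip>, <TYPE>, <name> and example.org are placeholders, and the expected values come from _build_dns_conf().

dig +short @<resolver-ip> A example.org              # record with name "@" → query the bare domain
dig +short @<resolver-ip> <TYPE> <name>.example.org  # any other record → query name.domain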


@ -0,0 +1,52 @@
#!/usr/bin/env python
import os
import requests
from yunohost.diagnosis import Diagnoser
from yunohost.utils.error import YunohostError
class PortsDiagnoser(Diagnoser):
id_ = os.path.splitext(os.path.basename(__file__))[0].split("-")[1]
cache_duration = 3600
dependencies = ["ip"]
def run(self):
# FIXME / TODO : in the future, maybe we want to report different
# things per port depending on how important they are
# (e.g. XMPP sounds to me much less important than other ports)
# Ideally, a port could be related to a service...
# FIXME / TODO : for now this list of ports is hardcoded, might want
# to fetch this from the firewall.yml in /etc/yunohost/
ports = [22, 25, 53, 80, 443, 587, 993, 5222, 5269]
try:
r = requests.post('https://diagnosis.yunohost.org/check-ports', json={'ports': ports}, timeout=30).json()
if "status" not in r.keys():
raise Exception("Bad syntax for response ? Raw json: %s" % str(r))
elif r["status"] == "error":
if "content" in r.keys():
raise Exception(r["content"])
else:
raise Exception("Bad syntax for response ? Raw json: %s" % str(r))
elif r["status"] != "ok" or "ports" not in r.keys() or not isinstance(r["ports"], dict):
raise Exception("Bad syntax for response ? Raw json: %s" % str(r))
except Exception as e:
raise YunohostError("diagnosis_ports_could_not_diagnose", error=e)
for port in ports:
if r["ports"].get(str(port), None) is not True:
yield dict(meta={"port": port},
status="ERROR",
summary=("diagnosis_ports_unreachable", {"port": port}))
else:
yield dict(meta={},
status="SUCCESS",
summary=("diagnosis_ports_ok", {"port": port}))
def main(args, env, loggers):
return PortsDiagnoser(args, env, loggers).diagnose()
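
The remote check can be triggered manually as well; a sketch assuming the check-ports endpoint accepts the same JSON body that the requests.post() call above sends, and answers with the shape the code expects.

curl -s -X POST https://diagnosis.yunohost.org/check-ports \
     -H 'Content-Type: application/json' \
     -d '{"ports": [22, 80, 443]}'
# expected shape of the answer: {"status": "ok", "ports": {"22": true, "80": true, "443": false}}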


@ -0,0 +1,57 @@
#!/usr/bin/env python
import os
import random
import requests
from yunohost.diagnosis import Diagnoser
from yunohost.domain import domain_list
from yunohost.utils.error import YunohostError
class HttpDiagnoser(Diagnoser):
id_ = os.path.splitext(os.path.basename(__file__))[0].split("-")[1]
cache_duration = 3600
dependencies = ["ip"]
def run(self):
nonce_digits = "0123456789abcdef"
all_domains = domain_list()["domains"]
for domain in all_domains:
nonce = ''.join(random.choice(nonce_digits) for i in range(16))
os.system("rm -rf /tmp/.well-known/ynh-diagnosis/")
os.system("mkdir -p /tmp/.well-known/ynh-diagnosis/")
os.system("touch /tmp/.well-known/ynh-diagnosis/%s" % nonce)
try:
r = requests.post('https://diagnosis.yunohost.org/check-http', json={'domain': domain, "nonce": nonce}, timeout=30).json()
if "status" not in r.keys():
raise Exception("Bad syntax for response ? Raw json: %s" % str(r))
elif r["status"] == "error" and ("code" not in r.keys() or r["code"] not in ["error_http_check_connection_error", "error_http_check_unknown_error"]):
if "content" in r.keys():
raise Exception(r["content"])
else:
raise Exception("Bad syntax for response ? Raw json: %s" % str(r))
except Exception as e:
raise YunohostError("diagnosis_http_could_not_diagnose", error=e)
if r["status"] == "ok":
yield dict(meta={"domain": domain},
status="SUCCESS",
summary=("diagnosis_http_ok", {"domain": domain}))
else:
yield dict(meta={"domain": domain},
status="ERROR",
summary=("diagnosis_http_unreachable", {"domain": domain}))
# Here (or somewhere else?) we could also
# try to diagnose a hairpinning situation by crafting a request for the
# global IP (from within the local network) and seeing if we're getting the right page ?
def main(args, env, loggers):
return HttpDiagnoser(args, env, loggers).diagnose()


@ -0,0 +1,42 @@
#!/usr/bin/env python
import os
from yunohost.diagnosis import Diagnoser
class MailDiagnoser(Diagnoser):
id_ = os.path.splitext(os.path.basename(__file__))[0].split("-")[1]
cache_duration = 3600
dependencies = ["ip"]
def run(self):
# Is outgoing port 25 filtered somehow ?
if os.system('/bin/nc -z -w2 yunohost.org 25') == 0:
yield dict(meta={"test": "outgoing_port_25"},
status="SUCCESS",
summary=("diagnosis_mail_ougoing_port_25_ok",{}))
else:
yield dict(meta={"test": "outgoing_port_25"},
status="ERROR",
summary=("diagnosis_mail_ougoing_port_25_blocked",{}))
# Mail blacklist using dig requests (c.f. ljf's code)
# SMTP reachability (c.f. check-smtp to be implemented on yunohost's remote diagnoser)
# ideally, SPF / DMARC / DKIM validation ... (c.f. https://github.com/alexAubin/yunoScripts/blob/master/yunoDKIM.py possibly though that looks horrible)
# check that the mail queue is not filled with hundreds of pending emails
# check that the recent mail logs are not filled with thousands of emails being sent (unusual number of mails sent)
# check for unusual failed sending attempts being refused in the logs ?
def main(args, env, loggers):
return MailDiagnoser(args, env, loggers).diagnose()


@ -0,0 +1,51 @@
#!/usr/bin/env python
import os
from yunohost.diagnosis import Diagnoser
from yunohost.service import service_status
# TODO : all these are arbitrary, should be collectively validated
services_ignored = {"glances"}
services_critical = {"dnsmasq", "fail2ban", "yunohost-firewall", "nginx", "slapd", "ssh"}
# TODO / FIXME : we should do something about this postfix thing
# The nominal status is "exited" ... some daemon is actually running
# in a different thread than the one started by systemd, which is fine,
# but somehow it sometimes gets killed and there's no easy way to detect it.
# Just restarting it will fix this issue. We should find some trick
# to identify the PID of the process and check that it's still up.
services_expected_to_be_exited = {"postfix", "yunohost-firewall"}
class ServicesDiagnoser(Diagnoser):
id_ = os.path.splitext(os.path.basename(__file__))[0].split("-")[1]
cache_duration = 300
dependencies = []
def run(self):
all_result = service_status()
for service, result in sorted(all_result.items()):
if service in services_ignored:
continue
item = dict(meta={"service": service})
expected_status = "running" if service not in services_expected_to_be_exited else "exited"
# TODO / FIXME : might also want to check that services are enabled
if result["active"] != "active" or result["status"] != expected_status:
item["status"] = "WARNING" if service not in services_critical else "ERROR"
item["summary"] = ("diagnosis_services_bad_status", {"service": service, "status": result["active"] + "/" + result["status"]})
# TODO : could try to append the tail of the service log to the "details" key ...
else:
item["status"] = "SUCCESS"
item["summary"] = ("diagnosis_services_good_status", {"service": service, "status": result["active"] + "/" + result["status"]})
yield item
def main(args, env, loggers):
return ServicesDiagnoser(args, env, loggers).diagnose()


@ -0,0 +1,85 @@
#!/usr/bin/env python
import os
import psutil
from yunohost.diagnosis import Diagnoser
class SystemResourcesDiagnoser(Diagnoser):
id_ = os.path.splitext(os.path.basename(__file__))[0].split("-")[1]
cache_duration = 3600 * 24
dependencies = []
def run(self):
#
# RAM
#
ram = psutil.virtual_memory()
ram_total_abs_MB = ram.total / (1024**2)
ram_available_abs_MB = ram.available / (1024**2)
ram_available_percent = round(100 * ram.available / ram.total)
item = dict(meta={"test": "ram"})
infos = {"total_abs_MB": ram_total_abs_MB, "available_abs_MB": ram_available_abs_MB, "available_percent": ram_available_percent}
if ram_available_abs_MB < 100 or ram_available_percent < 5:
item["status"] = "ERROR"
item["summary"] = ("diagnosis_ram_verylow", infos)
elif ram_available_abs_MB < 200 or ram_available_percent < 10:
item["status"] = "WARNING"
item["summary"] = ("diagnosis_ram_low", infos)
else:
item["status"] = "SUCCESS"
item["summary"] = ("diagnosis_ram_ok", infos)
yield item
#
# Swap
#
swap = psutil.swap_memory()
swap_total_abs_MB = swap.total / (1024*1024)
item = dict(meta={"test": "swap"})
infos = {"total_MB": swap_total_abs_MB}
if swap_total_abs_MB <= 0:
item["status"] = "ERROR"
item["summary"] = ("diagnosis_swap_none", infos)
elif swap_total_abs_MB <= 256:
item["status"] = "WARNING"
item["summary"] = ("diagnosis_swap_notsomuch", infos)
else:
item["status"] = "SUCCESS"
item["summary"] = ("diagnosis_swap_ok", infos)
yield item
#
# Disks usage
#
disk_partitions = psutil.disk_partitions()
for disk_partition in disk_partitions:
device = disk_partition.device
mountpoint = disk_partition.mountpoint
usage = psutil.disk_usage(mountpoint)
free_abs_GB = usage.free / (1024 ** 3)
free_percent = 100 - usage.percent
item = dict(meta={"test": "diskusage", "mountpoint": mountpoint})
infos = {"mountpoint": mountpoint, "device": device, "free_abs_GB": free_abs_GB, "free_percent": free_percent}
if free_abs_GB < 1 or free_percent < 5:
item["status"] = "ERROR"
item["summary"] = ("diagnosis_diskusage_verylow", infos)
elif free_abs_GB < 2 or free_percent < 10:
item["status"] = "WARNING"
item["summary"] = ("diagnosis_diskusage_low", infos)
else:
item["status"] = "SUCCESS"
item["summary"] = ("diagnosis_diskusage_ok", infos)
yield item
def main(args, env, loggers):
return SystemResourcesDiagnoser(args, env, loggers).diagnose()


@ -0,0 +1,56 @@
#!/usr/bin/env python
import os
import subprocess
from yunohost.diagnosis import Diagnoser
from yunohost.regenconf import manually_modified_files, manually_modified_files_compared_to_debian_default
class RegenconfDiagnoser(Diagnoser):
id_ = os.path.splitext(os.path.basename(__file__))[0].split("-")[1]
cache_duration = 300
dependencies = []
def run(self):
# nginx -t
p = subprocess.Popen("nginx -t".split(),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, _ = p.communicate()
if p.returncode != 0:
yield dict(meta={"test": "nginx-t"},
status="ERROR",
summary=("diagnosis_regenconf_nginx_conf_broken", {}),
details=[(out, ())]
)
regenconf_modified_files = manually_modified_files()
debian_modified_files = manually_modified_files_compared_to_debian_default(ignore_handled_by_regenconf=True)
if regenconf_modified_files == []:
yield dict(meta={"test": "regenconf"},
status="SUCCESS",
summary=("diagnosis_regenconf_allgood", {})
)
else:
for f in regenconf_modified_files:
yield dict(meta={"test": "regenconf", "file": f},
status="WARNING",
summary=("diagnosis_regenconf_manually_modified", {"file": f}),
details=[("diagnosis_regenconf_manually_modified_details", {})]
)
for f in debian_modified_files:
yield dict(meta={"test": "debian", "file": f},
status="WARNING",
summary=("diagnosis_regenconf_manually_modified_debian", {"file": f}),
details=[("diagnosis_regenconf_manually_modified_debian_details", {})]
)
def main(args, env, loggers):
return RegenconfDiagnoser(args, env, loggers).diagnose()


@ -0,0 +1,98 @@
#!/usr/bin/env python
import os
import json
import subprocess
from yunohost.diagnosis import Diagnoser
from moulinette.utils.filesystem import read_json, write_to_json
class SecurityDiagnoser(Diagnoser):
id_ = os.path.splitext(os.path.basename(__file__))[0].split("-")[1]
cache_duration = 3600
dependencies = []
def run(self):
"CVE-2017-5754"
if self.is_vulnerable_to_meltdown():
yield dict(meta={"test": "meltdown"},
status="ERROR",
summary=("diagnosis_security_vulnerable_to_meltdown", {}),
details=[("diagnosis_security_vulnerable_to_meltdown_details", ())]
)
else:
yield dict(meta={},
status="SUCCESS",
summary=("diagnosis_security_all_good", {})
)
def is_vulnerable_to_meltdown(self):
# meltdown CVE: https://security-tracker.debian.org/tracker/CVE-2017-5754
# We use a cache file to avoid re-running the script so many times,
# which can be expensive (up to around 5 seconds on ARM)
# and make the admin appear to be slow (c.f. the calls to diagnosis
# from the webadmin)
#
# The cache is in /tmp and shall disappear upon reboot
# *or* we compare it to dpkg.log modification time
# such that it's re-run if there were package upgrades
# (e.g. from yunohost)
cache_file = "/tmp/yunohost-meltdown-diagnosis"
dpkg_log = "/var/log/dpkg.log"
if os.path.exists(cache_file):
if not os.path.exists(dpkg_log) or os.path.getmtime(cache_file) > os.path.getmtime(dpkg_log):
self.logger_debug("Using cached results for meltdown checker, from %s" % cache_file)
return read_json(cache_file)[0]["VULNERABLE"]
# script taken from https://github.com/speed47/spectre-meltdown-checker
# the script commit id is stored directly in the script
SCRIPT_PATH = "/usr/lib/moulinette/yunohost/vendor/spectre-meltdown-checker/spectre-meltdown-checker.sh"
# '--variant 3' corresponds to Meltdown
# example output from the script:
# [{"NAME":"MELTDOWN","CVE":"CVE-2017-5754","VULNERABLE":false,"INFOS":"PTI mitigates the vulnerability"}]
try:
self.logger_debug("Running meltdown vulnerability checker")
call = subprocess.Popen("bash %s --batch json --variant 3" %
SCRIPT_PATH, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# TODO / FIXME : here we are ignoring error messages ...
# in particular on RPi2 and other hardware, the script complains about
# "missing some kernel info (see -v), accuracy might be reduced"
# Dunno what to do about that but we probably don't want to harass
# users with this warning ...
output, err = call.communicate()
assert call.returncode in (0, 2, 3), "Return code: %s" % call.returncode
# If there are multiple lines, it sounds like there were some messages
# in stdout that are not JSON >.> ... Try to get the actual JSON,
# which should be the last line
output = output.strip()
if "\n" in output:
self.logger_debug("Original meltdown checker output : %s" % output)
output = output.split("\n")[-1]
CVEs = json.loads(output)
assert len(CVEs) == 1
assert CVEs[0]["NAME"] == "MELTDOWN"
except Exception as e:
import traceback
traceback.print_exc()
self.logger_warning("Something went wrong when trying to diagnose the Meltdown vulnerability, exception: %s" % e)
raise Exception("Command output for failed meltdown check: '%s'" % output)
self.logger_debug("Writing results from meltdown checker to cache file, %s" % cache_file)
write_to_json(cache_file, CVEs)
return CVEs[0]["VULNERABLE"]
def main(args, env, loggers):
return SecurityDiagnoser(args, env, loggers).diagnose()
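
The underlying check can be run by hand with the vendored script, using the same flags as is_vulnerable_to_meltdown() above.

bash /usr/lib/moulinette/yunohost/vendor/spectre-meltdown-checker/spectre-meltdown-checker.sh \
     --batch json --variant 3
# example output, as quoted in the code comments:
# [{"NAME":"MELTDOWN","CVE":"CVE-2017-5754","VULNERABLE":false,"INFOS":"PTI mitigates the vulnerability"}]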


@ -1,5 +0,0 @@
# Default is to launch glances with '-s' option.
DAEMON_ARGS="-s -B 127.0.0.1"
# Change to 'true' to have glances running at startup
RUN="true"


@ -16,6 +16,10 @@ server {
return 301 https://$http_host$request_uri;
}
location /.well-known/ynh-diagnosis/ {
alias /tmp/.well-known/ynh-diagnosis/;
}
location /.well-known/autoconfig/mail/ {
alias /var/www/.well-known/{{ domain }}/autoconfig/mail/;
}
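
This location block exposes the nonce that HttpDiagnoser drops under /tmp/.well-known/ynh-diagnosis/, so the remote check-http service can fetch it. A local sanity check might look like the following sketch, where example.org and the nonce value are placeholders.

nonce=0123456789abcdef
mkdir -p /tmp/.well-known/ynh-diagnosis/
touch /tmp/.well-known/ynh-diagnosis/$nonce
curl -s -o /dev/null -w '%{http_code}\n' http://example.org/.well-known/ynh-diagnosis/$nonce
# 200 means nginx serves the file, which is what the remote diagnoser checks for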


@ -17,7 +17,6 @@ redis-server:
mysql:
log: [/var/log/mysql.log,/var/log/mysql.err]
alternates: ['mariadb']
glances: {}
ssh:
log: /var/log/auth.log
metronome:
@ -32,6 +31,7 @@ yunohost-firewall:
need_lock: true
nslcd:
log: /var/log/syslog
glances: null
nsswitch: null
ssl: null
yunohost: null

debian/control

@ -15,7 +15,7 @@ Depends: ${python:Depends}, ${misc:Depends}
, python-psutil, python-requests, python-dnspython, python-openssl
, python-apt, python-miniupnpc, python-dbus, python-jinja2
, python-toml
, glances, apt-transport-https
, apt-transport-https
, dnsutils, bind9utils, unzip, git, curl, cron, wget, jq
, ca-certificates, netcat-openbsd, iproute2
, mariadb-server, php-mysql | php-mysqlnd


@ -211,8 +211,8 @@
"mail_domain_unknown": "Unknown mail address domain '{domain:s}'",
"mail_forward_remove_failed": "Unable to remove mail forward '{mail:s}'",
"mailbox_used_space_dovecot_down": "Dovecot mailbox service need to be up, if you want to get mailbox used space",
"maindomain_change_failed": "Unable to change the main domain",
"maindomain_changed": "The main domain has been changed",
"main_domain_change_failed": "Unable to change the main domain",
"main_domain_changed": "The main domain has been changed",
"migrate_tsig_end": "Migration to hmac-sha512 finished",
"migrate_tsig_failed": "Migrating the dyndns domain {domain} to hmac-sha512 failed, rolling back. Error: {error_code} - {error}",
"migrate_tsig_start": "Not secure enough key algorithm detected for TSIG signature of domain '{domain}', initiating migration to the more secure one hmac-sha512",
@ -404,7 +404,7 @@
"log_user_create": "إضافة المستخدم '{}'",
"log_user_delete": "حذف المستخدم '{}'",
"log_user_update": "تحديث معلومات المستخدم '{}'",
"log_tools_maindomain": "جعل '{}' كنطاق أساسي",
"log_domain_main_domain": "جعل '{}' كنطاق أساسي",
"log_tools_upgrade": "تحديث حُزم ديبيان",
"log_tools_shutdown": "إطفاء الخادم",
"log_tools_reboot": "إعادة تشغيل الخادم",


@ -162,7 +162,7 @@
"admin_password_too_long": "Trieu una contrasenya de menys de 127 caràcters",
"dpkg_is_broken": "No es pot fer això en aquest instant perquè dpkg/APT (els gestors de paquets del sistema) sembla estar mal configurat… Podeu intentar solucionar-ho connectant-vos per SSH i executant «sudo dpkg --configure -a».",
"dnsmasq_isnt_installed": "sembla que dnsmasq no està instal·lat, executeu \"apt-get remove bind9 && apt-get install dnsmasq\"",
"domain_cannot_remove_main": "No es pot eliminar el domini principal. S'ha d'establir un nou domini primer",
"domain_cannot_remove_main": "No es pot eliminar «{domain:s}» ja que és el domini principal, primer s'ha d'establir un nou domini principal utilitzant «yunohost domain main-domain -n <un-altre-domini>», aquí hi ha una llista dels possibles dominis: {other_domains:s}",
"domain_cert_gen_failed": "No s'ha pogut generar el certificat",
"domain_created": "S'ha creat el domini",
"domain_creation_failed": "No s'ha pogut crear el domini {domain}: {error}",
@ -271,7 +271,7 @@
"log_user_create": "Afegeix l'usuari « {} »",
"log_user_delete": "Elimina l'usuari « {} »",
"log_user_update": "Actualitza la informació de l'usuari « {} »",
"log_tools_maindomain": "Fes de « {} » el domini principal",
"log_domain_main_domain": "Fes de « {} » el domini principal",
"log_tools_migrations_migrate_forward": "Migrar",
"log_tools_migrations_migrate_backward": "Migrar endarrera",
"log_tools_postinstall": "Fer la post instal·lació del servidor YunoHost",
@ -289,8 +289,8 @@
"mail_forward_remove_failed": "No s'han pogut eliminar el reenviament de correu «{mail:s}»",
"mailbox_used_space_dovecot_down": "S'ha d'engegar el servei de correu Dovecot, per poder obtenir l'espai utilitzat per la bústia de correu",
"mail_unavailable": "Aquesta adreça de correu està reservada i ha de ser atribuïda automàticament el primer usuari",
"maindomain_change_failed": "No s'ha pogut canviar el domini principal",
"maindomain_changed": "S'ha canviat el domini principal",
"main_domain_change_failed": "No s'ha pogut canviar el domini principal",
"main_domain_changed": "S'ha canviat el domini principal",
"migrate_tsig_end": "La migració cap a HMAC-SHA-512 s'ha acabat",
"migrate_tsig_failed": "Ha fallat la migració del domini DynDNS «{domain}» cap a HMAC-SHA-512, anul·lant les modificacions. Error: {error_code}, {error}",
"migrate_tsig_start": "L'algoritme de generació de claus no es prou segur per a la signatura TSIG del domini «{domain}», començant la migració cap a un de més segur HMAC-SHA-512",
@ -308,7 +308,7 @@
"migration_description_0007_ssh_conf_managed_by_yunohost_step1": "La configuració SSH serà gestionada per YunoHost (pas 1, automàtic)",
"migration_description_0008_ssh_conf_managed_by_yunohost_step2": "La configuració SSH serà gestionada per YunoHost (pas 2, manual)",
"migration_description_0009_decouple_regenconf_from_services": "Desvincula el mecanisme regen-conf dels serveis",
"migration_description_0010_migrate_to_apps_json": "Elimina les llistes d'aplicacions obsoletes i utilitza la nova llista unificada «apps.json» en el seu lloc",
"migration_description_0010_migrate_to_apps_json": "Elimina els catàlegs d'aplicacions obsolets i utilitza la nova llista unificada «apps.json» en el seu lloc (obsolet, substituït per la migració 13)",
"migration_0003_backward_impossible": "La migració Stretch no és reversible.",
"migration_0003_start": "Ha començat la migració a Stretch. Els registres estaran disponibles a {logfile}.",
"migration_0003_patching_sources_list": "Modificant el fitxer sources.lists…",
@ -320,7 +320,7 @@
"migration_0003_system_not_fully_up_to_date": "El vostre sistema no està completament actualitzat. S'ha de fer una actualització normal abans de fer la migració a Stretch.",
"migration_0003_still_on_jessie_after_main_upgrade": "Hi ha hagut un problema durant l'actualització principal: El sistema encara està amb Jessie? Per investigar el problema, mireu el registres a {log}:s…",
"migration_0003_general_warning": "Tingueu en compte que la migració és una operació delicada. L'equip de YunoHost a fet els possibles per revisar-la i provar-la, però la migració pot provocar errors en parts del sistema o aplicacions.\n\nPer tant, es recomana:\n - Fer una còpia de seguretat de les dades o aplicacions importants. Més informació a https://yunohost.org/backup;\n - Sigueu pacient un cop llençada la migració: en funció de la connexió a internet i el maquinari, pot trigar fins a unes hores per actualitzar-ho tot.\n\nD'altra banda, el port per SMTP, utilitzat per clients de correu externs (com Thunderbird o K9-Mail) ha canviat de 465 (SSL/TLS) a 587 (STARTTLS). L'antic port (465) serà tancat automàticament, i el nou port (587) serà obert en el tallafocs. Tots els usuaris *hauran* d'adaptar la configuració dels clients de correu en acord amb aquests canvis.",
"migration_0003_problematic_apps_warning": "Tingueu en compte que s'han detectat les aplicacions, possiblement, problemàtiques següents. Sembla que aquestes no s'han instal·lat des d'una applist, o que no estan marcades com a «working». Per conseqüent, no podem garantir que segueixin funcionant després de l'actualització: {problematic_apps}",
"migration_0003_problematic_apps_warning": "Tingueu en compte que s'han detectat les aplicacions, possiblement, problemàtiques següents. Sembla que aquestes no s'han instal·lat des d'un catàleg d'aplicacions, o que no estan marcades com a «working». Per conseqüent, no podem garantir que segueixin funcionant després de l'actualització: {problematic_apps}",
"migration_0003_modified_files": "Tingueu en compte que s'han detectat els següents fitxers que han estat modificats manualment i podrien sobreescriure's al final de l'actualització: {manually_modified_files}",
"migration_0005_postgresql_94_not_installed": "PostgreSQL no està instal·lat en el sistema. No hi ha res per fer.",
"migration_0005_postgresql_96_not_installed": "PostgreSQL 9.4 està instal·lat, però no PostgreSQL 9.6? Alguna cosa estranya a passat en el sistema :( …",
@ -626,5 +626,82 @@
"permission_already_up_to_date": "No s'ha actualitzat el permís perquè la petició d'afegir/eliminar ja corresponent a l'estat actual.",
"permission_currently_allowed_for_visitors": "El permís ja el tenen el grup de visitants a més d'altres grups. Segurament s'hauria de revocar el permís al grup dels visitants o eliminar els altres grups als que s'ha atribuït.",
"permission_currently_allowed_for_all_users": "El permís ha el té el grup de tots els usuaris (all_users) a més d'altres grups. Segurament s'hauria de revocar el permís a «all_users» o eliminar els altres grups als que s'ha atribuït.",
"permission_require_account": "El permís {permission} només té sentit per als usuaris que tenen un compte, i per tant no es pot activar per als visitants."
"permission_require_account": "El permís {permission} només té sentit per als usuaris que tenen un compte, i per tant no es pot activar per als visitants.",
"app_remove_after_failed_install": "Eliminant l'aplicació després que hagi fallat la instal·lació…",
"diagnosis_basesystem_ynh_main_version": "El servidor funciona amb YunoHost {main_version} ({repo})",
"diagnosis_ram_low": "El sistema només té {available_abs_MB} MB ({available_percent}%) de memòria RAM disponibles d'un total de {total_abs_MB} MB. Aneu amb compte.",
"diagnosis_swap_none": "El sistema no té swap. Hauríeu de considerar afegir un mínim de 256 MB de swap per evitar situacions en les que el sistema es queda sense memòria.",
"diagnosis_regenconf_manually_modified": "El fitxer de configuració {file} ha estat modificat manualment.",
"diagnosis_regenconf_nginx_conf_broken": "Sembla que s'ha trencat la configuració NGINX!",
"diagnosis_security_vulnerable_to_meltdown_details": "Per arreglar-ho, hauríeu d'actualitzar i reiniciar el sistema per tal de carregar el nou nucli de linux (o contactar amb el proveïdor del servidor si no funciona). Vegeu https://meltdownattack.com/ per a més informació.",
"diagnosis_http_could_not_diagnose": "No s'ha pogut diagnosticar si el domini és accessible des de l'exterior. Error: {error}",
"domain_cannot_remove_main_add_new_one": "No es pot eliminar «{domain:s}» ja que és el domini principal i únic domini, primer s'ha d'afegir un altre domini utilitzant «yunohost domain add <un-altre-domini.com>», i després fer-lo el domini principal amb «yunohost domain main-domain -n <un-altre-domini.com>» i després es pot eliminar el domini «{domain:s}» utilitzant «yunohost domain remove {domain:s}».",
"diagnosis_basesystem_host": "El servidor funciona amb Debian {debian_version}.",
"diagnosis_basesystem_kernel": "El servidor funciona amb el nucli de Linux {kernel_version}",
"diagnosis_basesystem_ynh_single_version": "{0} versió: {1}({2})",
"diagnosis_basesystem_ynh_inconsistent_versions": "Esteu utilitzant versions inconsistents dels paquets de YunoHost… probablement a causa d'una actualització fallida o parcial.",
"diagnosis_display_tip_web": "Podeu anar a la secció de Diagnòstics (en la pantalla principal) per veure els errors que s'han trobat.",
"diagnosis_failed_for_category": "Ha fallat el diagnòstic per la categoria «{category}» : {error}",
"diagnosis_display_tip_cli": "Podeu executar «yunohost diagnosis show --issues» per mostrar els errors que s'han trobat.",
"diagnosis_cache_still_valid": "(La memòria cau encara és vàlida pel diagnòstic de {category}. No es tornar a diagnosticar de moment!)",
"diagnosis_cant_run_because_of_dep": "No es pot fer el diagnòstic per {category} mentre hi ha problemes importants relacionats amb {dep}.",
"diagnosis_ignored_issues": "(+ {nb_ignored} problema(es) ignorat(s))",
"diagnosis_found_errors": "S'ha trobat problema(es) important(s) {errors} relacionats amb {category}!",
"diagnosis_found_errors_and_warnings": "S'ha trobat problema(es) important(s) {errors} (i avis(os) {warnings}) relacionats amb {category}!",
"diagnosis_found_warnings": "S'han trobat ítems {warnings} que es podrien millorar per {category}.",
"diagnosis_everything_ok": "Tot sembla correcte per {category}!",
"diagnosis_failed": "No s'han pogut obtenir els resultats del diagnòstic per la categoria «{category}» : {error}",
"diagnosis_ip_connected_ipv4": "El servidor està connectat a Internet amb IPv4!",
"diagnosis_ip_no_ipv4": "El servidor no té una IPv4 que funcioni.",
"diagnosis_ip_connected_ipv6": "El servidor està connectat a Internet amb IPv6!",
"diagnosis_ip_no_ipv6": "El servidor no té una IPv6 que funcioni.",
"diagnosis_ip_not_connected_at_all": "Sembla que el servidor no està connectat a internet!?",
"diagnosis_ip_dnsresolution_working": "La resolució de nom de domini està funcionant!",
"diagnosis_ip_broken_dnsresolution": "La resolució de nom de domini falla per algun motiu… Està el tallafocs bloquejant les peticions DNS?",
"diagnosis_ip_broken_resolvconf": "La resolució de nom de domini sembla caiguda en el servidor, podria estar relacionat amb el fet que /etc/resolv.conf no apunta cap a 127.0.0.1.",
"diagnosis_ip_weird_resolvconf": "La resolució DNS sembla estar funcionant, però aneu amb compte ja que esteu utilitzant un versió personalitzada de /etc/resolv.conf.",
"diagnosis_ip_weird_resolvconf_details": "En canvi, aquest fitxer hauria de ser un enllaç simbòlic cap a /etc/resolvconf/run/resolv.conf i que aquest apunti cap a 127.0.0.1 (dnsmasq). La configuració del «resolver» real s'hauria de fer via /etc/resolv.dnsmaq.conf.",
"diagnosis_dns_good_conf": "Bona configuració DNS pel domini {domain} (categoria {category})",
"diagnosis_dns_bad_conf": "Configuració DNS incorrecta o inexistent pel domini {domain} (categoria {category})",
"diagnosis_dns_missing_record": "Segons la configuració DNS recomanada, hauríeu d'afegir un registre DNS de tipus {0}, nom {1} i valor {2}",
"diagnosis_dns_discrepancy": "Segons la configuració DNS recomanada, el valor pel registre DNS de tipus {0} i nom {1} hauria de ser {2}, en comptes de {3}.",
"diagnosis_services_good_status": "El servei {service} està {status} tal i com s'esperava!",
"diagnosis_services_bad_status": "El servei {service} està {status} :/",
"diagnosis_diskusage_verylow": "El lloc d'emmagatzematge {mountpoint} (en l'aparell {device}) només té disponibles {free_abs_GB} GB ({free_percent}%). Hauríeu de considerar alliberar una mica d'espai.",
"diagnosis_diskusage_low": "El lloc d'emmagatzematge {mountpoint} (en l'aparell {device}) només té disponibles {free_abs_GB} GB ({free_percent}%). Aneu amb compte.",
"diagnosis_diskusage_ok": "El lloc d'emmagatzematge {mountpoint} (en l'aparell {device}) encara té {free_abs_GB} GB ({free_percent}%) lliures!",
"diagnosis_ram_verylow": "El sistema només té {available_abs_MB} MB ({available_percent}%) de memòria RAM disponibles! (d'un total de {total_abs_MB} MB)",
"diagnosis_ram_ok": "El sistema encara té {available_abs_MB} MB ({available_percent}%) de memòria RAM disponibles d'un total de {total_abs_MB} MB.",
"diagnosis_swap_notsomuch": "El sistema només té {total_MB} MB de swap. Hauríeu de considerar tenir un mínim de 256 MB per evitar situacions en les que el sistema es queda sense memòria.",
"diagnosis_swap_ok": "El sistema té {total_MB} MB de swap!",
"diagnosis_regenconf_allgood": "Tots els fitxers de configuració estan en acord amb la configuració recomanada!",
"diagnosis_regenconf_manually_modified_details": "No hauria de ser cap problema sempre i quan sapigueu el que esteu fent ;) !",
"diagnosis_regenconf_manually_modified_debian": "El fitxer de configuració {file} ha estat modificat manualment respecte al fitxer per defecte de Debian.",
"diagnosis_regenconf_manually_modified_debian_details": "No hauria de ser cap problema, però ho haureu de vigilar...",
"diagnosis_security_all_good": "No s'ha trobat cap vulnerabilitat de seguretat crítica.",
"diagnosis_security_vulnerable_to_meltdown": "Sembla que el sistema és vulnerable a la vulnerabilitat de seguretat crítica Meltdown",
"diagnosis_description_basesystem": "Sistema de base",
"diagnosis_description_ip": "Connectivitat a Internet",
"diagnosis_description_dnsrecords": "Registres DNS",
"diagnosis_description_services": "Verificació de l'estat dels serveis",
"diagnosis_description_systemresources": "Recursos del sistema",
"diagnosis_description_ports": "Exposició dels ports",
"diagnosis_description_http": "Exposició HTTP",
"diagnosis_description_regenconf": "Configuració del sistema",
"diagnosis_description_security": "Verificacions de seguretat",
"diagnosis_ports_could_not_diagnose": "No s'ha pogut diagnosticar si els ports són accessibles des de l'exterior. Error: {error}",
"diagnosis_ports_unreachable": "El port {port} no és accessible des de l'exterior.",
"diagnosis_ports_ok": "El port {port} és accessible des de l'exterior.",
"diagnosis_http_ok": "El domini {domain} és accessible des de l'exterior.",
"diagnosis_http_unreachable": "El domini {domain} no és accessible a través de HTTP des de l'exterior.",
"diagnosis_unknown_categories": "Les següents categories són desconegudes: {categories}",
"apps_catalog_init_success": "S'ha iniciat el sistema de catàleg d'aplicacions!",
"apps_catalog_updating": "S'està actualitzant el catàleg d'aplicacions…",
"apps_catalog_failed_to_download": "No s'ha pogut descarregar el catàleg d'aplicacions {apps_catalog}: {error}",
"apps_catalog_obsolete_cache": "La memòria cau del catàleg d'aplicacions és buida o obsoleta.",
"apps_catalog_update_success": "S'ha actualitzat el catàleg d'aplicacions!",
"diagnosis_mail_ougoing_port_25_ok": "El port de sortida 25 no està bloquejat i els correus es poden enviar a altres servidors.",
"diagnosis_mail_ougoing_port_25_blocked": "Sembla que el port de sortida 25 està bloquejat. Hauríeu d'intentar desbloquejar-lo al panell de configuració del proveïdor d'accés a internet (o allotjador). Mentrestant, el servidor no podrà enviar correus a altres servidors.",
"diagnosis_description_mail": "Correu electrònic",
"migration_description_0013_futureproof_apps_catalog_system": "Migrar al nou sistema de catàleg d'aplicacions resistent al pas del temps"
}


@ -104,8 +104,8 @@
"mail_alias_remove_failed": "E-Mail Alias '{mail:s}' konnte nicht entfernt werden",
"mail_domain_unknown": "Unbekannte Mail Domain '{domain:s}'",
"mail_forward_remove_failed": "Mailweiterleitung '{mail:s}' konnte nicht entfernt werden",
"maindomain_change_failed": "Die Hauptdomain konnte nicht geändert werden",
"maindomain_changed": "Die Hauptdomain wurde geändert",
"main_domain_change_failed": "Die Hauptdomain konnte nicht geändert werden",
"main_domain_changed": "Die Hauptdomain wurde geändert",
"monitor_disabled": "Das Servermonitoring wurde erfolgreich deaktiviert",
"monitor_enabled": "Das Servermonitoring wurde aktiviert",
"monitor_glances_con_failed": "Verbindung mit Glances nicht möglich",
@ -293,7 +293,7 @@
"backup_abstract_method": "Diese Backup-Methode wird noch nicht unterstützt",
"backup_applying_method_tar": "Erstellen des Backup-tar Archives…",
"backup_applying_method_copy": "Kopiere alle Dateien ins Backup…",
"app_change_url_no_script": "Die Anwendung '{app_name:s}' unterstützt bisher keine URL-Modufikation. Vielleicht gibt es eine Aktualisierung.",
"app_change_url_no_script": "Die Anwendung '{app_name:s}' unterstützt bisher keine URL-Modifikation. Vielleicht sollte sie aktualisiert werden.",
"app_location_unavailable": "Diese URL ist nicht verfügbar oder wird von einer installierten Anwendung genutzt:\n{apps:s}",
"backup_applying_method_custom": "Rufe die benutzerdefinierte Backup-Methode '{method:s}' auf…",
"backup_archive_system_part_not_available": "Der System-Teil '{part:s}' ist in diesem Backup nicht enthalten",
@ -350,7 +350,7 @@
"app_start_remove": "Anwendung {app} wird entfernt…",
"app_start_install": "Anwendung {app} wird installiert…",
"app_not_upgraded": "Die App '{failed_app}' konnte nicht aktualisiert werden. Infolgedessen wurden die folgenden App-Upgrades abgebrochen: {apps}",
"app_make_default_location_already_used": "Die App \"{app}\" kann nicht als Standard für die Domain \"{domain}\" festgelegt werden. Sie wird bereits von der anderen App \"{other_app}\" verwendet",
"app_make_default_location_already_used": "Die App \"{app}\" kann nicht als Standard für die Domain \"{domain}\" festgelegt werden. Sie wird bereits von der App \"{other_app}\" verwendet",
"aborting": "Breche ab.",
"app_action_cannot_be_ran_because_required_services_down": "Diese App erfordert einige Dienste, die derzeit nicht verfügbar sind. Bevor Sie fortfahren, sollten Sie versuchen, die folgenden Dienste neu zu starten (und möglicherweise untersuchen, warum sie nicht verfügbar sind): {services}",
"already_up_to_date": "Nichts zu tun. Alles ist bereits auf dem neusten Stand.",
@@ -414,5 +414,8 @@
"global_settings_key_doesnt_exists": "Der Schlüssel'{settings_key:s}' existiert nicht in den globalen Einstellungen, du kannst alle verfügbaren Schlüssel sehen, indem du 'yunohost settings list' ausführst",
"log_app_makedefault": "Mache '{}' zur Standard-Anwendung",
"hook_json_return_error": "Konnte die Rückkehr vom Einsprungpunkt {path:s} nicht lesen. Fehler: {msg:s}. Unformatierter Inhalt: {raw_content}",
"app_full_domain_unavailable": "Es tut uns leid, aber diese Anwendung erfordert die Installation einer vollständigen Domäne, aber einige andere Anwendungen sind bereits auf der Domäne'{domain}' installiert. Eine mögliche Lösung ist das Hinzufügen und Verwenden einer Subdomain, die dieser Anwendung zugeordnet ist."
"app_full_domain_unavailable": "Es tut uns leid, aber diese Anwendung erfordert die Installation auf einer eigenen Domain, aber einige andere Anwendungen sind bereits auf der Domäne'{domain}' installiert. Eine mögliche Lösung ist das Hinzufügen und Verwenden einer Subdomain, die dieser Anwendung zugeordnet ist.",
"app_install_failed": "Installation von {app} fehlgeschlagen: {error}",
"app_install_script_failed": "Im Installationsscript ist ein Fehler aufgetreten",
"app_remove_after_failed_install": "Entfernen der App nach fehlgeschlagener Installation…"
}

View file

@@ -21,13 +21,10 @@
"app_extraction_failed": "Could not extract the installation files",
"app_full_domain_unavailable": "Sorry, this app must be installed on a domain of its own, but other apps are already installed on the domain '{domain}'. You could use a subdomain dedicated to this app instead.",
"app_id_invalid": "Invalid app ID",
"app_incompatible": "The app {app} is incompatible with your YunoHost version",
"app_install_files_invalid": "These files cannot be installed",
"app_install_failed": "Could not install {app}: {error}",
"app_install_script_failed": "An error occurred inside the app installation script",
"app_location_already_used": "The app '{app}' is already installed in ({path})",
"app_make_default_location_already_used": "Can't make the app '{app}' the default on the domain, '{domain}' is already in use by the other app '{other_app}'",
"app_location_install_failed": "Cannot install the app there because it conflicts with the app '{other_app}' already installed in '{other_path}'",
"app_location_unavailable": "This URL is either unavailable, or conflicts with the already installed app(s):\n{apps:s}",
"app_manifest_invalid": "Something is wrong with the app manifest: {error}",
"app_not_upgraded": "The app '{failed_app}' failed to upgrade, and as a consequence the following apps upgrades have been cancelled: {apps}",
@@ -55,21 +52,16 @@
"app_upgraded": "{app:s} upgraded",
"apps_already_up_to_date": "All apps are already up-to-date",
"apps_permission_not_found": "No permission found for the installed apps",
"appslist_corrupted_json": "Could not load the app lists. It looks like {filename:s} is damaged.",
"appslist_could_not_migrate": "Could not migrate the app list '{appslist:s}'! Could not parse the URL… The old cron job was kept kept in {bkp_file:s}.",
"appslist_fetched": "Updated the app list '{appslist:s}'",
"appslist_migrating": "Migrating the app list '{appslist:s}'…",
"appslist_name_already_tracked": "A registered app list with the name {name:s} already exists.",
"appslist_removed": "The '{appslist:s}' app list was removed",
"appslist_retrieve_bad_format": "Could not read the fetched app list '{appslist:s}'",
"appslist_retrieve_error": "Cannot retrieve the remote app list '{appslist:s}': {error:s}",
"appslist_unknown": "The app list '{appslist:s}' is unknown.",
"appslist_url_already_tracked": "There is already a registered app list with the URL {url:s}.",
"apps_permission_restoration_failed": "Permission '{permission:s}' for app {app:s} restoration has failed",
"apps_catalog_init_success": "Apps catalog system initialized!",
"apps_catalog_updating": "Updating applications catalog...",
"apps_catalog_failed_to_download": "Unable to download the {apps_catalog} apps catalog: {error}",
"apps_catalog_obsolete_cache": "The apps catalog cache is empty or obsolete.",
"apps_catalog_update_success": "The application catalog has been updated!",
"ask_current_admin_password": "Current administration password",
"ask_email": "E-mail address",
"ask_firstname": "First name",
"ask_lastname": "Last name",
"ask_list_to_remove": "List to remove",
"ask_main_domain": "Main domain",
"ask_new_admin_password": "New administration password",
"ask_new_domain": "New domain",
@@ -150,20 +142,82 @@
"confirm_app_install_danger": "DANGER! This app is known to be still experimental (if not explicitly not working)! You should probably NOT install it unless you know what you are doing. NO SUPPORT will be provided if this app doesn't work or break your system… If you are willing to take that risk anyway, type '{answers:s}'",
"confirm_app_install_thirdparty": "DANGER! This app is not part of Yunohost's app catalog. Installing third-party apps may compromise the integrity and security of your system. You should probably NOT install it unless you know what you are doing. NO SUPPORT will be provided if this app doesn't work or break your system… If you are willing to take that risk anyway, type '{answers:s}'",
"custom_app_url_required": "You must provide a URL to upgrade your custom app {app:s}",
"custom_appslist_name_required": "You must provide a name for your custom app list",
"diagnosis_debian_version_error": "Could not retrieve the Debian version: {error}",
"diagnosis_kernel_version_error": "Could not retrieve kernel version: {error}",
"diagnosis_monitor_disk_error": "Could not monitor disks: {error}",
"diagnosis_monitor_system_error": "Could not monitor system: {error}",
"diagnosis_no_apps": "No such installed app",
"dpkg_is_broken": "You cannot do this right now because dpkg/APT (the system package managers) seems to be in a broken state… You can try to solve this issue by connecting through SSH and running `sudo dpkg --configure -a`.",
"dpkg_lock_not_available": "This command can't be ran right now because another program seems to be using the lock of dpkg (the system package manager)",
"domain_cannot_remove_main": "Cannot remove main domain. Set one first",
"diagnosis_basesystem_host": "Server is running Debian {debian_version}.",
"diagnosis_basesystem_kernel": "Server is running Linux kernel {kernel_version}",
"diagnosis_basesystem_ynh_single_version": "{0} version: {1} ({2})",
"diagnosis_basesystem_ynh_main_version": "Server is running YunoHost {main_version} ({repo})",
"diagnosis_basesystem_ynh_inconsistent_versions": "You are running inconsistents versions of the YunoHost packages ... most probably because of a failed or partial upgrade.",
"diagnosis_display_tip_web": "You can go to the Diagnosis section (in the home screen) to see the issues found.",
"diagnosis_display_tip_cli": "You can run 'yunohost diagnosis show --issues' to display the issues found.",
"diagnosis_failed_for_category": "Diagnosis failed for category '{category}' : {error}",
"diagnosis_cache_still_valid": "(Cache still valid for {category} diagnosis. Not re-diagnosing yet!)",
"diagnosis_cant_run_because_of_dep": "Can't run diagnosis for {category} while there are important issues related to {dep}.",
"diagnosis_ignored_issues": "(+ {nb_ignored} ignored issue(s))",
"diagnosis_found_errors": "Found {errors} significant issue(s) related to {category}!",
"diagnosis_found_errors_and_warnings": "Found {errors} significant issue(s) (and {warnings} warning(s)) related to {category}!",
"diagnosis_found_warnings": "Found {warnings} item(s) that could be improved for {category}.",
"diagnosis_everything_ok": "Everything looks good for {category}!",
"diagnosis_failed": "Failed to fetch diagnosis result for category '{category}' : {error}",
"diagnosis_ip_connected_ipv4": "The server is connected to the Internet through IPv4 !",
"diagnosis_ip_no_ipv4": "The server does not have a working IPv4.",
"diagnosis_ip_connected_ipv6": "The server is connected to the Internet through IPv6 !",
"diagnosis_ip_no_ipv6": "The server does not have a working IPv6.",
"diagnosis_ip_not_connected_at_all": "The server does not seem to be connected to the Internet at all!?",
"diagnosis_ip_dnsresolution_working": "Domain name resolution is working!",
"diagnosis_ip_broken_dnsresolution": "Domain name resolution seems to be broken for some reason ... Is a firewall blocking DNS requests ?",
"diagnosis_ip_broken_resolvconf": "Domain name resolution seems to be broken on your server, which seems related to /etc/resolv.conf not pointing to 127.0.0.1.",
"diagnosis_ip_weird_resolvconf": "DNS resolution seems to be working, but be careful that you seem to be using a custom /etc/resolv.conf.",
"diagnosis_ip_weird_resolvconf_details": "Instead, this file should be a symlink to /etc/resolvconf/run/resolv.conf itself pointing to 127.0.0.1 (dnsmasq). The actual resolvers should be configured via /etc/resolv.dnsmasq.conf.",
"diagnosis_dns_good_conf": "Good DNS configuration for domain {domain} (category {category})",
"diagnosis_dns_bad_conf": "Bad / missing DNS configuration for domain {domain} (category {category})",
"diagnosis_dns_missing_record": "According to the recommended DNS configuration, you should add a DNS record with type {0}, name {1} and value {2}",
"diagnosis_dns_discrepancy": "According to the recommended DNS configuration, the value for the DNS record with type {0} and name {1} should be {2}, not {3}.",
"diagnosis_services_good_status": "Service {service} is {status} as expected!",
"diagnosis_services_bad_status": "Service {service} is {status} :/",
"diagnosis_diskusage_verylow": "Storage {mountpoint} (on device {device}) has only {free_abs_GB} GB ({free_percent}%) space remaining. You should really consider cleaning up some space.",
"diagnosis_diskusage_low": "Storage {mountpoint} (on device {device}) has only {free_abs_GB} GB ({free_percent}%) space remaining. Be careful.",
"diagnosis_diskusage_ok": "Storage {mountpoint} (on device {device}) still has {free_abs_GB} GB ({free_percent}%) space left!",
"diagnosis_ram_verylow": "The system has only {available_abs_MB} MB ({available_percent}%) RAM left! (out of {total_abs_MB} MB)",
"diagnosis_ram_low": "The system has {available_abs_MB} MB ({available_percent}%) RAM left out of {total_abs_MB} MB. Be careful.",
"diagnosis_ram_ok": "The system still has {available_abs_MB} MB ({available_percent}%) RAM left out of {total_abs_MB} MB.",
"diagnosis_swap_none": "The system has no swap at all. You should consider adding at least 256 MB of swap to avoid situations where the system runs out of memory.",
"diagnosis_swap_notsomuch": "The system has only {total_MB} MB swap. You should consider having at least 256 MB to avoid situations where the system runs out of memory.",
"diagnosis_swap_ok": "The system has {total_MB} MB of swap!",
"diagnosis_mail_ougoing_port_25_ok": "Outgoing port 25 is not blocked and email can be sent to other servers.",
"diagnosis_mail_ougoing_port_25_blocked": "Outgoing port 25 appears to be blocked. You should try to unblock it in your internet service provider (or hoster) configuration panel. Meanwhile, the server won't be able to send emails to other servers.",
"diagnosis_regenconf_allgood": "All configurations files are in line with the recommended configuration!",
"diagnosis_regenconf_manually_modified": "Configuration file {file} was manually modified.",
"diagnosis_regenconf_manually_modified_details": "This is probably OK as long as you know what you're doing ;) !",
"diagnosis_regenconf_manually_modified_debian": "Configuration file {file} was manually modified compared to Debian's default.",
"diagnosis_regenconf_manually_modified_debian_details": "This may probably be OK, but gotta keep an eye on it...",
"diagnosis_regenconf_nginx_conf_broken": "The nginx configuration appears to be broken!",
"diagnosis_security_all_good": "No critical security vulnerability was found.",
"diagnosis_security_vulnerable_to_meltdown": "You appear vulnerable to the Meltdown criticial security vulnerability",
"diagnosis_security_vulnerable_to_meltdown_details": "To fix this, you should upgrade your system and reboot to load the new linux kernel (or contact your server provider if this doesn't work). See https://meltdownattack.com/ for more infos.",
"diagnosis_description_basesystem": "Base system",
"diagnosis_description_ip": "Internet connectivity",
"diagnosis_description_dnsrecords": "DNS records",
"diagnosis_description_services": "Services status check",
"diagnosis_description_systemresources": "System resources",
"diagnosis_description_ports": "Ports exposure",
"diagnosis_description_http": "HTTP exposure",
"diagnosis_description_mail": "Email",
"diagnosis_description_regenconf": "System configurations",
"diagnosis_description_security": "Security checks",
"diagnosis_ports_could_not_diagnose": "Could not diagnose if ports are reachable from outside. Error: {error}",
"diagnosis_ports_unreachable": "Port {port} is not reachable from outside.",
"diagnosis_ports_ok": "Port {port} is reachable from outside.",
"diagnosis_http_could_not_diagnose": "Could not diagnose if domain is reachable from outside. Error: {error}",
"diagnosis_http_ok": "Domain {domain} is reachable from outside.",
"diagnosis_http_unreachable": "Domain {domain} is unreachable through HTTP from outside.",
"diagnosis_unknown_categories": "The following categories are unknown : {categories}",
"domain_cannot_remove_main": "You cannot remove '{domain:s}' since it's the main domain, you need first to set another domain as the main domain using 'yunohost domain main-domain -n <another-domain>', here is the list of candidate domains: {other_domains:s}",
"domain_cannot_remove_main_add_new_one": "You cannot remove '{domain:s}' since it's the main domain and your only domain, you need to first add another domain using 'yunohost domain add <another-domain.com>', then set is as the main domain using 'yunohost domain main-domain -n <another-domain.com>' and then you can remove the domain '{domain:s}' using 'yunohost domain remove {domain:s}'.'",
"domain_cert_gen_failed": "Could not generate certificate",
"domain_created": "Domain created",
"domain_creation_failed": "Could not create domain {domain}: {error}",
"domain_creation_failed": "Unable to create domain {domain}: {error}",
"domain_deleted": "Domain deleted",
"domain_deletion_failed": "Could not delete domain {domain}: {error}",
"domain_deletion_failed": "Unable to delete domain {domain}: {error}",
"domain_dns_conf_is_just_a_recommendation": "This command shows you the *recommended* configuration. It does not actually set up the DNS configuration for you. It is your responsability to configure your DNS zone in your registrar according to this recommendation.",
"domain_dyndns_already_subscribed": "You have already subscribed to a DynDNS domain",
"domain_dyndns_root_unknown": "Unknown DynDNS root domain",
@@ -174,6 +228,8 @@
"domains_available": "Available domains:",
"done": "Done",
"downloading": "Downloading…",
"dpkg_is_broken": "You cannot do this right now because dpkg/APT (the system package managers) seems to be in a broken state… You can try to solve this issue by connecting through SSH and running `sudo dpkg --configure -a`.",
"dpkg_lock_not_available": "This command can't be ran right now because another program seems to be using the lock of dpkg (the system package manager)",
"dyndns_could_not_check_provide": "Could not check if {provider:s} can provide {domain:s}.",
"dyndns_could_not_check_available": "Could not check if {domain:s} is available on {provider:s}.",
"dyndns_cron_installed": "DynDNS cron job created",
@@ -252,8 +308,6 @@
"log_help_to_get_failed_log": "The operation '{desc}' could not be completed. Please share the full log of this operation using the command 'yunohost log display {name} --share' to get help",
"log_does_exists": "There is not operation log with the name '{log}', use 'yunohost log list' to see all available operation logs",
"log_operation_unit_unclosed_properly": "Operation unit has not been closed properly",
"log_app_fetchlist": "Add an app list",
"log_app_removelist": "Remove an app list",
"log_app_change_url": "Change the URL of the '{}' app",
"log_app_install": "Install the '{}' app",
"log_app_remove": "Remove the '{}' app",
@@ -283,7 +337,7 @@
"log_user_update": "Update user info of '{}'",
"log_user_permission_update": "Update accesses for permission '{}'",
"log_user_permission_reset": "Reset permission '{}'",
"log_tools_maindomain": "Make '{}' the main domain",
"log_domain_main_domain": "Make '{}' as main domain",
"log_tools_migrations_migrate_forward": "Migrate forward",
"log_tools_postinstall": "Postinstall your YunoHost server",
"log_tools_upgrade": "Upgrade system packages",
@@ -298,8 +352,8 @@
"mailbox_disabled": "E-mail turned off for user {user:s}",
"mailbox_used_space_dovecot_down": "The Dovecot mailbox service needs to be up, if you want to fetch used mailbox space",
"mail_unavailable": "This e-mail address is reserved and shall be automatically allocated to the very first user",
"maindomain_change_failed": "Could not change the main domain",
"maindomain_changed": "The main domain now changed",
"main_domain_change_failed": "Unable to change the main domain",
"main_domain_changed": "The main domain has been changed",
"migrate_tsig_end": "Migration to HMAC-SHA-512 finished",
"migrate_tsig_failed": "Could not migrate the DynDNS domain '{domain}' to HMAC-SHA-512, rolling back. Error: {error_code}, {error}",
"migrate_tsig_start": "Insufficiently secure key algorithm detected for TSIG signature of the domain '{domain}', initiating migration to the more secure HMAC-SHA-512",
@@ -317,9 +371,10 @@
"migration_description_0007_ssh_conf_managed_by_yunohost_step1": "Let the SSH configuration be managed by YunoHost (step 1, automatic)",
"migration_description_0008_ssh_conf_managed_by_yunohost_step2": "Let the SSH configuration be managed by YunoHost (step 2, manual)",
"migration_description_0009_decouple_regenconf_from_services": "Decouple the regen-conf mechanism from services",
"migration_description_0010_migrate_to_apps_json": "Remove deprecated applists and use the new unified 'apps.json' list instead",
"migration_description_0010_migrate_to_apps_json": "Remove deprecated apps catalogs and use the new unified 'apps.json' list instead (outdated, replaced by migration 13)",
"migration_description_0011_setup_group_permission": "Set up user group and set up permission for apps and services",
"migration_description_0012_postgresql_password_to_md5_authentication": "Force PostgreSQL authentication to use MD5 for local connections",
"migration_description_0013_futureproof_apps_catalog_system": "Migrate to the new future-proof apps catalog system",
"migration_0003_start": "Starting migration to Stretch. The logs will be available in {logfile}.",
"migration_0003_patching_sources_list": "Patching the sources.lists…",
"migration_0003_main_upgrade": "Starting main upgrade…",
@@ -330,7 +385,7 @@
"migration_0003_system_not_fully_up_to_date": "Your system is not fully up-to-date. Please perform a regular upgrade before running the migration to Stretch.",
"migration_0003_still_on_jessie_after_main_upgrade": "Something went wrong during the main upgrade: Is the system still on Jessie‽ To investigate the issue, please look at {log}:s…",
"migration_0003_general_warning": "Please note that this migration is a delicate operation. The YunoHost team did its best to review and test it, but the migration might still break parts of the system or its apps.\n\nTherefore, it is recommended to:\n - Perform a backup of any critical data or app. More info on https://yunohost.org/backup;\n - Be patient after launching the migration: Depending on your Internet connection and hardware, it might take up to a few hours for everything to upgrade.\n\nAdditionally, the port for SMTP, used by external e-mail clients (like Thunderbird or K9-Mail) was changed from 465 (SSL/TLS) to 587 (STARTTLS). The old port (465) will automatically be closed, and the new port (587) will be opened in the firewall. You and your users *will* have to adapt the configuration of your e-mail clients accordingly.",
"migration_0003_problematic_apps_warning": "Please note that the following possibly problematic installed apps were detected. It looks like those were not installed from an applist, or are not flagged as 'working'. Consequently, it cannot be guaranteed that they will still work after the upgrade: {problematic_apps}",
"migration_0003_problematic_apps_warning": "Please note that the following possibly problematic installed apps were detected. It looks like those were not installed from an apps_catalog, or are not flagged as 'working'. Consequently, it cannot be guaranteed that they will still work after the upgrade: {problematic_apps}",
"migration_0003_modified_files": "Please note that the following files were found to be manually modified and might be overwritten following the upgrade: {manually_modified_files}",
"migration_0005_postgresql_94_not_installed": "PostgreSQL was not installed on your system. Nothing to do.",
"migration_0005_postgresql_96_not_installed": "PostgreSQL 9.4 is installed, but not postgresql 9.6‽ Something weird might have happened on your system:(…",
@@ -375,26 +430,9 @@
"migrations_skip_migration": "Skipping migration {id}…",
"migrations_success_forward": "Migration {id} completed",
"migrations_to_be_ran_manually": "Migration {id} has to be run manually. Please go to Tools → Migrations on the webadmin page, or run `yunohost tools migrations migrate`.",
"monitor_disabled": "Server monitoring now off",
"monitor_enabled": "Server monitoring now on",
"monitor_glances_con_failed": "Could not connect to Glances server",
"monitor_not_enabled": "Server monitoring is off",
"monitor_period_invalid": "Invalid time period",
"monitor_stats_file_not_found": "Could not find the statistics file",
"monitor_stats_no_update": "No monitoring statistics to update",
"monitor_stats_period_unavailable": "No available statistics for the period",
"mountpoint_unknown": "Unknown mountpoint",
"mysql_db_creation_failed": "Could not create MySQL database",
"mysql_db_init_failed": "Could not initialize MySQL database",
"mysql_db_initialized": "The MySQL database is now initialized",
"network_check_mx_ko": "DNS MX record is not set",
"network_check_smtp_ko": "Outbound e-mail (SMTP port 25) seems to be blocked by your network",
"network_check_smtp_ok": "Outbound e-mail (SMTP port 25) is not blocked",
"no_internet_connection": "The server is not connected to the Internet",
"not_enough_disk_space": "Not enough free space on '{path:s}'",
"operation_interrupted": "Was the operation manually interrupted?",
"package_not_installed": "The package '{pkgname}' is not installed",
"package_unexpected_error": "An unexpected error occurred processing the package '{pkgname}'",
"operation_interrupted": "The operation was manually interrupted?",
"package_unknown": "Unknown package '{pkgname}'",
"packages_upgrade_failed": "Could not upgrade all the packages",
"password_listed": "This password is among the most used password in the world. Please choose something more unique.",
@@ -433,8 +471,6 @@
"permission_require_account": "Permission {permission} only makes sense for users having an account, and therefore cannot be enabled for visitors.",
"port_already_closed": "Port {port:d} is already closed for {ip_version:s} connections",
"port_already_opened": "Port {port:d} is already opened for {ip_version:s} connections",
"port_available": "Port {port:d} is available",
"port_unavailable": "Port {port:d} is not available",
"recommend_to_add_first_user": "The post-install is finished, but YunoHost needs at least one user to work correctly, you should add one using 'yunohost user create <username>' or do it from the admin interface.",
"regenconf_file_backed_up": "Configuration file '{conf}' backed up to '{backup}'",
"regenconf_file_copy_failed": "Could not copy the new configuration file '{new}' to '{conf}'",
@@ -481,7 +517,6 @@
"service_description_dnsmasq": "Handles domain name resolution (DNS)",
"service_description_dovecot": "Allows e-mail clients to access/fetch email (via IMAP and POP3)",
"service_description_fail2ban": "Protects against brute-force and other kinds of attacks from the Internet",
"service_description_glances": "Monitors system info on your server",
"service_description_metronome": "Manage XMPP instant messaging accounts",
"service_description_mysql": "Stores app data (SQL database)",
"service_description_nginx": "Serves or provides access to all the websites hosted on your server",
@@ -531,7 +566,6 @@
"tools_upgrade_special_packages_completed": "YunoHost package upgrade completed.\nPress [Enter] to get the command line back",
"unbackup_app": "App '{app:s}' will not be saved",
"unexpected_error": "Something unexpected went wrong: {error}",
"unit_unknown": "Unknown unit '{unit:s}'",
"unlimit": "No quota",
"unrestore_app": "App '{app:s}' will not be restored",
"update_apt_cache_failed": "Could not to update the cache of APT (Debian's package manager). Here is a dump of the sources.list lines, which might help identify problematic lines: \n{sourceslist}",

View file

@@ -141,7 +141,7 @@
"field_invalid": "Nevalida kampo '{:s}'",
"log_app_makedefault": "Faru '{}' la defaŭlta apliko",
"migration_0003_still_on_jessie_after_main_upgrade": "Io okazis malbone dum la ĉefa ĝisdatigo: Ĉu la sistemo ankoraŭ estas en Jessie‽ Por esplori la aferon, bonvolu rigardi {log}:s …",
"migration_0011_can_not_backup_before_migration": "La sekurkopio de la sistemo antaŭ la migrado malsukcesis. Migrado malsukcesis. Eraro: {error:s}",
"migration_0011_can_not_backup_before_migration": "La sekurkopio de la sistemo ne povis finiĝi antaŭ ol la migrado malsukcesis. Eraro: {error:s}",
"migration_0011_create_group": "Krei grupon por ĉiu uzanto…",
"backup_system_part_failed": "Ne eblis sekurkopi la sistemon de '{part:s}'",
"global_settings_setting_security_postfix_compatibility": "Kongruo vs sekureca kompromiso por la Postfix-servilo. Afektas la ĉifradojn (kaj aliajn aspektojn pri sekureco)",
@@ -151,8 +151,8 @@
"migration_0011_backup_before_migration": "Krei sekurkopion de LDAP-datumbazo kaj agordojn antaŭ la efektiva migrado.",
"migration_0011_LDAP_config_dirty": "Similas ke vi agordis vian LDAP-agordon. Por ĉi tiu migrado la LDAP-agordo bezonas esti ĝisdatigita.\nVi devas konservi vian aktualan agordon, reintaligi la originalan agordon per funkciado de \"yunohost iloj regen-conf -f\" kaj reprovi la migradon",
"migration_0011_migrate_permission": "Migrado de permesoj de agordoj al aplikoj al LDAP…",
"migration_0011_migration_failed_trying_to_rollback": "Migrado malsukcesis ... provante reverti la sistemon.",
"migrations_dependencies_not_satisfied": "Ne eblas kuri migradon {id} ĉar unue vi devas ruli ĉi tiujn migradojn: {dependencies_id}",
"migration_0011_migration_failed_trying_to_rollback": "Ne povis migri ... provante redakti la sistemon.",
"migrations_dependencies_not_satisfied": "Rulu ĉi tiujn migradojn: '{dependencies_id}', antaŭ migrado {id}.",
"migrations_failed_to_load_migration": "Ne povis ŝarĝi migradon {id}: {error}",
"migrations_exclusive_options": "'--auto', '--skip' kaj '--force-rerun' estas reciproke ekskluzivaj ebloj.",
"migrations_must_provide_explicit_targets": "Vi devas provizi eksplicitajn celojn kiam vi uzas '--skip' aŭ '--force-rerun'",
@@ -162,7 +162,7 @@
"tools_upgrade_cant_hold_critical_packages": "Ne povis teni kritikajn pakojn…",
"upnp_dev_not_found": "Neniu UPnP-aparato trovita",
"migration_description_0012_postgresql_password_to_md5_authentication": "Devigu PostgreSQL-aŭtentigon uzi MD5 por lokaj ligoj",
"migration_0011_done": "Migrado sukcesis. Vi nun kapablas administri uzantajn grupojn.",
"migration_0011_done": "Migrado finiĝis. Vi nun kapablas administri uzantajn grupojn.",
"migration_0011_LDAP_update_failed": "Ne povis ĝisdatigi LDAP. Eraro: {error:s}",
"pattern_password": "Devas esti almenaŭ 3 signoj longaj",
"root_password_desynchronized": "La pasvorta administranto estis ŝanĝita, sed YunoHost ne povis propagandi ĉi tion al la radika pasvorto!",
@@ -194,9 +194,9 @@
"migration_0011_rollback_success": "Sistemo ruliĝis reen.",
"migration_0011_update_LDAP_database": "Ĝisdatigante LDAP-datumbazon…",
"migration_0011_update_LDAP_schema": "Ĝisdatigante LDAP-skemon…",
"migration_0011_failed_to_remove_stale_object": "Malsukcesis forigi neokazan objekton {dn}: {error}",
"migration_0011_failed_to_remove_stale_object": "Ne povis forigi neuzatan objekton {dn}: {error}",
"migrations_already_ran": "Tiuj migradoj estas jam faritaj: {ids}",
"migrations_no_such_migration": "Estas neniu migrado nomata {id}",
"migrations_no_such_migration": "Estas neniu migrado nomata '{id}'",
"permission_already_allowed": "Grupo '{group}' jam havas permeson '{permission}' ebligita'",
"permission_already_disallowed": "Grupo '{group}' jam havas permeson '{permission}' malebligita'",
"permission_cannot_remove_main": "Forigo de ĉefa permeso ne rajtas",
@@ -266,7 +266,7 @@
"migration_description_0008_ssh_conf_managed_by_yunohost_step2": "Lasu la SSH-agordon estu administrata de YunoHost (paŝo 2, manlibro)",
"restore_confirm_yunohost_installed": "Ĉu vi vere volas restarigi jam instalitan sistemon? [{answers:s}]",
"pattern_positive_number": "Devas esti pozitiva nombro",
"monitor_stats_file_not_found": "Statistika dosiero ne trovita",
"monitor_stats_file_not_found": "Ne povis trovi la statistikan dosieron",
"certmanager_error_no_A_record": "Neniu DNS 'A' rekordo trovita por '{domain:s}'. Vi bezonas atentigi vian domajnan nomon al via maŝino por povi instali atestilon Lasu-Ĉifri. (Se vi scias, kion vi faras, uzu '--no-checks' por malŝalti tiujn ĉekojn.)",
"update_apt_cache_failed": "Ne eblis ĝisdatigi la kaŝmemoron de APT (paka administranto de Debian). Jen rubujo de la sources.list-linioj, kiuj povus helpi identigi problemajn liniojn:\n{sourcelist}",
"migrations_no_migrations_to_run": "Neniuj migradoj por funkcii",
@@ -339,7 +339,7 @@
"log_app_upgrade": "Ĝisdatigu la aplikon '{}'",
"log_help_to_get_failed_log": "La operacio '{desc}' ne povis finiĝi. Bonvolu dividi la plenan ŝtipon de ĉi tiu operacio per la komando 'yunohost log display {name} --share' por akiri helpon",
"migration_description_0002_migrate_to_tsig_sha256": "Plibonigu sekurecon de DynDNS TSIG-ĝisdatigoj per SHA-512 anstataŭ MD5",
"monitor_disabled": "Servila monitorado nun malŝaltis",
"monitor_disabled": "Servilo-monitorado nun malŝaltita",
"pattern_port": "Devas esti valida havena numero (t.e. 0-65535)",
"port_already_closed": "Haveno {port:d} estas jam fermita por {ip_version:s} rilatoj",
"hook_name_unknown": "Nekonata hoko-nomo '{name:s}'",
@@ -407,7 +407,7 @@
"migration_0003_not_jessie": "La nuna Debian-distribuo ne estas Jessie!",
"user_unknown": "Nekonata uzanto: {user:s}",
"migrations_to_be_ran_manually": "Migrado {id} devas funkcii permane. Bonvolu iri al Iloj → Migradoj en la retpaĝa paĝo, aŭ kuri `yunohost tools migrations migrate`.",
"migration_0008_warning": "Se vi komprenas tiujn avertojn kaj konsentas lasi YunoHost pretervidi vian nunan agordon, faru la migradon. Alie, vi ankaŭ povas salti la migradon - kvankam ĝi ne rekomendas.",
"migration_0008_warning": "Se vi komprenas tiujn avertojn kaj volas ke YunoHost preterlasu vian nunan agordon, faru la migradon. Alie, vi ankaŭ povas salti la migradon, kvankam ĝi ne rekomendas.",
"certmanager_cert_renew_success": "Ni Ĉifru atestilon renovigitan por la domajno '{domain:s}'",
"global_settings_reset_success": "Antaŭaj agordoj nun estas rezervitaj al {path:s}",
"pattern_domain": "Devas esti valida domajna nomo (t.e. mia-domino.org)",
@@ -477,14 +477,14 @@
"log_tools_maindomain": "Faru de '{}' la ĉefa domajno",
"maindomain_change_failed": "Ne povis ŝanĝi la ĉefan domajnon",
"mail_domain_unknown": "Nevalida retadreso por domajno '{domain:s}'. Bonvolu uzi domajnon administritan de ĉi tiu servilo.",
"migrations_cant_reach_migration_file": "Ne povis aliri migrajn dosierojn ĉe la vojo% s",
"migrations_cant_reach_migration_file": "Ne povis aliri migrajn dosierojn ĉe la vojo '% s'",
"pattern_email": "Devas esti valida retpoŝtadreso (t.e.iu@domain.org)",
"mail_alias_remove_failed": "Ne povis forigi retpoŝton alias '{mail:s}'",
"regenconf_file_manually_removed": "La dosiero de agordo '{conf}' estis forigita permane, kaj ne estos kreita",
"monitor_enabled": "Servila monitorado nun ŝaltis",
"monitor_enabled": "Servilo-monitorado nun",
"domain_exists": "La domajno jam ekzistas",
"migration_description_0001_change_cert_group_to_sslcert": "Ŝanĝu grupajn permesojn de 'metronomo' al 'ssl-cert'",
"mysql_db_creation_failed": "MySQL-datumbazkreado malsukcesis",
"mysql_db_creation_failed": "Ne povis krei MySQL-datumbazon",
"ldap_initialized": "LDAP inicializis",
"migrate_tsig_not_needed": "Vi ne ŝajnas uzi DynDNS-domajnon, do neniu migrado necesas.",
"certmanager_domain_cert_not_selfsigned": "La atestilo por domajno {domajno:s} ne estas mem-subskribita. Ĉu vi certas, ke vi volas anstataŭigi ĝin? (Uzu '--force' por fari tion.)",
@@ -495,7 +495,7 @@
"global_settings_bad_choice_for_enum": "Malbona elekto por agordo {setting:s}, ricevita '{choice:s}', sed disponeblaj elektoj estas: {available_choices:s}",
"server_shutdown": "La servilo haltos",
"log_tools_migrations_migrate_forward": "Migri antaŭen",
"migration_0008_no_warning": "Neniu grava risko identigita pri superregado de via SSH-agordo, tamen oni ne povas esti absolute certa;)! Ekfunkciu la migradon por superregi ĝin. Alie, vi ankaŭ povas salti la migradon - kvankam ĝi ne rekomendas.",
"migration_0008_no_warning": "Supersalti vian SSH-agordon estu sekura, kvankam ĉi tio ne povas esti promesita! Ekfunkciu la migradon por superregi ĝin. Alie, vi ankaŭ povas salti la migradon, kvankam ĝi ne rekomendas.",
"regenconf_now_managed_by_yunohost": "La agorda dosiero '{conf}' nun estas administrata de YunoHost (kategorio {category}).",
"server_reboot_confirm": "Ĉu la servilo rekomencos tuj, ĉu vi certas? [{answers:s}]",
"log_app_install": "Instalu la aplikon '{}'",
@@ -563,5 +563,17 @@
"permission_currently_allowed_for_visitors": "Ĉi tiu permeso estas nuntempe donita al vizitantoj aldone al aliaj grupoj. Vi probable volas aŭ forigi la permeson de \"vizitantoj\" aŭ forigi la aliajn grupojn al kiuj ĝi nun estas koncedita.",
"permission_currently_allowed_for_all_users": "Ĉi tiu permeso estas nuntempe donita al ĉiuj uzantoj aldone al aliaj grupoj. Vi probable volas aŭ forigi la permeson \"all_users\" aŭ forigi la aliajn grupojn, kiujn ĝi nuntempe donas.",
"app_install_failed": "Ne povis instali {app} : {error}",
"app_install_script_failed": "Eraro okazis en la skripto de instalado de la app"
"app_install_script_failed": "Eraro okazis en la skripto de instalado de la app",
"app_remove_after_failed_install": "Forigado de la app post la instala fiasko …",
"diagnosis_basesystem_host": "Servilo funkcias Debian {debian_version}.",
"apps_catalog_init_success": "Aplikoj katalogsistemo inicializita !",
"apps_catalog_updating": "Ĝisdatigante katalogo de aplikoj ...",
"apps_catalog_failed_to_download": "Ne eblas elŝuti la katalogon de {apps_catalog}: {error}",
"apps_catalog_obsolete_cache": "La kaŝmemoro de la katalogo de programoj estas malplena aŭ malaktuala.",
"apps_catalog_update_success": "La aplika katalogo estis ĝisdatigita!",
"diagnosis_basesystem_kernel": "Servilo funkcias Linuksan kernon {kernel_version}",
"diagnosis_basesystem_ynh_single_version": "{0} versio: {1} ({2})",
"diagnosis_basesystem_ynh_main_version": "Servilo funkcias YunoHost {main_version} ({repo})",
"diagnosis_basesystem_ynh_inconsistent_versions": "Vi prizorgas malkonsekvencajn versiojn de la YunoHost-pakoj... plej probable pro malsukcesa aŭ parta ĝisdatigo.",
"diagnosis_display_tip_web": "Vi povas iri al la sekcio Diagnozo (en la hejmekrano) por vidi la trovitajn problemojn."
}

View file

@@ -121,8 +121,8 @@
"mail_alias_remove_failed": "No se pudo eliminar el alias de correo «{mail:s}»",
"mail_domain_unknown": "Dirección de correo no válida para el dominio «{domain:s}». Use un dominio administrado por este servidor.",
"mail_forward_remove_failed": "No se pudo eliminar el reenvío de correo «{mail:s}»",
"maindomain_change_failed": "No se pudo cambiar el dominio principal",
"maindomain_changed": "El dominio principal ha cambiado",
"main_domain_change_failed": "No se pudo cambiar el dominio principal",
"main_domain_changed": "El dominio principal ha cambiado",
"monitor_disabled": "La monitorización del servidor está ahora desactivada",
"monitor_enabled": "La monitorización del servidor está ahora activada",
"monitor_glances_con_failed": "No se pudo conectar al servidor de Glances",
@@ -637,5 +637,26 @@
"permission_already_up_to_date": "El permiso no se ha actualizado porque las peticiones de incorporación o eliminación ya coinciden con el estado actual.",
"permission_currently_allowed_for_visitors": "Este permiso se concede actualmente a los visitantes además de otros grupos. Probablemente quiere o eliminar el permiso de «visitors» o eliminar los otros grupos a los que está otorgado actualmente.",
"permission_currently_allowed_for_all_users": "Este permiso se concede actualmente a todos los usuarios además de los otros grupos. Probablemente quiere o eliminar el permiso de «all_users» o eliminar los otros grupos a los que está otorgado actualmente.",
"permission_require_account": "El permiso {permission} solo tiene sentido para usuarios con una cuenta y, por lo tanto, no se puede activar para visitantes."
"permission_require_account": "El permiso {permission} solo tiene sentido para usuarios con una cuenta y, por lo tanto, no se puede activar para visitantes.",
"app_remove_after_failed_install": "Eliminando la aplicación tras el fallo de instalación…",
"diagnosis_basesystem_host": "El servidor está ejecutando Debian {debian_version}.",
"diagnosis_basesystem_kernel": "El servidor está ejecutando el núcleo de Linux {kernel_version}",
"diagnosis_basesystem_ynh_single_version": "{0} versión: {1} ({2})",
"diagnosis_basesystem_ynh_main_version": "El servidor está ejecutando YunoHost {main_version} ({repo})",
"diagnosis_basesystem_ynh_inconsistent_versions": "Está ejecutando versiones incoherentes de los paquetes de YunoHost... probablemente por una actualización errónea o parcial.",
"diagnosis_failed_for_category": "Diagnóstico fallido para la categoría «{category}» : {error}",
"diagnosis_cache_still_valid": "(Caché aún válida para el diagnóstico de {category}. ¡Aún no se ha rediagnosticado!)",
"diagnosis_found_errors_and_warnings": "¡Encontrado(s) error(es) significativo(s) {errors} (y aviso(s) {warnings}) relacionado(s) con {category}!",
"diagnosis_display_tip_web": "Puede ir a la sección de diagnóstico (en la pantalla principal) para ver los problemas encontrados.",
"diagnosis_display_tip_cli": "Puede ejecutar «yunohost diagnosis show --issues» para mostrar los problemas encontrados.",
"apps_catalog_init_success": "¡Sistema de catálogo de aplicaciones inicializado!",
"apps_catalog_updating": "Actualizando catálogo de aplicaciones...",
"apps_catalog_failed_to_download": "No se pudo descargar el catálogo de aplicaciones {apps_catalog}: {error}",
"apps_catalog_obsolete_cache": "La caché del catálogo de aplicaciones está vacía u obsoleta.",
"apps_catalog_update_success": "¡El catálogo de aplicaciones ha sido actualizado!",
"diagnosis_cant_run_because_of_dep": "No se puede ejecutar el diagnóstico para {category} mientras haya problemas importantes relacionados con {dep}.",
"diagnosis_ignored_issues": "(+ {nb_ignored} problema(s) ignorado(s))",
"diagnosis_found_errors": "¡Encontrado(s) error(es) significativo(s) {errors} relacionado(s) con {category}!",
"diagnosis_found_warnings": "Encontrado elemento(s) {warnings} que puede(n) ser mejorado(s) para {category}.",
"diagnosis_everything_ok": "¡Todo se ve bien para {category}!"
}

View file

@@ -1 +1,3 @@
{}
{
"password_too_simple_1": "Pasahitzak gutxienez 8 karaktere izan behar ditu"
}

View file

@@ -77,7 +77,7 @@
"domain_created": "Le domaine a été créé",
"domain_creation_failed": "Impossible de créer le domaine {domain}: {error}",
"domain_deleted": "Le domaine a été supprimé",
"domain_deletion_failed": "Impossible de supprimer le domaine {domain}: {error}",
"domain_deletion_failed": "Impossible de supprimer le domaine {domain}:{error}",
"domain_dyndns_already_subscribed": "Vous avez déjà souscris à un domaine DynDNS",
"domain_dyndns_invalid": "Domaine incorrect pour un usage avec DynDNS",
"domain_dyndns_root_unknown": "Domaine DynDNS principal inconnu",
@@ -122,8 +122,8 @@
"mail_alias_remove_failed": "Impossible de supprimer l'alias de courriel '{mail:s}'",
"mail_domain_unknown": "Le domaine '{domain:s}' de cette adress de courriel n'est pas valide. Merci d'utiliser un domain administré par ce serveur.",
"mail_forward_remove_failed": "Impossible de supprimer le courriel de transfert '{mail:s}'",
"maindomain_change_failed": "Impossible de modifier le domaine principal",
"maindomain_changed": "Le domaine principal modifié",
"main_domain_change_failed": "Impossible de modifier le domaine principal",
"main_domain_changed": "Le domaine principal modifié",
"monitor_disabled": "Surveillance du serveur est maintenant arrêté",
"monitor_enabled": "La supervision du serveur est maintenant allumée",
"monitor_glances_con_failed": "Impossible de se connecter au serveur Glances",
@@ -270,7 +270,7 @@
"ldap_init_failed_to_create_admin": "L'initialisation de l'annuaire LDAP n'a pas réussi à créer l'utilisateur admin",
"ssowat_persistent_conf_read_error": "Impossible de lire la configuration persistante de SSOwat : {error:s}. Modifiez le fichier /etc/ssowat/conf.json.persistent pour réparer la syntaxe JSON",
"ssowat_persistent_conf_write_error": "Impossible de sauvegarder de la configuration persistante de SSOwat : {error:s}. Modifiez le fichier /etc/ssowat/conf.json.persistent pour réparer la syntaxe JSON",
"domain_cannot_remove_main": "Impossible de supprimer le domaine principal. Définissez d'abord un nouveau domaine principal",
"domain_cannot_remove_main": "Vous ne pouvez pas supprimer '{domain: s}' car il s'agit du domaine principal. Vous devez d'abord définir un autre domaine comme domaine principal à l'aide de 'yunohost domain main-domain -n <another-domain>', voici la liste des domaines candidats. : {other_domains: s}",
"certmanager_self_ca_conf_file_not_found": "Le fichier de configuration pour lautorité du certificat auto-signé est introuvable (fichier : {file:s})",
"certmanager_unable_to_parse_self_CA_name": "Impossible danalyser le nom de lautorité du certificat auto-signé (fichier : {file:s})",
"mailbox_used_space_dovecot_down": "Le service de courriel Dovecot doit être démarré, si vous souhaitez voir lespace disque occupé par la messagerie",
@ -394,7 +394,7 @@
"migration_0003_system_not_fully_up_to_date": "Votre système nest pas complètement à jour. Veuillez mener une mise à jour classique avant de lancer à migration à Stretch.",
"migration_0003_still_on_jessie_after_main_upgrade": "Quelque chose sest mal passé pendant la mise à niveau principale : le système est toujours sur Debian Jessie !? Pour investiguer sur le problème, veuillez regarder les journaux {log}:s …",
"migration_0003_general_warning": "Veuillez noter que cette migration est une opération délicate. Si léquipe YunoHost a fait de son mieux pour la relire et la tester, la migration pourrait tout de même casser des parties de votre système ou de vos applications.\n\nEn conséquence, nous vous recommandons :\n - de lancer une sauvegarde de vos données ou applications critiques. Plus dinformations sur https://yunohost.org/backup ;\n - dêtre patient après avoir lancé la migration : selon votre connexion internet et matériel, cela pourrait prendre jusquà quelques heures pour que tout soit à niveau.\n\nEn outre, le port SMTP utilisé par les clients de messagerie externes comme (Thunderbird ou K9-Mail) a été changé de 465 (SSL/TLS) à 587 (STARTTLS). Lancien port 465 sera automatiquement fermé et le nouveau port 587 sera ouvert dans le pare-feu. Vous et vos utilisateurs *devront* adapter la configuration de vos clients de messagerie en conséquence.",
"migration_0003_problematic_apps_warning": "Veuillez noter que des applications possiblement problématiques ont été détectées. Il semble quelles naient pas été installées depuis une liste dapplication ou quelles ne soit pas marquées comme « fonctionnelles ». En conséquence, nous ne pouvons pas garantir quelles fonctionneront après la mise à niveau : {problematic_apps}",
"migration_0003_problematic_apps_warning": "Veuillez noter que les applications installées potentiellement problématiques suivantes ont été détectées. Il semble que celles-ci n'ont pas été installées à partir d'un catalogue d'applications, ou ne sont pas marquées comme \"working \". Par conséquent, il ne peut pas être garanti qu'ils fonctionneront toujours après la mise à niveau: {problematic_apps}",
"migration_0003_modified_files": "Veuillez noter que les fichiers suivants ont été détectés comme modifiés manuellement et pourraient être écrasés à la fin de la mise à niveau : {manually_modified_files}",
"migrations_list_conflict_pending_done": "Vous ne pouvez pas utiliser --previous et --done simultanément.",
"migrations_to_be_ran_manually": "La migration {id} doit être lancée manuellement. Veuillez aller dans Outils > Migrations dans linterface admin, ou lancer `yunohost tools migrations migrate`.",
@ -454,7 +454,7 @@
"log_user_create": "Ajouter lutilisateur '{}'",
"log_user_delete": "Supprimer lutilisateur '{}'",
"log_user_update": "Mettre à jour les informations de lutilisateur '{}'",
"log_tools_maindomain": "Faire de '{}' le domaine principal",
"log_domain_main_domain": "Faire de '{}' le domaine principal",
"log_tools_migrations_migrate_forward": "Migrer vers",
"log_tools_migrations_migrate_backward": "Revenir en arrière",
"log_tools_postinstall": "Faire la post-installation de votre serveur YunoHost",
@@ -541,7 +541,7 @@
"global_settings_setting_security_ssh_compatibility": "Compatibilité versus compromis sécuritaire pour le serveur SSH. Affecte les cryptogrammes (et d'autres aspects liés à la sécurité)",
"global_settings_setting_security_postfix_compatibility": "Compatibilité versus compromis sécuritaire pour le serveur Postfix. Affecte les cryptogrammes (et d'autres aspects liés à la sécurité)",
"migration_description_0009_decouple_regenconf_from_services": "Dissocier le mécanisme « regen-conf » des services",
"migration_description_0010_migrate_to_apps_json": "Supprimer les listes d'applications obsolètes et utiliser la nouvelle liste unifiée 'apps.json' à la place",
"migration_description_0010_migrate_to_apps_json": "Supprimez les catalogues d'applications obsolètes et utilisez à la place la nouvelle liste unifiée 'apps.json' (obsolète, remplacée par la migration 13).",
"regenconf_file_kept_back": "Le fichier de configuration '{conf}' devait être supprimé par « regen-conf » (catégorie {category}) mais a été conservé.",
"regenconf_updated": "La configuration a été mise à jour pour la catégorie '{category}'",
"regenconf_would_be_updated": "La configuration aurait dû être mise à jour pour la catégorie '{category}'",
@@ -625,7 +625,7 @@
"migrations_running_forward": "Exécution de la migration {id}…",
"migrations_success_forward": "Migration {id} terminée",
"need_define_permission_before": "Redéfinissez l'autorisation à l'aide de 'yunohost user permission add -u USER' avant de supprimer un groupe autorisé",
"operation_interrupted": "L'opération a-t-elle été interrompue manuellement ?",
"operation_interrupted": "L'opération a été interrompue manuellement ?",
"permission_already_clear": "L'autorisation '{permission: s}' est déjà vide pour l'application {app: s}",
"permission_already_exist": "L'autorisation '{permission}' existe déjà",
"permission_created": "Permission '{permission:s}' créée",
@@ -664,5 +664,82 @@
"permission_currently_allowed_for_all_users": "Cette autorisation est actuellement accordée à tous les utilisateurs en plus des autres groupes. Vous voudrez probablement soit supprimer l'autorisation 'all_users', soit supprimer les autres groupes auxquels il est actuellement autorisé.",
"app_install_failed": "Impossible d'installer {app}: {error}",
"app_install_script_failed": "Une erreur est survenue dans le script d'installation de l'application",
"permission_require_account": "Permission {permission} n'a de sens que pour les utilisateurs ayant un compte et ne peut donc pas être activé pour les visiteurs."
"permission_require_account": "Permission {permission} n'a de sens que pour les utilisateurs ayant un compte et ne peut donc pas être activé pour les visiteurs.",
"app_remove_after_failed_install": "Supprimer l'application après l'échec de l'installation…",
"diagnosis_display_tip_web": "Vous pouvez aller à la section Diagnostic (dans l'écran d'accueil) pour voir les problèmes rencontrés.",
"diagnosis_cant_run_because_of_dep": "Impossible d'exécuter le diagnostic pour {category} alors qu'il existe des problèmes importants liés à {dep}.",
"diagnosis_found_errors": "Trouvé {errors} problème(s) significatif(s) lié(s) à {category} !",
"diagnosis_found_errors_and_warnings": "Trouvé {errors} problème(s) significatif(s) (et {warnings} (avertissement(s)) en relation avec {category} !",
"diagnosis_ip_not_connected_at_all": "Le serveur ne semble pas du tout connecté à Internet !?",
"diagnosis_ip_weird_resolvconf": "La résolution DNS semble fonctionner, mais soyer prudent en utilisant un fichier /etc/resolv.conf personnalisé.",
"diagnosis_ip_weird_resolvconf_details": "Au lieu de cela, ce fichier devrait être un lien symbolique vers /etc/resolvconf/run/resolv.conf lui-même pointant vers 127.0.0.1 (dnsmasq). Les résolveurs réels doivent être configurés via /etc/resolv.dnsmasq.conf.",
"diagnosis_dns_missing_record": "Selon la configuration DNS recommandée, vous devez ajouter un enregistrement DNS de type {0}, nom {1} et valeur {2}",
"diagnosis_diskusage_ok": "Le stockage {mountpoint} (sur le périphérique {device}) a toujours {espace libre {free_abs_GB} GB ({free_percent}%) !",
"diagnosis_ram_ok": "Le système dispose toujours de {available_abs_MB} MB ({available_percent}%) de RAM sur {total_abs_MB} MB.",
"diagnosis_regenconf_allgood": "Tous les fichiers de configuration sont conformes à la configuration recommandée !",
"diagnosis_security_vulnerable_to_meltdown": "Vous semblez vulnérable à la vulnérabilité de sécurité critique de Meltdown",
"diagnosis_basesystem_host": "Le serveur exécute Debian {debian_version}.",
"diagnosis_basesystem_kernel": "Le serveur exécute le noyau Linux {kernel_version}",
"diagnosis_basesystem_ynh_single_version": "{0} version: {1} ({2})",
"diagnosis_basesystem_ynh_main_version": "Le serveur exécute YunoHost {main_version} ({repo})",
"diagnosis_basesystem_ynh_inconsistent_versions": "Vous exécutez des versions incohérentes des packages YunoHost ... probablement à cause d'une mise à niveau partielle ou échouée.",
"diagnosis_display_tip_cli": "Vous pouvez exécuter 'yunohost diagnosis show --issues' pour afficher les problèmes détectés.",
"diagnosis_failed_for_category": "Échec du diagnostic pour la catégorie '{category}' : {error}",
"diagnosis_cache_still_valid": "(Le cache est toujours valide pour le diagnostic {category}. Pas re-diagnostiquer pour le moment!)",
"diagnosis_ignored_issues": "(+ {nb_ignored} questions ignorée(s))",
"diagnosis_found_warnings": "Trouvé {warnings} objet(s) pouvant être amélioré(s) pour {category}.",
"diagnosis_everything_ok": "Tout semble bien pour {category} !",
"diagnosis_failed": "Impossible d'extraire le résultat du diagnostic pour la catégorie '{category}': {error}",
"diagnosis_ip_connected_ipv4": "Le serveur est connecté à Internet via IPv4 !",
"diagnosis_ip_no_ipv4": "Le serveur ne dispose pas dune adresse IPv4 active.",
"diagnosis_ip_connected_ipv6": "Le serveur est connecté à Internet via IPv6 !",
"diagnosis_ip_no_ipv6": "Le serveur ne dispose pas d'une adresse IPv6 active.",
"diagnosis_ip_dnsresolution_working": "La résolution de nom de domaine fonctionne !",
"diagnosis_ip_broken_dnsresolution": "La résolution du nom de domaine semble interrompue pour une raison quelconque ... Un pare-feu bloque-t-il les requêtes DNS ?",
"diagnosis_ip_broken_resolvconf": "La résolution du nom de domaine semble cassée sur votre serveur, ce qui semble lié au fait que /etc/resolv.conf ne pointe pas vers 127.0.0.1.",
"diagnosis_dns_good_conf": "Bonne configuration DNS pour le domaine {domain} (catégorie {category})",
"diagnosis_dns_bad_conf": "Configuration DNS incorrecte/manquante pour le domaine {domain} (catégorie {category})",
"diagnosis_dns_discrepancy": "Selon la configuration DNS recommandée, la valeur de l'enregistrement DNS de type {0} et nom {1} doit être {2} et non {3}.",
"diagnosis_services_bad_status": "Le service {service} est {status} :/",
"diagnosis_services_good_status": "Le service {service} est {status} comme prévu !",
"diagnosis_diskusage_verylow": "Le stockage {mountpoint} (sur le périphérique {device}) ne dispose que de {free_abs_GB} Go ({free_percent}%). Vous devriez vraiment envisager de nettoyer un peu d'espace.",
"diagnosis_diskusage_low": "Le stockage {mountpoint} (sur le périphérique {device}) ne dispose que de {free_abs_GB} Go ({free_percent}%). Faites attention.",
"diagnosis_ram_verylow": "Le système ne dispose plus que de {available_abs_MB} MB ({available_percent}%)! (sur {total_abs_MB} Mo)",
"diagnosis_ram_low": "Le système n'a plus de {available_abs_MB} MB ({available_percent}%) RAM sur {total_abs_MB} MB. Faites attention.",
"diagnosis_swap_none": "Le système n'a aucun échange. Vous devez envisager dajouter au moins 256 Mo de swap pour éviter les situations où le système manque de mémoire.",
"diagnosis_swap_notsomuch": "Le système ne dispose que de {total_MB} Mo de swap. Vous devez envisager d'avoir au moins 256 Mo pour éviter les situations où le système manque de mémoire.",
"diagnosis_swap_ok": "Le système dispose de {total_MB} Mo de swap !",
"diagnosis_regenconf_manually_modified": "Le fichier de configuration {file} a été modifié manuellement.",
"diagnosis_regenconf_manually_modified_debian": "Le fichier de configuration {file} a été modifié manuellement par rapport à celui par défaut de Debian.",
"diagnosis_regenconf_manually_modified_details": "C'est probablement OK tant que vous savez ce que vous faites;) !",
"diagnosis_regenconf_manually_modified_debian_details": "Cela peut probablement être OK, mais il faut garder un œil dessus ...",
"diagnosis_regenconf_nginx_conf_broken": "La configuration de nginx semble être cassée !",
"diagnosis_security_all_good": "Aucune vulnérabilité de sécurité critique n'a été trouvée.",
"apps_catalog_init_success": "Système de catalogue d'applications initialisé !",
"apps_catalog_failed_to_download": "Impossible de télécharger le catalogue des applications {apps_catalog}:{error}",
"diagnosis_mail_ougoing_port_25_blocked": "Le port sortant 25 semble être bloqué. Vous devriez essayer de le débloquer dans le panneau de configuration de votre fournisseur de services Internet (ou hébergeur). En attendant, le serveur ne pourra pas envoyer de courrier électronique à d'autres serveurs.",
"domain_cannot_remove_main_add_new_one": "Vous ne pouvez pas supprimer '{domain:s}' car il s'agit du domaine principal et de votre seul domaine. Vous devez d'abord ajouter un autre domaine à l'aide de 'yunohost domain add <another-domain.com>', puis définir comme domaine principal à l'aide de ' yunohost domain main-domain -n <nomd'un-autre-domaine.com>' et vous pouvez ensuite supprimer le domaine '{domaine: s}' à l'aide de 'yunohost domain remove {domain:s}'.'",
"diagnosis_security_vulnerable_to_meltdown_details": "Pour résoudre ce problème, vous devez mettre à niveau votre système et redémarrer pour charger le nouveau noyau Linux (ou contacter votre fournisseur de serveur si cela ne fonctionne pas). Voir https://meltdownattack.com/ pour plus d'informations.",
"diagnosis_description_basesystem": "Système de base",
"diagnosis_description_ip": "Connectivité Internet",
"diagnosis_description_dnsrecords": "Enregistrements DNS",
"diagnosis_description_services": "Vérification de l'état des services",
"diagnosis_description_systemresources": "Ressources système",
"diagnosis_description_ports": "Exposition des ports",
"diagnosis_description_http": "Exposition HTTP",
"diagnosis_description_regenconf": "Configurations système",
"diagnosis_description_security": "Contrôles de sécurité",
"diagnosis_ports_could_not_diagnose": "Impossible de diagnostiquer si les ports sont accessibles de l'extérieur. Erreur: {error}",
"apps_catalog_updating": "Mise à jour du catalogue d'applications...",
"apps_catalog_obsolete_cache": "Le cache du catalogue d'applications est vide ou obsolète.",
"apps_catalog_update_success": "Le catalogue des applications a été mis à jour !",
"diagnosis_mail_ougoing_port_25_ok": "Le port sortant 25 n'est pas bloqué et le courrier électronique peut être envoyé à d'autres serveurs.",
"diagnosis_description_mail": "Email",
"diagnosis_ports_unreachable": "Le port {port} n'est pas accessible de l'extérieur.",
"diagnosis_ports_ok": "Le port {port} est accessible de l'extérieur.",
"diagnosis_http_could_not_diagnose": "Impossible de diagnostiquer si le domaine est accessible de l'extérieur. Erreur: {error}",
"diagnosis_http_ok": "Le domaine {domain} est accessible de l'extérieur.",
"diagnosis_http_unreachable": "Le domaine {domain} est inaccessible via HTTP de l'extérieur.",
"diagnosis_unknown_categories": "Les catégories suivantes sont inconnues: {categories}",
"migration_description_0013_futureproof_apps_catalog_system": "Migrer vers le nouveau système de catalogue d'applications à l'épreuve du temps"
}

View file

@ -136,8 +136,8 @@
"mail_domain_unknown": "Dominio d'indirizzo mail '{domain:s}' sconosciuto",
"mail_forward_remove_failed": "Impossibile rimuovere la mail inoltrata '{mail:s}'",
"mailbox_used_space_dovecot_down": "Il servizio di posta elettronica Dovecot deve essere attivato se vuoi riportare lo spazio usato dalla posta elettronica",
"maindomain_change_failed": "Impossibile cambiare il dominio principale",
"maindomain_changed": "Il dominio principale è stato cambiato",
"main_domain_change_failed": "Impossibile cambiare il dominio principale",
"main_domain_changed": "Il dominio principale è stato cambiato",
"monitor_disabled": "Il monitoraggio del sistema è stato disattivato",
"monitor_enabled": "Il monitoraggio del sistema è stato attivato",
"monitor_glances_con_failed": "Impossibile collegarsi al server Glances",
@ -402,7 +402,7 @@
"log_user_create": "Aggiungi l'utente '{}'",
"log_user_delete": "Elimina l'utente '{}'",
"log_user_update": "Aggiornate le informazioni dell'utente '{}'",
"log_tools_maindomain": "Rendi '{}' dominio principale",
"log_domain_main_domain": "Rendi '{}' dominio principale",
"log_tools_migrations_migrate_forward": "Migra avanti",
"log_tools_migrations_migrate_backward": "Migra indietro",
"log_tools_postinstall": "Postinstallazione del tuo server YunoHost",

View file

@ -5,7 +5,7 @@
"app_already_installed": "{app:s} es ja installat",
"app_already_up_to_date": "{app:s} es ja a jorn",
"installation_complete": "Installacion acabada",
"app_id_invalid": "Id daplicacion incorrècte",
"app_id_invalid": "ID daplicacion incorrècte",
"app_install_files_invalid": "Fichièrs dinstallacion incorrèctes",
"app_no_upgrade": "Pas cap daplicacion dactualizar",
"app_not_correctly_installed": "{app:s} sembla pas ben installat",
@ -41,15 +41,15 @@
"backup_archive_name_unknown": "Larchiu local de salvagarda apelat « {name:s} »es desconegut",
"action_invalid": "Accion « {action:s} »incorrècta",
"app_argument_choice_invalid": "Causida invalida pel paramètre « {name:s} », cal que siá un de {choices:s}",
"app_argument_invalid": "Valor invalida pel paramètre « {name:s} » : {error:s}",
"app_argument_invalid": "Causissètz una valor invalida pel paramètre « {name:s} » : {error:s}",
"app_argument_required": "Lo paramètre « {name:s}»es requesit",
"app_change_url_failed_nginx_reload": "La reaviada de nginx a fracassat. Vaquí la sortida de «nginx -t»:\n{nginx_errors:s}",
"app_change_url_failed_nginx_reload": "Reaviada de NGINX impossibla. Vaquí la sortida de «nginx -t»:\n{nginx_errors:s}",
"app_change_url_identical_domains": "Lancian e lo novèl coble domeni/camin son identics per {domain:s}{path:s}, pas res a far.",
"app_change_url_success": "LURL de laplicacion {app:s} a cambiat per {domain:s}{path:s}",
"app_change_url_success": "LURL de laplicacion {app:s} es ara {domain:s}{path:s}",
"app_checkurl_is_deprecated": "Packagers /!\\ app checkurl es obsolèt! Utilizatz app register-url a la plaça!",
"app_extraction_failed": "Extraccion dels fichièrs dinstallacion impossibla",
"app_incompatible": "Laplicacion {app} es pas compatibla amb vòstra version de YunoHost",
"app_location_already_used": "Laplicacion « {app}»es ja installada a aqueste emplaçament ({path})",
"app_location_already_used": "Laplicacion « {app}»es ja installada dins ({path})",
"app_manifest_invalid": "Manifest daplicacion incorrècte: {error}",
"app_package_need_update": "Lo paquet de laplicacion {app} deu èsser actualizat per poder seguir los cambiaments de YunoHost",
"app_requirements_checking": "Verificacion dels paquets requesits per {app}…",
@ -180,8 +180,8 @@
"invalid_url_format": "Format dURL pas valid",
"ldap_initialized": "Lannuari LDAP es inicializat",
"license_undefined": "indefinida",
"maindomain_change_failed": "Modificacion impossibla del domeni màger",
"maindomain_changed": "Lo domeni màger es estat modificat",
"main_domain_change_failed": "Modificacion impossibla del domeni màger",
"main_domain_changed": "Lo domeni màger es estat modificat",
"migrate_tsig_end": "La migracion cap a hmac-sha512 es acabada",
"migrate_tsig_wait_2": "2 minutas…",
"migrate_tsig_wait_3": "1 minuta…",
@ -440,7 +440,7 @@
"log_user_create": "Ajustar lutilizaire « {} »",
"log_user_delete": "Levar lutilizaire « {} »",
"log_user_update": "Actualizar las informacions a lutilizaire « {} »",
"log_tools_maindomain": "Far venir « {} » lo domeni màger",
"log_domain_main_domain": "Far venir « {} » lo domeni màger",
"log_tools_migrations_migrate_forward": "Migrar",
"log_tools_migrations_migrate_backward": "Tornar en arrièr",
"log_tools_postinstall": "Realizar la post installacion del servidor YunoHost",
@ -612,5 +612,77 @@
"migrations_must_provide_explicit_targets": "Devètz fornir una cibla explicita quand utilizatz using --skip o --force-rerun",
"migrations_exclusive_options": "--auto, --skip, e --force-rerun son las opcions exclusivas.",
"migrations_failed_to_load_migration": "Cargament impossible de la migracion {id} : {error}",
"migrations_already_ran": "Aquelas migracions sexecutèron ja : {ids}"
"migrations_already_ran": "Aquelas migracions sexecutèron ja : {ids}",
"diagnosis_basesystem_ynh_main_version": "Lo servidor fonciona amb YunoHost {main_version} ({repo})",
"migrations_dependencies_not_satisfied": "Executatz aquestas migracions : « {dependencies_id} », abans la migracion {id}.",
"migrations_no_such_migration": "I a pas cap de migracion apelada « {id} »",
"migrations_not_pending_cant_skip": "Aquestas migracions son pas en espèra, las podètz pas doncas ignorar : {ids}",
"app_action_broke_system": "Aquesta accion sembla aver copat de servicis importants : {services}",
"diagnosis_display_tip_web": "Podètz anar a la seccion Diagnostic (dins lecran dacuèlh) per veire los problèmas trobats.",
"diagnosis_ip_no_ipv6": "Lo servidor a pas dadreça IPv5 activa.",
"diagnosis_ip_not_connected_at_all": "Lo servidor sembla pas connectat a Internet ?!",
"diagnosis_security_all_good": "Cap de vulnerabilitat de seguretat critica pas trobada.",
"diagnosis_description_regenconf": "Configuracion sistèma",
"diagnosis_http_ok": "Lo domeni {domain} accessible de lexterior.",
"app_full_domain_unavailable": "Aquesta aplicacion a dèsser installada sul seu pròpri domeni, mas i a dautras aplicacions installadas sus aqueste domeni « {domain} ». Podètz utilizar allòc un josdomeni dedicat a aquesta aplicacion.",
"app_upgrade_stopped": "Lactualizacion de totas las aplicacions ses arrestada per evitar de possibles damatges pramor quèra pas possible dactualizar una aplicacion",
"diagnosis_dns_bad_conf": "Configuracion DNS incorrècta o inexistenta pel domeni {domain} (categoria {category})",
"diagnosis_ram_verylow": "Lo sistèma a solament {available_abs_MB} Mo ({available_percent}%) de memòria RAM disponibla ! (dun total de {total_abs_MB} MB)",
"diagnosis_ram_ok": "Lo sistèma a encara {available_abs_MB} Mo ({available_percent}%) de memòria RAM disponibla dun total de {total_abs_MB} MB).",
"permission_already_allowed": "Lo grop « {group} » a ja la permission « {permission} » activada",
"permission_already_disallowed": "Lo grop « {group} » a ja la permission « {permission} » desactivada",
"permission_cannot_remove_main": "La supression duna permission màger es pas autorizada",
"log_permission_url": "Actualizacion de lURL ligada a la permission « {} »",
"app_install_failed": "Installacion impossibla de {app} : {error}",
"app_install_script_failed": "Una error ses producha en installar lo script de laplicacion",
"migration_0011_failed_to_remove_stale_object": "Supression impossibla dun objècte obsolèt {dn} : {error}",
"apps_already_up_to_date": "Totas las aplicacions son ja al jorn",
"app_remove_after_failed_install": "Supression de laplicacion aprèp fracàs de linstallacion…",
"group_already_exist": "Lo grop {group} existís ja",
"group_already_exist_on_system": "Lo grop {group} existís ja dins lo sistèma de grops",
"group_user_not_in_group": "Lutilizaire {user} es pas dins lo grop {group}",
"log_user_permission_reset": "Restablir la permission « {} »",
"user_already_exists": "Lutilizaire {user} existís ja",
"diagnosis_basesystem_host": "Lo servidor fonciona amb Debian {debian_version}.",
"diagnosis_basesystem_kernel": "Lo servidor fonciona amb lo nuclèu Linuxl {kernel_version}",
"diagnosis_basesystem_ynh_single_version": "{0} version : {1} ({2})",
"diagnosis_basesystem_ynh_inconsistent_versions": "Utilizatz de versions inconsistentas dels paquets de YunoHost… probablament a causa d'una actualizacion fracassada o parciala.",
"diagnosis_display_tip_cli": "Podètz executar « yunohost diagnosis show --issues » per mostrar las errors trobadas.",
"diagnosis_ignored_issues": "(+ {nb_ignored} problèma(es) ignorat(s))",
"diagnosis_everything_ok": "Tot sembla corrècte per {category} !",
"diagnosis_ip_connected_ipv4": "Lo servidor es connectat a Internet via IPv4 !",
"diagnosis_ip_no_ipv4": "Lo servidor a pas dadreça IPv4 activa.",
"diagnosis_ip_connected_ipv6": "Lo servidor es connectat a Internet via IPv6 !",
"diagnosis_ip_dnsresolution_working": "La resolucion del nom de domeni fonciona !",
"diagnosis_dns_good_conf": "Bona configuracion DNS pel domeni {domain} (categoria {category})",
"diagnosis_failed_for_category": "Lo diagnostic a reüssit per la categoria « {category} » : {error}",
"diagnosis_cache_still_valid": "(Memòria cache totjorn valida pel diagnostic {category}. Cap dautre diagnostic pel moment !)",
"diagnosis_found_errors": "{errors} errors importantas trobadas ligadas a {category} !",
"diagnosis_services_good_status": "Lo servici {service} es {status} coma previst !",
"diagnosis_services_bad_status": "Lo servici {service} es {status} :/",
"diagnosis_swap_ok": "Lo sistèma a {total_MB} MB descambi !",
"diagnosis_regenconf_allgood": "Totes los fichièrs de configuracion son confòrmes a la configuracion recomandada !",
"diagnosis_regenconf_manually_modified": "Lo fichièr de configuracion {file} foguèt modificat manualament.",
"diagnosis_regenconf_manually_modified_details": "Es probablament bon tan que sabètz çò que fasètz ;) !",
"diagnosis_regenconf_nginx_conf_broken": "La configuracion de nginx sembla èsser copada !",
"diagnosis_security_vulnerable_to_meltdown": "Semblatz èsser vulnerable a la vulnerabilitat de seguretat critica de Meltdown",
"diagnosis_description_basesystem": "Sistèma de basa",
"diagnosis_description_ip": "Connectivitat Internet",
"diagnosis_description_dnsrecords": "Enregistraments DNS",
"diagnosis_description_services": "Verificacion destat de servicis",
"diagnosis_description_systemresources": "Resorgas sistèma",
"diagnosis_description_ports": "Exposicion dels pòrts",
"diagnosis_description_http": "Exposicion HTTP",
"diagnosis_description_security": "Verificacion de seguretat",
"diagnosis_ports_unreachable": "Lo pòrt {port} es pas accessible de lexterior.",
"diagnosis_ports_ok": "Lo pòrt {port} es accessible de lexterior.",
"diagnosis_http_unreachable": "Lo domeni {domain} es pas accessible via HTTP de lexterior.",
"diagnosis_unknown_categories": "La categorias seguentas son desconegudas : {categories}",
"diagnosis_ram_low": "Lo sistèma a {available_abs_MB} Mo ({available_percent}%) de memòria RAM disponibla dun total de {total_abs_MB} MB). Atencion.",
"diagnosis_regenconf_manually_modified_debian": "Lo fichier de configuracion {file} foguèt modificat manualament respècte al fichièr per defaut de Debian.",
"log_permission_create": "Crear la permission « {} »",
"log_permission_delete": "Suprimir la permission « {} »",
"log_user_group_create": "Crear lo grop « {} »",
"log_user_permission_update": "Actualizacion dels accèsses per la permission « {} »",
"operation_interrupted": "Loperacion es estada interrompuda manualament ?"
}

View file

@ -74,8 +74,8 @@
"mail_alias_remove_failed": "Não foi possível remover a etiqueta de correio '{mail:s}'",
"mail_domain_unknown": "Domínio de endereço de correio '{domain:s}' inválido. Por favor, usa um domínio administrado per esse servidor.",
"mail_forward_remove_failed": "Não foi possível remover o reencaminhamento de correio '{mail:s}'",
"maindomain_change_failed": "Incapaz alterar o domínio raiz",
"maindomain_changed": "Domínio raiz alterado com êxito",
"main_domain_change_failed": "Incapaz alterar o domínio raiz",
"main_domain_changed": "Domínio raiz alterado com êxito",
"monitor_disabled": "Monitorização do servidor parada com êxito",
"monitor_enabled": "Monitorização do servidor ativada com êxito",
"monitor_glances_con_failed": "Não foi possível ligar ao servidor Glances",

View file

@ -1,3 +1,11 @@
{
"password_too_simple_1": "Lösenordet måste bestå av minst åtta tecken"
"password_too_simple_1": "Lösenordet måste bestå av minst åtta tecken",
"app_action_broke_system": "Åtgärden verkar ha fått följande viktiga tjänster att haverera: {services}",
"already_up_to_date": "Ingenting att göra. Allt är redan uppdaterat.",
"admin_password": "Administratörslösenord",
"admin_password_too_long": "Välj gärna ett lösenord som inte innehåller fler än 127 tecken",
"admin_password_change_failed": "Kan inte byta lösenord",
"action_invalid": "Ej tillåten åtgärd '{action:s}'",
"admin_password_changed": "Administratörskontots lösenord ändrades",
"aborting": "Avbryter."
}

View file

@ -1 +1,3 @@
{}
{
"password_too_simple_1": "Şifre en az 8 karakter uzunluğunda olmalı"
}

View file

@ -33,15 +33,14 @@ import re
import urlparse
import subprocess
import glob
import pwd
import grp
import urllib
from collections import OrderedDict
from datetime import datetime
from moulinette import msignals, m18n, msettings
from moulinette.utils.log import getActionLogger
from moulinette.utils.filesystem import read_json, read_toml, read_yaml, write_to_json
from moulinette.utils.network import download_json
from moulinette.utils.filesystem import read_file, read_json, read_toml, read_yaml, write_to_file, write_to_json, write_to_yaml, chmod, chown, mkdir
from yunohost.service import service_log, service_status, _run_service_command
from yunohost.utils import packages
@ -50,12 +49,16 @@ from yunohost.log import is_unit_operation, OperationLogger
logger = getActionLogger('yunohost.app')
REPO_PATH = '/var/cache/yunohost/repo'
APPS_PATH = '/usr/share/yunohost/apps'
APPS_SETTING_PATH = '/etc/yunohost/apps/'
INSTALL_TMP = '/var/cache/yunohost'
APP_TMP_FOLDER = INSTALL_TMP + '/from_file'
APPSLISTS_JSON = '/etc/yunohost/appslists.json'
APPS_CATALOG_CACHE = '/var/cache/yunohost/repo'
APPS_CATALOG_CONF = '/etc/yunohost/apps_catalog.yml'
APPS_CATALOG_CRON_PATH = "/etc/cron.daily/yunohost-fetch-apps-catalog"
APPS_CATALOG_API_VERSION = 1
APPS_CATALOG_DEFAULT_URL = "https://app.yunohost.org/default"
re_github_repo = re.compile(
r'^(http[s]?://|git@)github.com[/:]'
@ -68,166 +71,6 @@ re_app_instance_name = re.compile(
)
def app_listlists():
"""
List fetched lists
"""
# Migrate appslist system if needed
# XXX move to a migration when those are implemented
if _using_legacy_appslist_system():
_migrate_appslist_system()
# Get the list
appslist_list = _read_appslist_list()
# Convert 'lastUpdate' timestamp to datetime
for name, infos in appslist_list.items():
if infos["lastUpdate"] is None:
infos["lastUpdate"] = 0
infos["lastUpdate"] = datetime.utcfromtimestamp(infos["lastUpdate"])
return appslist_list
def app_fetchlist(url=None, name=None):
"""
Fetch application list(s) from app server. By default, fetch all lists.
Keyword argument:
name -- Name of the list
url -- URL of remote JSON list
"""
if url and not url.endswith(".json"):
raise YunohostError("This is not a valid application list url. It should end with .json.")
# If needed, create folder where actual appslists are stored
if not os.path.exists(REPO_PATH):
os.makedirs(REPO_PATH)
# Migrate appslist system if needed
# XXX move that to a migration once they are finished
if _using_legacy_appslist_system():
_migrate_appslist_system()
# Read the list of appslist...
appslists = _read_appslist_list()
# Determine the list of appslist to be fetched
appslists_to_be_fetched = []
# If a url and and a name is given, try to register new list,
# the fetch only this list
if url is not None:
if name:
operation_logger = OperationLogger('app_fetchlist')
operation_logger.start()
_register_new_appslist(url, name)
# Refresh the appslists dict
appslists = _read_appslist_list()
appslists_to_be_fetched = [name]
operation_logger.success()
else:
raise YunohostError('custom_appslist_name_required')
# If a name is given, look for an appslist with that name and fetch it
elif name is not None:
if name not in appslists.keys():
raise YunohostError('appslist_unknown', appslist=name)
else:
appslists_to_be_fetched = [name]
# Otherwise, fetch all lists
else:
appslists_to_be_fetched = appslists.keys()
import requests # lazy loading this module for performance reasons
# Fetch all appslists to be fetched
for name in appslists_to_be_fetched:
url = appslists[name]["url"]
logger.debug("Attempting to fetch list %s at %s" % (name, url))
# Download file
try:
appslist_request = requests.get(url, timeout=30)
except requests.exceptions.SSLError:
logger.error(m18n.n('appslist_retrieve_error',
appslist=name,
error="SSL connection error"))
continue
except Exception as e:
logger.error(m18n.n('appslist_retrieve_error',
appslist=name,
error=str(e)))
continue
if appslist_request.status_code != 200:
logger.error(m18n.n('appslist_retrieve_error',
appslist=name,
error="Server returned code %s " %
str(appslist_request.status_code)))
continue
# Validate app list format
# TODO / Possible improvement : better validation for app list (check
# that json fields actually look like an app list and not any json
# file)
appslist = appslist_request.text
try:
json.loads(appslist)
except ValueError as e:
logger.error(m18n.n('appslist_retrieve_bad_format',
appslist=name))
continue
# Write app list to file
list_file = '%s/%s.json' % (REPO_PATH, name)
try:
with open(list_file, "w") as f:
f.write(appslist)
except Exception as e:
raise YunohostError("Error while writing appslist %s: %s" % (name, str(e)), raw_msg=True)
now = int(time.time())
appslists[name]["lastUpdate"] = now
logger.success(m18n.n('appslist_fetched', appslist=name))
# Write updated list of appslist
_write_appslist_list(appslists)
@is_unit_operation()
def app_removelist(operation_logger, name):
"""
Remove list from the repositories
Keyword argument:
name -- Name of the list to remove
"""
appslists = _read_appslist_list()
# Make sure we know this appslist
if name not in appslists.keys():
raise YunohostError('appslist_unknown', appslist=name)
operation_logger.start()
# Remove json
json_path = '%s/%s.json' % (REPO_PATH, name)
if os.path.exists(json_path):
os.remove(json_path)
# Forget about this appslist
del appslists[name]
_write_appslist_list(appslists)
logger.success(m18n.n('appslist_removed', appslist=name))
def app_list(filter=None, raw=False, installed=False, with_backup=False):
"""
List apps
@ -243,28 +86,10 @@ def app_list(filter=None, raw=False, installed=False, with_backup=False):
"""
installed = with_backup or installed
app_dict = {}
list_dict = {} if raw else []
appslists = _read_appslist_list()
for appslist in appslists.keys():
json_path = "%s/%s.json" % (REPO_PATH, appslist)
# If we don't have the json yet, try to fetch it
if not os.path.exists(json_path):
app_fetchlist(name=appslist)
# If it now exist
if os.path.exists(json_path):
appslist_content = read_json(json_path)
for app, info in appslist_content.items():
if app not in app_dict:
info['repository'] = appslist
app_dict[app] = info
else:
logger.warning("Uh there's no data for applist '%s' ... (That should be just a temporary issue?)" % appslist)
# Get app list from catalog cache
app_dict = _load_apps_catalog()
# Get app list from the app settings directory
for app in os.listdir(APPS_SETTING_PATH):
@ -315,6 +140,7 @@ def app_list(filter=None, raw=False, installed=False, with_backup=False):
# dirty: we used to have manifest containing multi_instance value in form of a string
# but we've switched to bool, this line ensure retrocompatibility
app_info_dict["manifest"]["multi_instance"] = is_true(app_info_dict["manifest"].get("multi_instance", False))
list_dict[app_id] = app_info_dict
@ -405,6 +231,7 @@ def app_map(app=None, raw=False, user=None):
app -- Specific app to map
"""
from yunohost.permission import user_permission_list
apps = []
@ -559,7 +386,7 @@ def app_change_url(operation_logger, app, domain, path):
# Retrieve arguments list for change_url script
# TODO: Allow to specify arguments
args_odict = _parse_args_from_manifest(manifest, 'change_url')
args_list = [ value[0] for value in args_odict.values() ]
args_list = [value[0] for value in args_odict.values()]
args_list.append(app)
# Prepare env. var. to pass to script
@ -612,7 +439,7 @@ def app_change_url(operation_logger, app, domain, path):
app_setting(app, 'domain', value=domain)
app_setting(app, 'path', value=path)
app_ssowatconf()
permission_update(app, permission="main", add_url=[domain + path], remove_url=[old_domain + old_path], sync_perm=True)
# avoid common mistakes
if _run_service_command("reload", "nginx") is False:
@ -656,15 +483,15 @@ def app_upgrade(app=[], url=None, file=None):
if not apps:
# FIXME : not sure what's supposed to happen if there is a url and a file but no apps...
if not url and not file:
apps = [app["id"] for app in app_list(installed=True)["apps"]]
apps = [app_["id"] for app_ in app_list(installed=True)["apps"]]
elif not isinstance(app, list):
apps = [app]
# Remove possible duplicates
apps = [app for i,app in enumerate(apps) if apps not in apps[:i]]
apps = [app_ for i, app_ in enumerate(apps) if app_ not in apps[:i]]
# Abort if any of those app is in fact not installed..
for app in [app for app in apps if not _is_installed(app)]:
for app in [app_ for app_ in apps if not _is_installed(app_)]:
raise YunohostError('app_not_installed', app=app, all_apps=_get_all_installed_apps_id())
if len(apps) == 0:
@ -706,7 +533,7 @@ def app_upgrade(app=[], url=None, file=None):
# Retrieve arguments list for upgrade script
# TODO: Allow to specify arguments
args_odict = _parse_args_from_manifest(manifest, 'upgrade')
args_list = [ value[0] for value in args_odict.values() ]
args_list = [value[0] for value in args_odict.values()]
args_list.append(app_instance_name)
# Prepare env. var. to pass to script
@ -721,6 +548,9 @@ def app_upgrade(app=[], url=None, file=None):
operation_logger = OperationLogger('app_upgrade', related_to, env=env_dict)
operation_logger.start()
# Attempt to patch legacy helpers ...
_patch_legacy_helpers(extracted_app_folder)
# Apply dirty patch to make php5 apps compatible with php7
_patch_php5(extracted_app_folder)
@ -864,8 +694,6 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu
if answer.upper() != "Y":
raise YunohostError("aborting")
raw_app_list = app_list(raw=True)
if app in raw_app_list or ('@' in app) or ('http://' in app) or ('https://' in app):
@ -926,7 +754,7 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu
args_dict = {} if not args else \
dict(urlparse.parse_qsl(args, keep_blank_values=True))
args_odict = _parse_args_from_manifest(manifest, 'install', args=args_dict)
args_list = [ value[0] for value in args_odict.values() ]
args_list = [value[0] for value in args_odict.values()]
args_list.append(app_instance_name)
# Validate domain / path availability for webapps
@ -944,8 +772,8 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu
# Tell the operation_logger to redact all password-type args
# Also redact the % escaped version of the password that might appear in
# the 'args' section of metadata (relevant for password with non-alphanumeric char)
data_to_redact = [ value[0] for value in args_odict.values() if value[1] == "password" ]
data_to_redact += [ urllib.quote(data) for data in data_to_redact if urllib.quote(data) != data ]
data_to_redact = [value[0] for value in args_odict.values() if value[1] == "password"]
data_to_redact += [urllib.quote(data) for data in data_to_redact if urllib.quote(data) != data]
operation_logger.data_to_redact.extend(data_to_redact)
operation_logger.related_to = [s for s in operation_logger.related_to if s[0] != "app"]
@ -969,6 +797,9 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu
app_settings['install_time'] = status['installed_at']
_set_app_settings(app_instance_name, app_settings)
# Attempt to patch legacy helpers ...
_patch_legacy_helpers(extracted_app_folder)
# Apply dirty patch to make php5 apps compatible with php7
_patch_php5(extracted_app_folder)
@ -1200,9 +1031,12 @@ def app_remove(operation_logger, app):
# TODO: display fail messages from script
try:
shutil.rmtree('/tmp/yunohost_remove')
except:
except Exception:
pass
# Attempt to patch legacy helpers ...
_patch_legacy_helpers(app_setting_path)
# Apply dirty patch to make php5 apps compatible with php7 (e.g. the remove
# script might date back from jessie install)
_patch_php5(app_setting_path)
@ -1318,28 +1152,6 @@ def app_clearaccess(apps):
return {'allowed_users': output}
def app_debug(app):
"""
Display debug informations for an app
Keyword argument:
app
"""
manifest = _get_manifest_of_app(os.path.join(APPS_SETTING_PATH, app))
return {
'name': manifest['id'],
'label': manifest['name'],
'services': [{
"name": x,
"logs": [{
"file_name": y,
"file_content": "\n".join(z),
} for (y, z) in sorted(service_log(x).items(), key=lambda x: x[0])],
} for x in sorted(manifest.get("services", []))]
}
@is_unit_operation()
def app_makedefault(operation_logger, app, domain=None):
"""
@ -1426,24 +1238,6 @@ def app_setting(app, key, value=None, delete=False):
user_permission_update(app + ".main", remove="all_users", add="visitors")
def app_checkport(port):
"""
Check availability of a local port
Keyword argument:
port -- Port to check
"""
# This import cannot be moved on top of file because it create a recursive
# import...
from yunohost.tools import tools_port_available
if tools_port_available(port):
logger.success(m18n.n('port_available', port=int(port)))
else:
raise YunohostError('port_unavailable', port=int(port))
def app_register_url(app, domain, path):
"""
Book/register a web path for a given app
@ -1487,93 +1281,6 @@ def app_register_url(app, domain, path):
app_setting(app, 'path', value=path)
def app_checkurl(url, app=None):
"""
Check availability of a web path
Keyword argument:
url -- Url to check
app -- Write domain & path to app settings for further checks
"""
logger.error("Packagers /!\\ : 'app checkurl' is deprecated ! Please use the helper 'ynh_webpath_register' instead !")
from yunohost.domain import domain_list, _normalize_domain_path
if "https://" == url[:8]:
url = url[8:]
elif "http://" == url[:7]:
url = url[7:]
if url[-1:] != '/':
url = url + '/'
domain = url[:url.index('/')]
path = url[url.index('/'):]
installed = False
domain, path = _normalize_domain_path(domain, path)
apps_map = app_map(raw=True)
if domain not in domain_list()['domains']:
raise YunohostError('domain_unknown')
if domain in apps_map:
# Loop through apps
for p, a in apps_map[domain].items():
# Skip requested app checking
if app is not None and a['id'] == app:
installed = True
continue
if path == p:
raise YunohostError('app_location_already_used', app=a["id"], path=path)
# can't install "/a/b/" if "/a/" exists
elif path.startswith(p) or p.startswith(path):
raise YunohostError('app_location_install_failed', other_path=p, other_app=a['id'])
if app is not None and not installed:
app_setting(app, 'domain', value=domain)
app_setting(app, 'path', value=path)
def app_initdb(user, password=None, db=None, sql=None):
"""
Create database and initialize it with optionnal attached script
Keyword argument:
db -- DB name (user unless set)
user -- Name of the DB user
password -- Password of the DB (generated unless set)
sql -- Initial SQL file
"""
logger.error("Packagers /!\\ : 'app initdb' is deprecated ! Please use the helper 'ynh_mysql_setup_db' instead !")
if db is None:
db = user
return_pwd = False
if password is None:
password = random_password(12)
return_pwd = True
mysql_root_pwd = open('/etc/yunohost/mysql').read().rstrip()
mysql_command = 'mysql -u root -p%s -e "CREATE DATABASE %s ; GRANT ALL PRIVILEGES ON %s.* TO \'%s\'@localhost IDENTIFIED BY \'%s\';"' % (mysql_root_pwd, db, db, user, password)
if os.system(mysql_command) != 0:
raise YunohostError('mysql_db_creation_failed')
if sql is not None:
if os.system('mysql -u %s -p%s %s < %s' % (user, password, db, sql)) != 0:
raise YunohostError('mysql_db_init_failed')
if return_pwd:
return password
logger.success(m18n.n('mysql_db_initialized'))
def app_ssowatconf():
"""
Regenerate SSOwat configuration file
@ -1676,7 +1383,8 @@ def app_ssowatconf():
for domain in domains:
skipped_urls.extend([domain + '/yunohost/admin', domain + '/yunohost/api'])
# Authorize ACME challenge url
# Authorize ynh remote diagnosis, ACME challenge and mail autoconfig urls
skipped_regex.append("^[^/]*/%.well%-known/ynh%-diagnosis/.*$")
skipped_regex.append("^[^/]*/%.well%-known/acme%-challenge/.*$")
skipped_regex.append("^[^/]*/%.well%-known/autoconfig/mail/config%-v1%.1%.xml.*$")
@ -2129,8 +1837,7 @@ def _get_app_config_panel(app_id):
"panel": [],
}
panels = filter(lambda (key, value): key not in ("name", "version")
and isinstance(value, OrderedDict),
panels = filter(lambda (key, value): key not in ("name", "version") and isinstance(value, OrderedDict),
toml_config_panel.items())
for key, value in panels:
@ -2140,8 +1847,7 @@ def _get_app_config_panel(app_id):
"sections": [],
}
sections = filter(lambda (k, v): k not in ("name",)
and isinstance(v, OrderedDict),
sections = filter(lambda (k, v): k not in ("name",) and isinstance(v, OrderedDict),
value.items())
for section_key, section_value in sections:
@ -2151,8 +1857,7 @@ def _get_app_config_panel(app_id):
"options": [],
}
options = filter(lambda (k, v): k not in ("name",)
and isinstance(v, OrderedDict),
options = filter(lambda (k, v): k not in ("name",) and isinstance(v, OrderedDict),
section_value.items())
for option_key, option_value in options:
@ -2688,38 +2393,14 @@ def _check_manifest_requirements(manifest, app_instance_name):
"""Check if required packages are met from the manifest"""
requirements = manifest.get('requirements', dict())
# FIXME: Deprecate min_version key
if 'min_version' in manifest:
requirements['yunohost'] = '>> {0}'.format(manifest['min_version'])
logger.debug("the manifest key 'min_version' is deprecated, "
"use 'requirements' instead.")
# Validate multi-instance app
if is_true(manifest.get('multi_instance', False)):
# Handle backward-incompatible change introduced in yunohost >= 2.3.6
# See https://github.com/YunoHost/issues/issues/156
yunohost_req = requirements.get('yunohost', None)
if (not yunohost_req or
not packages.SpecifierSet(yunohost_req) & '>= 2.3.6'):
raise YunohostError('{0}{1}'.format(
m18n.g('colon', m18n.n('app_incompatible'), app=app_instance_name),
m18n.n('app_package_need_update', app=app_instance_name)))
elif not requirements:
if not requirements:
return
logger.debug(m18n.n('app_requirements_checking', app=app_instance_name))
# Retrieve versions of each required package
try:
versions = packages.get_installed_version(
*requirements.keys(), strict=True, as_dict=True)
except packages.PackageException as e:
raise YunohostError('app_requirements_failed', error=str(e), app=app_instance_name)
# Iterate over requirements
for pkgname, spec in requirements.items():
version = versions[pkgname]
if version not in packages.SpecifierSet(spec):
if not packages.meets_version_specifier(pkgname, spec):
raise YunohostError('app_requirements_unmeet',
pkgname=pkgname, version=version,
spec=spec, app=app_instance_name)
@ -2992,151 +2673,160 @@ def _parse_app_instance_name(app_instance_name):
return (appid, app_instance_nb)
def _using_legacy_appslist_system():
#
# ############################### #
# Applications list management #
# ############################### #
#
def _initialize_apps_catalog_system():
"""
Return True if we're using the old fetchlist scheme.
This is determined by the presence of some cron job yunohost-applist-foo
This function is meant to initialize the apps_catalog system with YunoHost's default app catalog.
It also creates the cron job that will update the list every day
"""
return glob.glob("/etc/cron.d/yunohost-applist-*") != []
def _migrate_appslist_system():
"""
Migrate from the legacy fetchlist system to the new one
"""
legacy_crons = glob.glob("/etc/cron.d/yunohost-applist-*")
for cron_path in legacy_crons:
appslist_name = os.path.basename(cron_path).replace("yunohost-applist-", "")
logger.debug(m18n.n('appslist_migrating', appslist=appslist_name))
# Parse appslist url in cron
cron_file_content = open(cron_path).read().strip()
appslist_url_parse = re.search("-u (https?://[^ ]+)", cron_file_content)
# Abort if we did not find an url
if not appslist_url_parse or not appslist_url_parse.groups():
# Bkp the old cron job somewhere else
bkp_file = "/etc/yunohost/%s.oldlist.bkp" % appslist_name
os.rename(cron_path, bkp_file)
# Notice the user
logger.warning(m18n.n('appslist_could_not_migrate',
appslist=appslist_name,
bkp_file=bkp_file))
# Otherwise, register the list and remove the legacy cron
else:
appslist_url = appslist_url_parse.groups()[0]
try:
_register_new_appslist(appslist_url, appslist_name)
# Might get an exception if two legacy cron jobs conflict
# in terms of url...
except Exception as e:
logger.error(str(e))
# Bkp the old cron job somewhere else
bkp_file = "/etc/yunohost/%s.oldlist.bkp" % appslist_name
os.rename(cron_path, bkp_file)
# Notice the user
logger.warning(m18n.n('appslist_could_not_migrate',
appslist=appslist_name,
bkp_file=bkp_file))
else:
os.remove(cron_path)
def _install_appslist_fetch_cron():
cron_job_file = "/etc/cron.daily/yunohost-fetch-appslists"
logger.debug("Installing appslist fetch cron job")
default_apps_catalog_list = [{"id": "default", "url": APPS_CATALOG_DEFAULT_URL}]
cron_job = []
cron_job.append("#!/bin/bash")
# We add a random delay between 0 and 60 min to avoid every instance fetching
# the appslist at the same time every night
# the apps catalog at the same time every night
cron_job.append("(sleep $((RANDOM%3600));")
cron_job.append("yunohost app fetchlist > /dev/null 2>&1) &")
with open(cron_job_file, "w") as f:
f.write('\n'.join(cron_job))
_set_permissions(cron_job_file, "root", "root", 0o755)
# FIXME - Duplicate from certificate.py, should be moved into a common helper
# thing...
def _set_permissions(path, user, group, permissions):
uid = pwd.getpwnam(user).pw_uid
gid = grp.getgrnam(group).gr_gid
os.chown(path, uid, gid)
os.chmod(path, permissions)
def _read_appslist_list():
"""
Read the json corresponding to the list of appslists
"""
# If file does not exists yet, return empty dict
if not os.path.exists(APPSLISTS_JSON):
return {}
# Read file content
with open(APPSLISTS_JSON, "r") as f:
appslists_json = f.read()
# Parse json, throw exception if what we got from file is not a valid json
cron_job.append("yunohost tools update --apps > /dev/null) &")
try:
appslists = json.loads(appslists_json)
except ValueError:
raise YunohostError('appslist_corrupted_json', filename=APPSLISTS_JSON)
logger.debug("Initializing apps catalog system with YunoHost's default app list")
write_to_yaml(APPS_CATALOG_CONF, default_apps_catalog_list)
return appslists
def _write_appslist_list(appslist_lists):
"""
Update the json containing list of appslists
"""
# Write appslist list
try:
with open(APPSLISTS_JSON, "w") as f:
json.dump(appslist_lists, f)
logger.debug("Installing apps catalog fetch daily cron job")
write_to_file(APPS_CATALOG_CRON_PATH, '\n'.join(cron_job))
chown(APPS_CATALOG_CRON_PATH, uid="root", gid="root")
chmod(APPS_CATALOG_CRON_PATH, 0o755)
except Exception as e:
raise YunohostError("Error while writing list of appslist %s: %s" %
(APPSLISTS_JSON, str(e)), raw_msg=True)
raise YunohostError("Could not initialize the apps catalog system... : %s" % str(e))
logger.success(m18n.n("apps_catalog_init_success"))
def _register_new_appslist(url, name):
def _read_apps_catalog_list():
"""
Add a new appslist to be fetched regularly.
Raise an exception if url or name conflicts with an existing list.
Read the json corresponding to the list of apps catalogs
"""
appslist_list = _read_appslist_list()
# Legacy code - can be removed after moving to buster (if the migration got merged before buster)
if os.path.exists('/etc/yunohost/appslists.json'):
from yunohost.tools import _get_migration_by_name
migration = _get_migration_by_name("futureproof_apps_catalog_system")
migration.migrate()
# Check if name conflicts with an existing list
if name in appslist_list:
raise YunohostError('appslist_name_already_tracked', name=name)
try:
list_ = read_yaml(APPS_CATALOG_CONF)
# Support the case where file exists but is empty
# by returning [] if list_ is None
return list_ if list_ else []
except Exception as e:
raise YunohostError("Could not read the apps_catalog list ... : %s" % str(e))
# Check if url conflicts with an existing list
known_appslist_urls = [appslist["url"] for _, appslist in appslist_list.items()]
if url in known_appslist_urls:
raise YunohostError('appslist_url_already_tracked', url=url)
def _actual_apps_catalog_api_url(base_url):
logger.debug("Registering new appslist %s at %s" % (name, url))
return "{base_url}/v{version}/apps.json".format(base_url=base_url, version=APPS_CATALOG_API_VERSION)
appslist_list[name] = {
"url": url,
"lastUpdate": None
}
_write_appslist_list(appslist_list)
def _update_apps_catalog():
"""
Fetches the json for each apps_catalog and update the cache
_install_appslist_fetch_cron()
apps_catalog_list is for example :
[ {"id": "default", "url": "https://app.yunohost.org/default/"} ]
Then for each apps_catalog, the actual json URL to be fetched is like :
https://app.yunohost.org/default/vX/apps.json
And store it in :
/var/cache/yunohost/repo/default.json
"""
apps_catalog_list = _read_apps_catalog_list()
logger.info(m18n.n("apps_catalog_updating"))
# Create cache folder if needed
if not os.path.exists(APPS_CATALOG_CACHE):
logger.debug("Initialize folder for apps catalog cache")
mkdir(APPS_CATALOG_CACHE, mode=0o750, parents=True, uid='root')
for apps_catalog in apps_catalog_list:
apps_catalog_id = apps_catalog["id"]
actual_api_url = _actual_apps_catalog_api_url(apps_catalog["url"])
# Fetch the json
try:
apps_catalog_content = download_json(actual_api_url)
except Exception as e:
raise YunohostError("apps_catalog_failed_to_download", apps_catalog=apps_catalog_id, error=str(e))
# Remember the apps_catalog api version for later
apps_catalog_content["from_api_version"] = APPS_CATALOG_API_VERSION
# Save the apps_catalog data in the cache
cache_file = "{cache_folder}/{list}.json".format(cache_folder=APPS_CATALOG_CACHE, list=apps_catalog_id)
try:
write_to_json(cache_file, apps_catalog_content)
except Exception as e:
raise YunohostError("Unable to write cache data for %s apps_catalog : %s" % (apps_catalog_id, str(e)))
logger.success(m18n.n("apps_catalog_update_success"))
def _load_apps_catalog():
"""
Read all the apps catalog cache files and build a single dict (app_dict)
corresponding to all known apps in all indexes
"""
app_dict = {}
for apps_catalog_id in [L["id"] for L in _read_apps_catalog_list()]:
# Let's load the json from cache for this catalog
cache_file = "{cache_folder}/{list}.json".format(cache_folder=APPS_CATALOG_CACHE, list=apps_catalog_id)
try:
apps_catalog_content = read_json(cache_file) if os.path.exists(cache_file) else None
except Exception as e:
raise ("Unable to read cache for apps_catalog %s : %s" % (apps_catalog_id, str(e)))
# Check that the version of the data matches version ....
# ... otherwise it means we updated yunohost in the meantime
# and need to update the cache for everything to be consistent
if not apps_catalog_content or apps_catalog_content.get("from_api_version") != APPS_CATALOG_API_VERSION:
logger.info(m18n.n("apps_catalog_obsolete_cache"))
_update_apps_catalog()
apps_catalog_content = read_json(cache_file)
del apps_catalog_content["from_api_version"]
# Add apps from this catalog to the output
for app, info in apps_catalog_content.items():
# (N.B. : there's a small edge case where multiple apps catalog could be listing the same apps ...
# in which case we keep only the first one found)
if app in app_dict:
logger.warning("Duplicate app %s found between apps catalog %s and %s" % (app, apps_catalog_id, app_dict[app]['repository']))
continue
info['repository'] = apps_catalog_id
app_dict[app] = info
return app_dict
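# Rough shape of the returned dict (app names are hypothetical examples):
#     {
#         "wordpress": {..., "repository": "default"},
#         "nextcloud": {..., "repository": "default"},
#     }
# i.e. one entry per known app, with "repository" recording which catalog it came from.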
#
# ############################### #
# Small utilities #
# ############################### #
#
def is_true(arg):
@ -3256,3 +2946,76 @@ def _patch_php5(app_folder):
"-e 's@php5@php7.0@g' " \
"%s" % filename
os.system(c)
def _patch_legacy_helpers(app_folder):
files_to_patch = []
files_to_patch.extend(glob.glob("%s/scripts/*" % app_folder))
files_to_patch.extend(glob.glob("%s/scripts/.*" % app_folder))
stuff_to_replace = {
# Replace
# sudo yunohost app initdb $db_user -p $db_pwd
# by
# ynh_mysql_setup_db --db_user=$db_user --db_name=$db_user --db_pwd=$db_pwd
"yunohost app initdb": (
r"(sudo )?yunohost app initdb \"?(\$\{?\w+\}?)\"?\s+-p\s\"?(\$\{?\w+\}?)\"?",
r"ynh_mysql_setup_db --db_user=\2 --db_name=\2 --db_pwd=\3"),
# Replace
# sudo yunohost app checkport whaterver
# by
# ynh_port_available whatever
"yunohost app checkport": (
r"(sudo )?yunohost app checkport",
r"ynh_port_available"),
# We can't migrate easily port-available
# .. but at the time of writing this code, only two non-working apps are using it.
"yunohost tools port-available": (None, None),
# Replace
# yunohost app checkurl "${domain}${path_url}" -a "${app}"
# by
# ynh_webpath_register --app=${app} --domain=${domain} --path_url=${path_url}
"yunohost app checkurl": (
r"(sudo )?yunohost app checkurl \"?(\$\{?\w+\}?)\/?(\$\{?\w+\}?)\"?\s+-a\s\"?(\$\{?\w+\}?)\"?",
r"ynh_webpath_register --app=\4 --domain=\2 --path_url=\3"),
}
stuff_to_replace_compiled = {h: (re.compile(r[0]), r[1]) if r[0] else (None,None) for h, r in stuff_to_replace.items()}
for filename in files_to_patch:
# Ignore non-regular files
if not os.path.isfile(filename):
continue
content = read_file(filename)
replaced_stuff = False
for helper, regexes in stuff_to_replace_compiled.items():
pattern, replace = regexes
# If helper is used, attempt to patch the file
if helper in content and pattern != "":
content = pattern.sub(replace, content)
replaced_stuff = True
# If the helper is *still* in the content, it means that we
# couldn't patch the deprecated helper in the previous lines. In
# that case, abort the install or whichever step is performed
if helper in content:
raise YunohostError("This app is likely pretty old and uses deprecated / outdated helpers that can't be migrated easily. It can't be installed anymore.")
if replaced_stuff:
# Check that the app does load the helpers
# If it doesn't, add the instruction ourselves (making sure it's after the #!/bin/bash if it's there...)
if filename.split("/")[-1] in ["install", "remove", "upgrade", "backup", "restore"]:
source_helpers = "source /usr/share/yunohost/helpers"
if source_helpers not in content:
content.replace("#!/bin/bash", "#!/bin/bash\n"+source_helpers)
if source_helpers not in content:
content = source_helpers + "\n" + content
# Actually write the new content in the file
write_to_file(filename, content)
# And complain about those damn deprecated helpers
logger.error("/!\ Packagers ! This app uses a very old deprecated helpers ... Yunohost automatically patched the helpers to use the new recommended practice, but please do consider fixing the upstream code right now ...")

View file

@ -43,12 +43,11 @@ from moulinette.utils.log import getActionLogger
from moulinette.utils.filesystem import read_file, mkdir, write_to_yaml, read_yaml
from yunohost.app import (
app_info, _is_installed, _parse_app_instance_name, _patch_php5, dump_app_log_extract_for_debugging
app_info, _is_installed, _parse_app_instance_name, _patch_php5, dump_app_log_extract_for_debugging, _patch_legacy_helpers
)
from yunohost.hook import (
hook_list, hook_info, hook_callback, hook_exec, CUSTOM_HOOK_FOLDER
)
from yunohost.monitor import binary_to_human
from yunohost.tools import tools_postinstall
from yunohost.regenconf import regen_conf
from yunohost.log import OperationLogger
@ -1322,6 +1321,9 @@ class RestoreManager():
app_settings_in_archive = os.path.join(app_dir_in_archive, 'settings')
app_scripts_in_archive = os.path.join(app_settings_in_archive, 'scripts')
# Attempt to patch legacy helpers...
_patch_legacy_helpers(app_settings_in_archive)
# Apply dirty patch to make php5 apps compatible with php7
_patch_php5(app_settings_in_archive)
@ -2495,3 +2497,23 @@ def disk_usage(path):
du_output = subprocess.check_output(['du', '-sb', path])
return int(du_output.split()[0].decode('utf-8'))
def binary_to_human(n, customary=False):
"""
Convert bytes or bits into human readable format with binary prefix
Keyword argument:
n -- Number to convert
customary -- Use customary symbol instead of IEC standard
"""
symbols = ('Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
if customary:
symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols):
prefix[s] = 1 << (i + 1) * 10
for s in reversed(symbols):
if n >= prefix[s]:
value = float(n) / prefix[s]
return '%.1f%s' % (value, s)
return "%s" % n

View file

@ -1,48 +1,13 @@
import os
from moulinette.utils.log import getActionLogger
from yunohost.app import app_fetchlist, app_removelist, _read_appslist_list, APPSLISTS_JSON
from yunohost.tools import Migration
logger = getActionLogger('yunohost.migration')
BASE_CONF_PATH = '/home/yunohost.conf'
BACKUP_CONF_DIR = os.path.join(BASE_CONF_PATH, 'backup')
APPSLISTS_BACKUP = os.path.join(BACKUP_CONF_DIR, "appslist_before_migration_to_unified_list.json")
class MyMigration(Migration):
"Migrate from official.json to apps.json"
"Migrate from official.json to apps.json (outdated, replaced by migration 13)"
def run(self):
# Backup current app list json
os.system("cp %s %s" % (APPSLISTS_JSON, APPSLISTS_BACKUP))
# Remove all the deprecated lists
lists_to_remove = [
"app.yunohost.org/list.json", # Old list on old installs, alias to official.json
"app.yunohost.org/official.json",
"app.yunohost.org/community.json",
"labriqueinter.net/apps/labriqueinternet.json",
"labriqueinter.net/internetcube.json"
]
try:
appslists = _read_appslist_list()
for appslist, infos in appslists.items():
if infos["url"].split("//")[-1] in lists_to_remove:
app_removelist(name=appslist)
# Replace by apps.json list
app_fetchlist(name="yunohost",
url="https://app.yunohost.org/apps.json")
except Exception:
if os.path.exists(APPSLISTS_BACKUP):
os.system("cp %s %s" % (APPSLISTS_BACKUP, APPSLISTS_JSON))
raise
else:
if os.path.exists(APPSLISTS_BACKUP):
os.remove(APPSLISTS_BACKUP)
logger.info("This migration is oudated and doesn't do anything anymore. The migration 13 will handle this instead.")
pass

View file

@ -7,7 +7,7 @@ from moulinette.utils.log import getActionLogger
from moulinette.utils.filesystem import read_yaml
from yunohost.tools import Migration
from yunohost.user import user_group_create, user_group_update
from yunohost.user import user_list, user_group_create, user_group_update
from yunohost.app import app_setting, app_list
from yunohost.regenconf import regen_conf, BACKUP_CONF_DIR
from yunohost.permission import permission_create, user_permission_update, permission_sync_to_user
@ -109,10 +109,11 @@ class MyMigration(Migration):
url = "/" if domain and path else None
if permission:
allowed_groups = permission.split(',')
known_users = user_list()["users"].keys()
allowed = [user for user in permission.split(',') if user in known_users]
else:
allowed_groups = ["all_users"]
permission_create(app+".main", url=url, allowed=allowed_groups, sync_perm=False)
allowed = ["all_users"]
permission_create(app+".main", url=url, allowed=allowed, sync_perm=False)
app_setting(app, 'allowed_users', delete=True)

View file

@ -0,0 +1,46 @@
import os
import shutil
from moulinette.utils.log import getActionLogger
from moulinette.utils.filesystem import read_json
from yunohost.tools import Migration
from yunohost.app import (_initialize_apps_catalog_system,
_update_apps_catalog,
APPS_CATALOG_CACHE,
APPS_CATALOG_CONF)
logger = getActionLogger('yunohost.migration')
LEGACY_APPS_CATALOG_CONF = '/etc/yunohost/appslists.json'
LEGACY_APPS_CATALOG_CONF_BACKUP = LEGACY_APPS_CATALOG_CONF + ".old"
class MyMigration(Migration):
"Migrate to the new future-proof apps catalog system"
def migrate(self):
if not os.path.exists(LEGACY_APPS_CATALOG_CONF):
logger.info("No need to do anything")
# Destroy old lecacy cache
if os.path.exists(APPS_CATALOG_CACHE):
shutil.rmtree(APPS_CATALOG_CACHE)
# Backup the legacy file
try:
legacy_catalogs = read_json(LEGACY_APPS_CATALOG_CONF)
# If there's only one catalog, we assume it's just the old official catalog
# Otherwise, warn the (power-?)users that they should migrate their old catalogs manually
if len(legacy_catalogs) > 1:
logger.warning("It looks like you had additional apps_catalog in the configuration file %s! YunoHost now uses %s instead, but it won't migrate your custom apps_catalog. You should do this manually. The old file has been backuped in %s." % (LEGACY_APPS_CATALOG_CONF, APPS_CATALOG_CONF, LEGACY_APPS_CATALOG_CONF_BACKUP))
except Exception as e:
logger.warning("Unable to parse the legacy conf %s (error : %s) ... migrating anyway" % (LEGACY_APPS_CATALOG_CONF, str(e)))
os.rename(LEGACY_APPS_CATALOG_CONF, LEGACY_APPS_CATALOG_CONF_BACKUP)
_initialize_apps_catalog_system()
_update_apps_catalog()

src/yunohost/diagnosis.py (new file, 426 lines)
View file

@ -0,0 +1,426 @@
# -*- coding: utf-8 -*-
""" License
Copyright (C) 2018 YunoHost
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program; if not, see http://www.gnu.org/licenses
"""
""" diagnosis.py
Look for possible issues on the server
"""
import os
import time
from moulinette import m18n, msettings
from moulinette.utils import log
from moulinette.utils.filesystem import read_json, write_to_json, read_yaml, write_to_yaml
from yunohost.utils.error import YunohostError
from yunohost.hook import hook_list, hook_exec
logger = log.getActionLogger('yunohost.diagnosis')
DIAGNOSIS_CACHE = "/var/cache/yunohost/diagnosis/"
DIAGNOSIS_CONFIG_FILE = '/etc/yunohost/diagnosis.yml'
def diagnosis_list():
all_categories_names = [h for h, _ in _list_diagnosis_categories()]
return {"categories": all_categories_names}
def diagnosis_show(categories=[], issues=False, full=False, share=False):
# Get all the categories
all_categories = _list_diagnosis_categories()
all_categories_names = [category for category, _ in all_categories]
# Check the requested category makes sense
if categories == []:
categories = all_categories_names
else:
unknown_categories = [c for c in categories if c not in all_categories_names]
if unknown_categories:
raise YunohostError('diagnosis_unknown_categories', categories=", ".join(categories))
# Fetch all reports
all_reports = []
for category in categories:
try:
report = Diagnoser.get_cached_report(category)
except Exception as e:
logger.error(m18n.n("diagnosis_failed", category=category, error=str(e)))
else:
add_ignore_flag_to_issues(report)
if not full:
del report["timestamp"]
del report["cached_for"]
report["items"] = [item for item in report["items"] if not item["ignored"]]
for item in report["items"]:
del item["meta"]
del item["ignored"]
if "data" in item:
del item["data"]
if issues:
report["items"] = [item for item in report["items"] if item["status"] in ["WARNING", "ERROR"]]
# Ignore this category if no issue was found
if not report["items"]:
continue
all_reports.append(report)
if share:
from yunohost.utils.yunopaste import yunopaste
content = _dump_human_readable_reports(all_reports)
url = yunopaste(content)
logger.info(m18n.n("log_available_on_yunopaste", url=url))
if msettings.get('interface') == 'api':
return {"url": url}
else:
return
else:
return {"reports": all_reports}
def _dump_human_readable_reports(reports):
output = ""
for report in reports:
output += "=================================\n"
output += "{description} ({id})\n".format(**report)
output += "=================================\n\n"
for item in report["items"]:
output += "[{status}] {summary}\n".format(**item)
for detail in item.get("details", []):
output += " - " + detail + "\n"
output += "\n"
output += "\n\n"
return(output)
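# Sketch of the resulting text for one report (values are made up, the layout
# follows the format strings above):
#
#     =================================
#     Internet connectivity (ip)
#     =================================
#
#     [WARNING] The server has no working IPv6.
#      - some detail line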
def diagnosis_run(categories=[], force=False):
# Get all the categories
all_categories = _list_diagnosis_categories()
all_categories_names = [category for category, _ in all_categories]
# Check the requested category makes sense
if categories == []:
categories = all_categories_names
else:
unknown_categories = [c for c in categories if c not in all_categories_names]
if unknown_categories:
raise YunohostError('diagnosis_unknown_categories', categories=", ".join(unknown_categories))
issues = []
# Call the hook ...
diagnosed_categories = []
for category in categories:
logger.debug("Running diagnosis for %s ..." % category)
path = [p for n, p in all_categories if n == category][0]
try:
code, report = hook_exec(path, args={"force": force}, env=None)
except Exception as e:
logger.error(m18n.n("diagnosis_failed_for_category", category=category, error=str(e)), exc_info=True)
else:
diagnosed_categories.append(category)
if report != {}:
issues.extend([item for item in report["items"] if item["status"] in ["WARNING", "ERROR"]])
if issues:
if msettings.get("interface") == "api":
logger.info(m18n.n("diagnosis_display_tip_web"))
else:
logger.info(m18n.n("diagnosis_display_tip_cli"))
return
def diagnosis_ignore(add_filter=None, remove_filter=None, list=False):
"""
This action is meant for the admin to ignore issues reported by the
diagnosis system if they are known and understood by the admin. For
example, the lack of ipv6 on an instance, or badly configured XMPP dns
records if the admin doesn't care so much about XMPP. The point being that
the diagnosis shouldn't keep complaining about those known and "expected"
issues, and instead focus on new unexpected issues that could arise.
For example, to ignore badly configured XMPP dnsrecords for domain yolo.test:
yunohost diagnosis ignore --add-filter dnsrecords domain=yolo.test category=xmpp
(here "dnsrecords" is the general diagnosis category to act on, and the
extra key=value arguments are additional criterias used to target specific
reports)
Or to ignore all dnsrecords issues:
yunohost diagnosis ignore --add-filter dnsrecords
The filters are stored in the diagnosis configuration in a data structure like:
ignore_filters: {
"ip": [
{"version": 6} # Ignore all issues related to ipv6
],
"dnsrecords": [
{"domain": "yolo.test", "category": "xmpp"}, # Ignore all issues related to DNS xmpp records for yolo.test
{} # Ignore all issues about dnsrecords
]
}
"""
# Ignore filters are stored in
configuration = _diagnosis_read_configuration()
if list:
return {"ignore_filters": configuration.get("ignore_filters", {})}
def validate_filter_criterias(filter_):
# Get all the categories
all_categories = _list_diagnosis_categories()
all_categories_names = [category for category, _ in all_categories]
# Sanity checks for the provided arguments
if len(filter_) == 0:
raise YunohostError("You should provide at least one criteria being the diagnosis category to ignore")
category = filter_[0]
if category not in all_categories_names:
raise YunohostError("%s is not a diagnosis category" % category)
if any("=" not in criteria for criteria in filter_[1:]):
raise YunohostError("Extra criterias should be of the form key=value (e.g. domain=yolo.test)")
# Convert the provided criteria into a nice dict
criterias = {c.split("=")[0]: c.split("=")[1] for c in filter_[1:]}
return category, criterias
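# For instance, reusing the hypothetical values from the docstring above, the call
#   yunohost diagnosis ignore --add-filter dnsrecords domain=yolo.test category=xmpp
# reaches this point with add_filter == ["dnsrecords", "domain=yolo.test", "category=xmpp"],
# and validate_filter_criterias(add_filter) returns
#   ("dnsrecords", {"domain": "yolo.test", "category": "xmpp"})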
if add_filter:
category, criterias = validate_filter_criterias(add_filter)
# Fetch current issues for the requested category
current_issues_for_this_category = diagnosis_show(categories=[category], issues=True, full=True)
current_issues_for_this_category = current_issues_for_this_category["reports"][0].get("items", {})
# Accept the given filter only if the criteria effectively match an existing issue
if not any(issue_matches_criterias(i, criterias) for i in current_issues_for_this_category):
raise YunohostError("No issues was found matching the given criteria.")
# Make sure the subdicts/lists exists
if "ignore_filters" not in configuration:
configuration["ignore_filters"] = {}
if category not in configuration["ignore_filters"]:
configuration["ignore_filters"][category] = []
if criterias in configuration["ignore_filters"][category]:
logger.warning("This filter already exists.")
return
configuration["ignore_filters"][category].append(criterias)
_diagnosis_write_configuration(configuration)
logger.success("Filter added")
return
if remove_filter:
category, criterias = validate_filter_criterias(remove_filter)
# Make sure the subdicts/lists exists
if "ignore_filters" not in configuration:
configuration["ignore_filters"] = {}
if category not in configuration["ignore_filters"]:
configuration["ignore_filters"][category] = []
if criterias not in configuration["ignore_filters"][category]:
raise YunohostError("This filter does not exists.")
configuration["ignore_filters"][category].remove(criterias)
_diagnosis_write_configuration(configuration)
logger.success("Filter removed")
return
def _diagnosis_read_configuration():
if not os.path.exists(DIAGNOSIS_CONFIG_FILE):
return {}
return read_yaml(DIAGNOSIS_CONFIG_FILE)
def _diagnosis_write_configuration(conf):
write_to_yaml(DIAGNOSIS_CONFIG_FILE, conf)
def issue_matches_criterias(issue, criterias):
"""
e.g. an issue with:
meta:
domain: yolo.test
category: xmpp
matches the criterias {"domain": "yolo.test"}
"""
for key, value in criterias.items():
if key not in issue["meta"]:
return False
if str(issue["meta"][key]) != value:
return False
return True
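# Hedged doctest-style illustration of the matching above (values from the docstring):
#   >>> issue = {"meta": {"domain": "yolo.test", "category": "xmpp"}}
#   >>> issue_matches_criterias(issue, {"domain": "yolo.test"})
#   True
#   >>> issue_matches_criterias(issue, {"domain": "other.test"})
#   False
#   >>> issue_matches_criterias(issue, {})   # an empty filter matches everything
#   True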
def add_ignore_flag_to_issues(report):
"""
Iterate over issues in a report, and flag them as ignored if they match an
ignored filter from the configuration
N.B.: for convenience, we want to make sure the "ignored" key is set for
every item in the report
"""
ignore_filters = _diagnosis_read_configuration().get("ignore_filters", {}).get(report["id"], [])
for report_item in report["items"]:
report_item["ignored"] = False
if report_item["status"] not in ["WARNING", "ERROR"]:
continue
for criterias in ignore_filters:
if issue_matches_criterias(report_item, criterias):
report_item["ignored"] = True
break
############################################################
class Diagnoser():
def __init__(self, args, env, loggers):
# FIXME ? That stuff with custom loggers is weird ... (mainly inherited from the bash hooks, idk)
self.logger_debug, self.logger_warning, self.logger_info = loggers
self.env = env
self.args = args or {}
self.cache_file = Diagnoser.cache_file(self.id_)
self.description = Diagnoser.get_description(self.id_)
def cached_time_ago(self):
if not os.path.exists(self.cache_file):
return 99999999
return time.time() - os.path.getmtime(self.cache_file)
def write_cache(self, report):
if not os.path.exists(DIAGNOSIS_CACHE):
os.makedirs(DIAGNOSIS_CACHE)
return write_to_json(self.cache_file, report)
def diagnose(self):
if not self.args.get("force", False) and self.cached_time_ago() < self.cache_duration:
self.logger_debug("Cache still valid : %s" % self.cache_file)
logger.info(m18n.n("diagnosis_cache_still_valid", category=self.description))
return 0, {}
for dependency in self.dependencies:
dep_report = Diagnoser.get_cached_report(dependency)
dep_errors = [item for item in dep_report["items"] if item["status"] == "ERROR"]
if dep_errors:
logger.error(m18n.n("diagnosis_cant_run_because_of_dep", category=self.description, dep=Diagnoser.get_description(dependency)))
return 1, {}
self.logger_debug("Running diagnostic for %s" % self.id_)
items = list(self.run())
new_report = {"id": self.id_,
"cached_for": self.cache_duration,
"items": items}
self.logger_debug("Updating cache %s" % self.cache_file)
self.write_cache(new_report)
Diagnoser.i18n(new_report)
add_ignore_flag_to_issues(new_report)
errors = [item for item in new_report["items"] if item["status"] == "ERROR" and not item["ignored"]]
warnings = [item for item in new_report["items"] if item["status"] == "WARNING" and not item["ignored"]]
errors_ignored = [item for item in new_report["items"] if item["status"] == "ERROR" and item["ignored"]]
warning_ignored = [item for item in new_report["items"] if item["status"] == "WARNING" and item["ignored"]]
ignored_msg = " " + m18n.n("diagnosis_ignored_issues", nb_ignored=len(errors_ignored+warning_ignored)) if errors_ignored or warning_ignored else ""
if errors and warnings:
logger.error(m18n.n("diagnosis_found_errors_and_warnings", errors=len(errors), warnings=len(warnings), category=new_report["description"]) + ignored_msg)
elif errors:
logger.error(m18n.n("diagnosis_found_errors", errors=len(errors), category=new_report["description"]) + ignored_msg)
elif warnings:
logger.warning(m18n.n("diagnosis_found_warnings", warnings=len(warnings), category=new_report["description"]) + ignored_msg)
else:
logger.success(m18n.n("diagnosis_everything_ok", category=new_report["description"]) + ignored_msg)
return 0, new_report
@staticmethod
def cache_file(id_):
return os.path.join(DIAGNOSIS_CACHE, "%s.json" % id_)
@staticmethod
def get_cached_report(id_):
filename = Diagnoser.cache_file(id_)
report = read_json(filename)
report["timestamp"] = int(os.path.getmtime(filename))
Diagnoser.i18n(report)
return report
@staticmethod
def get_description(id_):
key = "diagnosis_description_" + id_
descr = m18n.n(key)
# If no description available, fallback to id
return descr if descr != key else id_
@staticmethod
def i18n(report):
# "Render" the strings with m18n.n
# N.B.: we call m18n.n at display time instead of saving the already-translated report,
# because we can't be sure the report will be redisplayed with the same locale as the one
# it was generated with... e.g. the diagnosis may have run inside a cron job with locale EN
# while the actual admin uses FR.
report["description"] = Diagnoser.get_description(report["id"])
for item in report["items"]:
summary_key, summary_args = item["summary"]
item["summary"] = m18n.n(summary_key, **summary_args)
if "details" in item:
item["details"] = [m18n.n(key, *values) for key, values in item["details"]]
def _list_diagnosis_categories():
hooks_raw = hook_list("diagnosis", list_by="priority", show_info=True)["hooks"]
hooks = []
for _, some_hooks in sorted(hooks_raw.items(), key=lambda h: int(h[0])):
for name, info in some_hooks.items():
hooks.append((name, info["path"]))
return hooks
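# Sketch of the data flow above (paths are hypothetical): hook_list() groups the
# diagnosis hooks by priority, e.g.
#   {"10": {"ip": {"path": "/usr/share/yunohost/hooks/diagnosis/10-ip", ...}},
#    "30": {"dnsrecords": {"path": "/usr/share/yunohost/hooks/diagnosis/30-dnsrecords", ...}}}
# and this helper flattens that, sorted by priority, into
#   [("ip", ".../10-ip"), ("dnsrecords", ".../30-dnsrecords")]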

View file

@ -34,6 +34,7 @@ from moulinette.utils.log import getActionLogger
import yunohost.certificate
from yunohost.app import app_ssowatconf
from yunohost.regenconf import regen_conf
from yunohost.utils.network import get_public_ip
from yunohost.log import is_unit_operation
@ -154,7 +155,14 @@ def domain_remove(operation_logger, domain, force=False):
# Check domain is not the main domain
if domain == _get_maindomain():
raise YunohostError('domain_cannot_remove_main')
other_domains = domain_list()["domains"]
other_domains.remove(domain)
if other_domains:
raise YunohostError('domain_cannot_remove_main',
domain=domain, other_domains="\n * " + ("\n * ".join(other_domains)))
else:
raise YunohostError('domain_cannot_remove_main_add_new_one', domain=domain)
# Check if apps are installed on the domain
for app in os.listdir('/etc/yunohost/apps/'):
@ -233,6 +241,63 @@ def domain_dns_conf(domain, ttl=None):
return result
@is_unit_operation()
def domain_main_domain(operation_logger, new_main_domain=None):
"""
Check the current main domain, or change it
Keyword argument:
new_main_domain -- The new domain to be set as the main domain
"""
from yunohost.tools import _set_hostname
# If no new domain specified, we return the current main domain
if not new_main_domain:
return {'current_main_domain': _get_maindomain()}
# Check domain exists
if new_main_domain not in domain_list()['domains']:
raise YunohostError('domain_unknown')
operation_logger.related_to.append(('domain', new_main_domain))
operation_logger.start()
# Apply changes to ssl certs
ssl_key = "/etc/ssl/private/yunohost_key.pem"
ssl_crt = "/etc/ssl/private/yunohost_crt.pem"
new_ssl_key = "/etc/yunohost/certs/%s/key.pem" % new_main_domain
new_ssl_crt = "/etc/yunohost/certs/%s/crt.pem" % new_main_domain
try:
if os.path.exists(ssl_key) or os.path.lexists(ssl_key):
os.remove(ssl_key)
if os.path.exists(ssl_crt) or os.path.lexists(ssl_crt):
os.remove(ssl_crt)
os.symlink(new_ssl_key, ssl_key)
os.symlink(new_ssl_crt, ssl_crt)
_set_maindomain(new_main_domain)
except Exception as e:
logger.warning("%s" % e, exc_info=1)
raise YunohostError('main_domain_change_failed')
_set_hostname(new_main_domain)
# Generate SSOwat configuration file
app_ssowatconf()
# Regen configurations
try:
with open('/etc/yunohost/installed', 'r'):
regen_conf()
except IOError:
pass
logger.success(m18n.n('main_domain_changed'))
def domain_cert_status(domain_list, full=False):
return yunohost.certificate.certificate_status(domain_list, full)

View file

@ -1,740 +0,0 @@
# -*- coding: utf-8 -*-
""" License
Copyright (C) 2013 YunoHost
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program; if not, see http://www.gnu.org/licenses
"""
""" yunohost_monitor.py
Monitoring functions
"""
import re
import json
import time
import psutil
import calendar
import subprocess
import xmlrpclib
import os.path
import os
import dns.resolver
import cPickle as pickle
from datetime import datetime
from moulinette import m18n
from yunohost.utils.error import YunohostError
from moulinette.utils.log import getActionLogger
from yunohost.utils.network import get_public_ip
from yunohost.domain import _get_maindomain
logger = getActionLogger('yunohost.monitor')
GLANCES_URI = 'http://127.0.0.1:61209'
STATS_PATH = '/var/lib/yunohost/stats'
CRONTAB_PATH = '/etc/cron.d/yunohost-monitor'
def monitor_disk(units=None, mountpoint=None, human_readable=False):
"""
Monitor disk space and usage
Keyword argument:
units -- Unit(s) to monitor
mountpoint -- Device mountpoint
human_readable -- Print sizes in human readable format
"""
glances = _get_glances_api()
result_dname = None
result = {}
if units is None:
units = ['io', 'filesystem']
_format_dname = lambda d: (os.path.realpath(d)).replace('/dev/', '')
# Get mounted devices
devices = {}
for p in psutil.disk_partitions(all=True):
if not p.device.startswith('/dev/') or not p.mountpoint:
continue
if mountpoint is None:
devices[_format_dname(p.device)] = p.mountpoint
elif mountpoint == p.mountpoint:
dn = _format_dname(p.device)
devices[dn] = p.mountpoint
result_dname = dn
if len(devices) == 0:
if mountpoint is not None:
raise YunohostError('mountpoint_unknown')
return result
# Retrieve monitoring for unit(s)
for u in units:
if u == 'io':
# Define setter
if len(units) > 1:
def _set(dn, dvalue):
try:
result[dn][u] = dvalue
except KeyError:
result[dn] = {u: dvalue}
else:
def _set(dn, dvalue):
result[dn] = dvalue
# Iterate over values
devices_names = devices.keys()
for d in json.loads(glances.getDiskIO()):
dname = d.pop('disk_name')
try:
devices_names.remove(dname)
except:
continue
else:
_set(dname, d)
for dname in devices_names:
_set(dname, 'not-available')
elif u == 'filesystem':
# Define setter
if len(units) > 1:
def _set(dn, dvalue):
try:
result[dn][u] = dvalue
except KeyError:
result[dn] = {u: dvalue}
else:
def _set(dn, dvalue):
result[dn] = dvalue
# Iterate over values
devices_names = devices.keys()
for d in json.loads(glances.getFs()):
dname = _format_dname(d.pop('device_name'))
try:
devices_names.remove(dname)
except:
continue
else:
d['avail'] = d['size'] - d['used']
if human_readable:
for i in ['used', 'avail', 'size']:
d[i] = binary_to_human(d[i]) + 'B'
_set(dname, d)
for dname in devices_names:
_set(dname, 'not-available')
else:
raise YunohostError('unit_unknown', unit=u)
if result_dname is not None:
return result[result_dname]
return result
def monitor_network(units=None, human_readable=False):
"""
Monitor network interfaces
Keyword argument:
units -- Unit(s) to monitor
human_readable -- Print sizes in human readable format
"""
glances = _get_glances_api()
result = {}
if units is None:
units = ['check', 'usage', 'infos']
# Get network devices and their addresses
# TODO / FIXME : use functions in utils/network.py to manage this
devices = {}
output = subprocess.check_output('ip addr show'.split())
for d in re.split('^(?:[0-9]+: )', output, flags=re.MULTILINE):
# Extract device name (1) and its addresses (2)
m = re.match('([^\s@]+)(?:@[\S]+)?: (.*)', d, flags=re.DOTALL)
if m:
devices[m.group(1)] = m.group(2)
# Retrieve monitoring for unit(s)
for u in units:
if u == 'check':
result[u] = {}
domain = _get_maindomain()
cmd_check_smtp = os.system('/bin/nc -z -w1 yunohost.org 25')
if cmd_check_smtp == 0:
smtp_check = m18n.n('network_check_smtp_ok')
else:
smtp_check = m18n.n('network_check_smtp_ko')
try:
answers = dns.resolver.query(domain, 'MX')
mx_check = {}
i = 0
for server in answers:
mx_id = 'mx%s' % i
mx_check[mx_id] = server
i = i + 1
except:
mx_check = m18n.n('network_check_mx_ko')
result[u] = {
'smtp_check': smtp_check,
'mx_check': mx_check
}
elif u == 'usage':
result[u] = {}
for i in json.loads(glances.getNetwork()):
iname = i['interface_name']
if iname in devices.keys():
del i['interface_name']
if human_readable:
for k in i.keys():
if k != 'time_since_update':
i[k] = binary_to_human(i[k]) + 'B'
result[u][iname] = i
else:
logger.debug('interface name %s was not found', iname)
elif u == 'infos':
p_ipv4 = get_public_ip() or 'unknown'
# TODO / FIXME : use functions in utils/network.py to manage this
l_ip = 'unknown'
for name, addrs in devices.items():
if name == 'lo':
continue
if not isinstance(l_ip, dict):
l_ip = {}
l_ip[name] = _extract_inet(addrs)
gateway = 'unknown'
output = subprocess.check_output('ip route show'.split())
m = re.search('default via (.*) dev ([a-z]+[0-9]?)', output)
if m:
addr = _extract_inet(m.group(1), True)
if len(addr) == 1:
proto, gateway = addr.popitem()
result[u] = {
'public_ip': p_ipv4,
'local_ip': l_ip,
'gateway': gateway,
}
else:
raise YunohostError('unit_unknown', unit=u)
if len(units) == 1:
return result[units[0]]
return result
def monitor_system(units=None, human_readable=False):
"""
Monitor system information and usage
Keyword argument:
units -- Unit(s) to monitor
human_readable -- Print sizes in human readable format
"""
glances = _get_glances_api()
result = {}
if units is None:
units = ['memory', 'cpu', 'process', 'uptime', 'infos']
# Retrieve monitoring for unit(s)
for u in units:
if u == 'memory':
ram = json.loads(glances.getMem())
swap = json.loads(glances.getMemSwap())
if human_readable:
for i in ram.keys():
if i != 'percent':
ram[i] = binary_to_human(ram[i]) + 'B'
for i in swap.keys():
if i != 'percent':
swap[i] = binary_to_human(swap[i]) + 'B'
result[u] = {
'ram': ram,
'swap': swap
}
elif u == 'cpu':
result[u] = {
'load': json.loads(glances.getLoad()),
'usage': json.loads(glances.getCpu())
}
elif u == 'process':
result[u] = json.loads(glances.getProcessCount())
elif u == 'uptime':
result[u] = (str(datetime.now() - datetime.fromtimestamp(psutil.boot_time())).split('.')[0])
elif u == 'infos':
result[u] = json.loads(glances.getSystem())
else:
raise YunohostError('unit_unknown', unit=u)
if len(units) == 1 and not isinstance(result[units[0]], str):
return result[units[0]]
return result
def monitor_update_stats(period):
"""
Update monitoring statistics
Keyword argument:
period -- Time period to update (day, week, month)
"""
if period not in ['day', 'week', 'month']:
raise YunohostError('monitor_period_invalid')
stats = _retrieve_stats(period)
if not stats:
stats = {'disk': {}, 'network': {}, 'system': {}, 'timestamp': []}
monitor = None
# Get monitoring stats
if period == 'day':
monitor = _monitor_all('day')
else:
t = stats['timestamp']
p = 'day' if period == 'week' else 'week'
if len(t) > 0:
monitor = _monitor_all(p, t[len(t) - 1])
else:
monitor = _monitor_all(p, 0)
if not monitor:
raise YunohostError('monitor_stats_no_update')
stats['timestamp'].append(time.time())
# Append disk stats
for dname, units in monitor['disk'].items():
disk = {}
# Retrieve current stats for disk name
if dname in stats['disk'].keys():
disk = stats['disk'][dname]
for unit, values in units.items():
# Continue if unit doesn't contain stats
if not isinstance(values, dict):
continue
# Retrieve current stats for unit and append new ones
curr = disk[unit] if unit in disk.keys() else {}
if unit == 'io':
disk[unit] = _append_to_stats(curr, values, 'time_since_update')
elif unit == 'filesystem':
disk[unit] = _append_to_stats(curr, values, ['fs_type', 'mnt_point'])
stats['disk'][dname] = disk
# Append network stats
net_usage = {}
for iname, values in monitor['network']['usage'].items():
# Continue if the unit doesn't contain stats
if not isinstance(values, dict):
continue
# Retrieve current stats and append new ones
curr = {}
if 'usage' in stats['network'] and iname in stats['network']['usage']:
curr = stats['network']['usage'][iname]
net_usage[iname] = _append_to_stats(curr, values, 'time_since_update')
stats['network'] = {'usage': net_usage, 'infos': monitor['network']['infos']}
# Append system stats
for unit, values in monitor['system'].items():
# Continue if the unit doesn't contain stats
if not isinstance(values, dict):
continue
# Set static infos unit
if unit == 'infos':
stats['system'][unit] = values
continue
# Retrieve current stats and append new ones
curr = stats['system'][unit] if unit in stats['system'].keys() else {}
stats['system'][unit] = _append_to_stats(curr, values)
_save_stats(stats, period)
def monitor_show_stats(period, date=None):
"""
Show monitoring statistics
Keyword argument:
period -- Time period to show (day, week, month)
"""
if period not in ['day', 'week', 'month']:
raise YunohostError('monitor_period_invalid')
result = _retrieve_stats(period, date)
if result is False:
raise YunohostError('monitor_stats_file_not_found')
elif result is None:
raise YunohostError('monitor_stats_period_unavailable')
return result
def monitor_enable(with_stats=False):
"""
Enable server monitoring
Keyword argument:
with_stats -- Enable monitoring statistics
"""
from yunohost.service import (service_status, service_enable,
service_start)
glances = service_status('glances')
if glances['status'] != 'running':
service_start('glances')
if glances['loaded'] != 'enabled':
service_enable('glances')
# Install crontab
if with_stats:
# day: every 5 min # week: every 1 h # month: every 4 h #
rules = ('*/5 * * * * root {cmd} day >> /dev/null\n'
'3 * * * * root {cmd} week >> /dev/null\n'
'6 */4 * * * root {cmd} month >> /dev/null').format(
cmd='/usr/bin/yunohost --quiet monitor update-stats')
with open(CRONTAB_PATH, 'w') as f:
f.write(rules)
logger.success(m18n.n('monitor_enabled'))
def monitor_disable():
"""
Disable server monitoring
"""
from yunohost.service import (service_status, service_disable,
service_stop)
glances = service_status('glances')
if glances['status'] != 'inactive':
service_stop('glances')
if glances['loaded'] != 'disabled':
try:
service_disable('glances')
except YunohostError as e:
logger.warning(e.strerror)
# Remove crontab
try:
os.remove(CRONTAB_PATH)
except:
pass
logger.success(m18n.n('monitor_disabled'))
def _get_glances_api():
"""
Retrieve Glances API running on the local server
"""
try:
p = xmlrpclib.ServerProxy(GLANCES_URI)
p.system.methodHelp('getAll')
except (xmlrpclib.ProtocolError, IOError):
pass
else:
return p
from yunohost.service import service_status
if service_status('glances')['status'] != 'running':
raise YunohostError('monitor_not_enabled')
raise YunohostError('monitor_glances_con_failed')
def _extract_inet(string, skip_netmask=False, skip_loopback=True):
"""
Extract IP addresses (v4 and/or v6) from a string limited to one
address by protocol
Keyword argument:
string -- String to search in
skip_netmask -- True to skip subnet mask extraction
skip_loopback -- False to include addresses reserved for the
loopback interface
Returns:
A dict of {protocol: address} with protocol one of 'ipv4' or 'ipv6'
"""
ip4_pattern = '((25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}'
ip6_pattern = '(((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)::((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)'
ip4_pattern += '/[0-9]{1,2})' if not skip_netmask else ')'
ip6_pattern += '/[0-9]{1,3})' if not skip_netmask else ')'
result = {}
for m in re.finditer(ip4_pattern, string):
addr = m.group(1)
if skip_loopback and addr.startswith('127.'):
continue
# Limit to only one result
result['ipv4'] = addr
break
for m in re.finditer(ip6_pattern, string):
addr = m.group(1)
if skip_loopback and addr == '::1':
continue
# Limit to only one result
result['ipv6'] = addr
break
return result
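# Hedged usage sketch (addresses are made up): with the default skip_netmask=False
# the prefix length is kept in the result, e.g.
#   >>> _extract_inet("inet 192.168.1.42/24 brd 192.168.1.255 scope global eth0")
#   {'ipv4': '192.168.1.42/24'}
#   >>> _extract_inet("192.168.1.1", skip_netmask=True)
#   {'ipv4': '192.168.1.1'}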
def binary_to_human(n, customary=False):
"""
Convert bytes or bits into human readable format with binary prefix
Keyword argument:
n -- Number to convert
customary -- Use customary symbol instead of IEC standard
"""
symbols = ('Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
if customary:
symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols):
prefix[s] = 1 << (i + 1) * 10
for s in reversed(symbols):
if n >= prefix[s]:
value = float(n) / prefix[s]
return '%.1f%s' % (value, s)
return "%s" % n
def _retrieve_stats(period, date=None):
"""
Retrieve statistics from pickle file
Keyword argument:
period -- Time period to retrieve (day, week, month)
date -- Date of stats to retrieve
"""
pkl_file = None
# Retrieve pickle file
if date is not None:
timestamp = calendar.timegm(date)
pkl_file = '%s/%d_%s.pkl' % (STATS_PATH, timestamp, period)
else:
pkl_file = '%s/%s.pkl' % (STATS_PATH, period)
if not os.path.isfile(pkl_file):
return False
# Read file and process its content
with open(pkl_file, 'r') as f:
result = pickle.load(f)
if not isinstance(result, dict):
return None
return result
def _save_stats(stats, period, date=None):
"""
Save statistics to pickle file
Keyword argument:
stats -- Stats dict to save
period -- Time period of stats (day, week, month)
date -- Date of stats
"""
pkl_file = None
# Set pickle file name
if date is not None:
timestamp = calendar.timegm(date)
pkl_file = '%s/%d_%s.pkl' % (STATS_PATH, timestamp, period)
else:
pkl_file = '%s/%s.pkl' % (STATS_PATH, period)
if not os.path.isdir(STATS_PATH):
os.makedirs(STATS_PATH)
# Limit stats
if date is None:
t = stats['timestamp']
limit = {'day': 86400, 'week': 604800, 'month': 2419200}
if (t[len(t) - 1] - t[0]) > limit[period]:
begin = t[len(t) - 1] - limit[period]
stats = _filter_stats(stats, begin)
# Write file content
with open(pkl_file, 'w') as f:
pickle.dump(stats, f)
return True
def _monitor_all(period=None, since=None):
"""
Monitor all units (disk, network and system) for the given period
If since is None, real-time monitoring is returned. Otherwise, the
mean of stats since this timestamp is calculated and returned.
Keyword argument:
period -- Time period to monitor (day, week, month)
since -- Timestamp of the stats beginning
"""
result = {'disk': {}, 'network': {}, 'system': {}}
# Real-time stats
if period == 'day' and since is None:
result['disk'] = monitor_disk()
result['network'] = monitor_network()
result['system'] = monitor_system()
return result
# Retrieve stats and calculate mean
stats = _retrieve_stats(period)
if not stats:
return None
stats = _filter_stats(stats, since)
if not stats:
return None
result = _calculate_stats_mean(stats)
return result
def _filter_stats(stats, t_begin=None, t_end=None):
"""
Filter statistics by beginning and/or ending timestamp
Keyword argument:
stats -- Dict stats to filter
t_begin -- Beginning timestamp
t_end -- Ending timestamp
"""
if t_begin is None and t_end is None:
return stats
i_begin = i_end = None
# Look for indexes of timestamp interval
for i, t in enumerate(stats['timestamp']):
if t_begin and i_begin is None and t >= t_begin:
i_begin = i
if t_end and i != 0 and i_end is None and t > t_end:
i_end = i
# Check indexes
if i_begin is None:
if t_begin and t_begin > stats['timestamp'][0]:
return None
i_begin = 0
if i_end is None:
if t_end and t_end < stats['timestamp'][0]:
return None
i_end = len(stats['timestamp'])
if i_begin == 0 and i_end == len(stats['timestamp']):
return stats
# Filter function
def _filter(s, i, j):
for k, v in s.items():
if isinstance(v, dict):
s[k] = _filter(v, i, j)
elif isinstance(v, list):
s[k] = v[i:j]
return s
stats = _filter(stats, i_begin, i_end)
return stats
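# Worked example (illustrative numbers) of the index lookup above: with
# stats['timestamp'] == [100, 200, 300] and t_begin == 150, i_begin becomes 1
# (first timestamp >= 150), i_end defaults to len(timestamp), and every list
# nested in the stats dict is sliced down to its last two entries.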
def _calculate_stats_mean(stats):
"""
Calculate the weighted mean for each statistic
Keyword argument:
stats -- Stats dict to process
"""
timestamp = stats['timestamp']
t_sum = sum(timestamp)
del stats['timestamp']
# Weighted mean function
def _mean(s, t, ts):
for k, v in s.items():
if isinstance(v, dict):
s[k] = _mean(v, t, ts)
elif isinstance(v, list):
try:
nums = [float(x * t[i]) for i, x in enumerate(v)]
except:
pass
else:
s[k] = sum(nums) / float(ts)
return s
stats = _mean(stats, timestamp, t_sum)
return stats
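# Worked example (illustrative numbers) of the weighted mean above: with
# timestamps [10, 30] and a recorded series [2, 4], the result is
# (2*10 + 4*30) / (10 + 30) = 140 / 40 = 3.5.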
def _append_to_stats(stats, monitor, statics=[]):
"""
Append monitoring statistics to current statistics
Keyword argument:
stats -- Current stats dict
monitor -- Monitoring statistics
statics -- List of stats static keys
"""
if isinstance(statics, str):
statics = [statics]
# Appending function
def _append(s, m, st):
for k, v in m.items():
if k in st:
s[k] = v
elif isinstance(v, dict):
if k not in s:
s[k] = {}
s[k] = _append(s[k], v, st)
else:
if k not in s:
s[k] = []
if isinstance(v, list):
s[k].extend(v)
else:
s[k].append(v)
return s
stats = _append(stats, monitor, statics)
return stats
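# Hedged illustration of the append logic above (values are made up):
#   _append_to_stats({}, {'used': 10, 'fs_type': 'ext4'}, statics=['fs_type'])
# returns {'used': [10], 'fs_type': 'ext4'}, i.e. static keys are overwritten
# in place while every other value accumulates into a growing list.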

View file

@ -92,7 +92,6 @@ def user_permission_update(operation_logger, permission, add=None, remove=None,
remove -- List of groups or usernames to remove from to this permission
"""
from yunohost.hook import hook_callback
from yunohost.user import user_group_list
from yunohost.utils.ldap import _get_ldap_interface
ldap = _get_ldap_interface()
@ -111,7 +110,6 @@ def user_permission_update(operation_logger, permission, add=None, remove=None,
raise YunohostError('permission_not_found', permission=permission)
current_allowed_groups = existing_permission["allowed"]
all_existing_groups = user_group_list()['groups'].keys()
operation_logger.related_to.append(('app', permission.split(".")[0]))
# Compute new allowed group list (and make sure what we're doing make sense)
@ -121,8 +119,6 @@ def user_permission_update(operation_logger, permission, add=None, remove=None,
if add:
groups_to_add = [add] if not isinstance(add, list) else add
for group in groups_to_add:
if group not in all_existing_groups:
raise YunohostError('group_unknown', group=group)
if group in current_allowed_groups:
logger.warning(m18n.n('permission_already_allowed', permission=permission, group=group))
else:
@ -133,8 +129,6 @@ def user_permission_update(operation_logger, permission, add=None, remove=None,
if remove:
groups_to_remove = [remove] if not isinstance(remove, list) else remove
for group in groups_to_remove:
if group not in all_existing_groups:
raise YunohostError('group_unknown', group=group)
if group not in current_allowed_groups:
logger.warning(m18n.n('permission_already_disallowed', permission=permission, group=group))
else:
@ -161,36 +155,10 @@ def user_permission_update(operation_logger, permission, add=None, remove=None,
operation_logger.start()
try:
ldap.update('cn=%s,ou=permission' % permission,
{'groupPermission': ['cn=' + g + ',ou=groups,dc=yunohost,dc=org' for g in new_allowed_groups]})
except Exception as e:
raise YunohostError('permission_update_failed', permission=permission, error=e)
new_permission = _update_ldap_group_permission(permission=permission, allowed=new_allowed_groups, sync_perm=sync_perm)
logger.debug(m18n.n('permission_updated', permission=permission))
# Trigger permission sync if asked
if sync_perm:
permission_sync_to_user()
new_permission = user_permission_list(full=True)["permissions"][permission]
# Trigger app callbacks
app = permission.split(".")[0]
old_allowed_users = set(existing_permission["corresponding_users"])
new_allowed_users = set(new_permission["corresponding_users"])
effectively_added_users = new_allowed_users - old_allowed_users
effectively_removed_users = old_allowed_users - new_allowed_users
if effectively_added_users:
hook_callback('post_app_addaccess', args=[app, ','.join(effectively_added_users)])
if effectively_removed_users:
hook_callback('post_app_removeaccess', args=[app, ','.join(effectively_removed_users)])
return new_permission
@ -225,34 +193,10 @@ def user_permission_reset(operation_logger, permission, sync_perm=True):
operation_logger.related_to.append(('app', permission.split(".")[0]))
operation_logger.start()
default_permission = {'groupPermission': ['cn=all_users,ou=groups,dc=yunohost,dc=org']}
try:
ldap.update('cn=%s,ou=permission' % permission, default_permission)
except Exception as e:
raise YunohostError('permission_update_failed', permission=permission, error=e)
new_permission = _update_ldap_group_permission(permission=permission, allowed="all_users", sync_perm=sync_perm)
logger.debug(m18n.n('permission_updated', permission=permission))
if sync_perm:
permission_sync_to_user()
new_permission = user_permission_list(full=True)["permissions"][permission]
# Trigger app callbacks
app = permission.split(".")[0]
old_allowed_users = set(existing_permission["corresponding_users"])
new_allowed_users = set(new_permission["corresponding_users"])
effectively_added_users = new_allowed_users - old_allowed_users
effectively_removed_users = old_allowed_users - new_allowed_users
if effectively_added_users:
hook_callback('post_app_addaccess', args=[app, ','.join(effectively_added_users)])
if effectively_removed_users:
hook_callback('post_app_removeaccess', args=[app, ','.join(effectively_removed_users)])
return new_permission
#
@ -286,7 +230,6 @@ def permission_create(operation_logger, permission, url=None, allowed=None, sync
re:domain.tld/app/api/[A-Z]*$ -> domain.tld/app/api/[A-Z]*$
"""
from yunohost.user import user_group_list
from yunohost.utils.ldap import _get_ldap_interface
ldap = _get_ldap_interface()
@ -313,20 +256,6 @@ def permission_create(operation_logger, permission, url=None, allowed=None, sync
'gidNumber': gid,
}
# If who should be allowed is explicitly provided, use this info
if allowed:
if not isinstance(allowed, list):
allowed = [allowed]
# (though first we validate that the targets actually exist)
all_existing_groups = user_group_list()['groups'].keys()
for g in allowed:
if g not in all_existing_groups:
raise YunohostError('group_unknown', group=g)
attr_dict['groupPermission'] = ['cn=%s,ou=groups,dc=yunohost,dc=org' % g for g in allowed]
# For main permission, we add all users by default
elif permission.endswith(".main"):
attr_dict['groupPermission'] = ['cn=all_users,ou=groups,dc=yunohost,dc=org']
if url:
attr_dict['URL'] = url
@ -338,11 +267,20 @@ def permission_create(operation_logger, permission, url=None, allowed=None, sync
except Exception as e:
raise YunohostError('permission_creation_failed', permission=permission, error=e)
if sync_perm:
permission_sync_to_user()
to_add = None
# If who should be allowed is explicitly provided, use this info
if allowed:
to_add = [allowed] if not isinstance(allowed, list) else allowed
# For main permission, we add all users by default
elif permission.endswith(".main"):
to_add = "all_users"
new_permission = _update_ldap_group_permission(permission=permission, allowed=to_add, sync_perm=sync_perm)
logger.debug(m18n.n('permission_created', permission=permission))
return user_permission_list(full=True)["permissions"][permission]
return new_permission
@is_unit_operation()
@ -471,3 +409,68 @@ def permission_sync_to_user():
# Reload unscd, otherwise the group ain't propagated to the LDAP database
os.system('nscd --invalidate=passwd')
os.system('nscd --invalidate=group')
def _update_ldap_group_permission(permission, allowed, sync_perm=True):
"""
Internal function that will rewrite user permission
permission -- Name of the permission (e.g. mail or nextcloud or wordpress.editors)
allowed -- A list of group/user to allow for the permission
"""
from yunohost.hook import hook_callback
from yunohost.user import user_group_list
from yunohost.utils.ldap import _get_ldap_interface
ldap = _get_ldap_interface()
# Fetch currently allowed groups for this permission
existing_permission = user_permission_list(full=True)["permissions"].get(permission, None)
if existing_permission is None:
raise YunohostError('permission_not_found', permission=permission)
all_existing_groups = user_group_list()['groups'].keys()
if allowed:
if not isinstance(allowed, list):
allowed = [allowed]
for group in allowed:
if group not in all_existing_groups:
raise YunohostError('group_unknown', group=group)
else:
if sync_perm:
permission_sync_to_user()
return user_permission_list(full=True)["permissions"][permission]
try:
ldap.update('cn=%s,ou=permission' % permission,
{'groupPermission': ['cn=' + g + ',ou=groups,dc=yunohost,dc=org' for g in allowed]})
except Exception as e:
raise YunohostError('permission_update_failed', permission=permission, error=e)
# Trigger permission sync if asked
if sync_perm:
permission_sync_to_user()
new_permission = user_permission_list(full=True)["permissions"][permission]
# Trigger app callbacks
app = permission.split(".")[0]
sub_permission = permission.split(".")[1]
old_allowed_users = set(existing_permission["corresponding_users"])
new_allowed_users = set(new_permission["corresponding_users"])
effectively_added_users = new_allowed_users - old_allowed_users
effectively_removed_users = old_allowed_users - new_allowed_users
if effectively_added_users:
hook_callback('post_app_addaccess', args=[app, ','.join(effectively_added_users), sub_permission])
if effectively_removed_users:
hook_callback('post_app_removeaccess', args=[app, ','.join(effectively_removed_users), sub_permission])
return new_permission
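# Hedged usage sketch (the permission name is hypothetical): user_permission_update(),
# user_permission_reset() and permission_create() above all delegate the LDAP write,
# the optional permission_sync_to_user() and the app hooks to this helper, e.g.
#   _update_ldap_group_permission(permission="nextcloud.main", allowed=["all_users"])
# allows the all_users group on nextcloud.main and fires post_app_addaccess for the
# users effectively gaining access.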

View file

@ -131,6 +131,16 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run
show_info=False)['hooks']
names.remove('ssh')
# Dirty hack for legacy code: avoid attempting to regen the conf for
# glances because it got removed... This is only needed *once*,
# during the upgrade from 3.7 to 3.8, because YunoHost will attempt to
# regen glances' conf *before* it gets automatically removed from
# services.yml (which only happens during the regen-conf of
# 'yunohost', so at the very end of the regen-conf cycle). Anyway,
# this can be safely removed once we're in >= 4.0.
if "glances" in names:
names.remove("glances")
pre_result = hook_callback('conf_regen', names, pre_callback=_pre_call)
# Keep only the hook names with at least one success
@ -525,31 +535,32 @@ def _process_regen_conf(system_conf, new_conf=None, save=True):
def manually_modified_files():
# We do this to have --quiet, i.e. don't throw a whole bunch of logs
# just to fetch this...
# Might be able to optimize this by looking at what the regen conf does
# and only do the part that checks file hashes...
cmd = "yunohost tools regen-conf --dry-run --output-as json --quiet"
j = json.loads(subprocess.check_output(cmd.split()))
# j is something like :
# {"postfix": {"applied": {}, "pending": {"/etc/postfix/main.cf": {"status": "modified"}}}
output = []
for app, actions in j.items():
for action, files in actions.items():
for filename, infos in files.items():
if infos["status"] == "modified":
output.append(filename)
regenconf_categories = _get_regenconf_infos()
for category, infos in regenconf_categories.items():
conffiles = infos["conffiles"]
for path, hash_ in conffiles.items():
if hash_ != _calculate_hash(path):
output.append(path)
return output
def manually_modified_files_compared_to_debian_default():
def manually_modified_files_compared_to_debian_default(ignore_handled_by_regenconf=False):
# from https://serverfault.com/a/90401
r = subprocess.check_output("dpkg-query -W -f='${Conffiles}\n' '*' \
| awk 'OFS=\" \"{print $2,$1}' \
| md5sum -c 2>/dev/null \
| awk -F': ' '$2 !~ /OK/{print $1}'", shell=True)
return r.strip().split("\n")
files = subprocess.check_output("dpkg-query -W -f='${Conffiles}\n' '*' \
| awk 'OFS=\" \"{print $2,$1}' \
| md5sum -c 2>/dev/null \
| awk -F': ' '$2 !~ /OK/{print $1}'", shell=True)
files = files.strip().split("\n")
if ignore_handled_by_regenconf:
regenconf_categories = _get_regenconf_infos()
regenconf_files = []
for infos in regenconf_categories.values():
regenconf_files.extend(infos["conffiles"].keys())
files = [f for f in files if f not in regenconf_files]
return files
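# Rough reading of the shell pipeline above (per the linked serverfault answer):
# dpkg-query lists every registered conffile together with its packaged md5sum,
# the first awk swaps the columns into "md5sum path" lines, md5sum -c checks those
# sums against the files currently on disk, and the final awk keeps only the paths
# whose check is not "OK", i.e. conffiles that differ from the Debian defaults.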

View file

@ -0,0 +1,358 @@
import os
import pytest
import requests
import requests_mock
import glob
import shutil
from moulinette import m18n
from moulinette.utils.filesystem import read_json, write_to_json, write_to_yaml, mkdir
from yunohost.utils.error import YunohostError
from yunohost.app import (_initialize_apps_catalog_system,
_read_apps_catalog_list,
_update_apps_catalog,
_actual_apps_catalog_api_url,
_load_apps_catalog,
logger,
APPS_CATALOG_CACHE,
APPS_CATALOG_CONF,
APPS_CATALOG_CRON_PATH,
APPS_CATALOG_API_VERSION,
APPS_CATALOG_DEFAULT_URL)
APPS_CATALOG_DEFAULT_URL_FULL = _actual_apps_catalog_api_url(APPS_CATALOG_DEFAULT_URL)
CRON_FOLDER, CRON_NAME = APPS_CATALOG_CRON_PATH.rsplit("/", 1)
DUMMY_APP_CATALOG = """{
"foo": {"id": "foo", "level": 4},
"bar": {"id": "bar", "level": 7}
}
"""
class AnyStringWith(str):
def __eq__(self, other):
return self in other
def setup_function(function):
# Clear apps catalog cache
shutil.rmtree(APPS_CATALOG_CACHE, ignore_errors=True)
# Clear apps_catalog cron
if os.path.exists(APPS_CATALOG_CRON_PATH):
os.remove(APPS_CATALOG_CRON_PATH)
# Clear apps_catalog conf
if os.path.exists(APPS_CATALOG_CONF):
os.remove(APPS_CATALOG_CONF)
def teardown_function(function):
# Clear apps catalog cache
# Otherwise when using apps stuff after running the test,
# we'll still have the dummy unusable list
shutil.rmtree(APPS_CATALOG_CACHE, ignore_errors=True)
def cron_job_is_there():
r = os.system("run-parts -v --test %s | grep %s" % (CRON_FOLDER, CRON_NAME))
return r == 0
#
# ################################################
#
def test_apps_catalog_init(mocker):
# Cache is empty
assert not glob.glob(APPS_CATALOG_CACHE + "/*")
# Conf doesn't exist yet
assert not os.path.exists(APPS_CATALOG_CONF)
# Conf doesn't exist yet
assert not os.path.exists(APPS_CATALOG_CRON_PATH)
# Initialize ...
mocker.spy(m18n, "n")
_initialize_apps_catalog_system()
m18n.n.assert_any_call('apps_catalog_init_success')
# Then there's a cron enabled
assert cron_job_is_there()
# And a conf with at least one list
assert os.path.exists(APPS_CATALOG_CONF)
apps_catalog_list = _read_apps_catalog_list()
assert len(apps_catalog_list)
# Cache is expected to still be empty though
# (if we did update the apps_catalog during init,
# we couldn't differentiate easily exceptions
# related to lack of network connectivity)
assert not glob.glob(APPS_CATALOG_CACHE + "/*")
def test_apps_catalog_emptylist():
# Initialize ...
_initialize_apps_catalog_system()
# Let's imagine somebody removed the default apps catalog because, say, they don't want to use our default apps catalog
os.system("rm %s" % APPS_CATALOG_CONF)
os.system("touch %s" % APPS_CATALOG_CONF)
apps_catalog_list = _read_apps_catalog_list()
assert not len(apps_catalog_list)
def test_apps_catalog_update_success(mocker):
# Initialize ...
_initialize_apps_catalog_system()
# Cache is empty
assert not glob.glob(APPS_CATALOG_CACHE + "/*")
# Update
with requests_mock.Mocker() as m:
# Mock the server response with a dummy apps catalog
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, text=DUMMY_APP_CATALOG)
mocker.spy(m18n, "n")
_update_apps_catalog()
m18n.n.assert_any_call("apps_catalog_updating")
m18n.n.assert_any_call("apps_catalog_update_success")
# Cache shouldn't be empty anymore
assert glob.glob(APPS_CATALOG_CACHE + "/*")
app_dict = _load_apps_catalog()
assert "foo" in app_dict.keys()
assert "bar" in app_dict.keys()
def test_apps_catalog_update_404(mocker):
# Initialize ...
_initialize_apps_catalog_system()
with requests_mock.Mocker() as m:
# 404 error
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL,
status_code=404)
with pytest.raises(YunohostError):
mocker.spy(m18n, "n")
_update_apps_catalog()
m18n.n.assert_any_call("apps_catalog_failed_to_download")
def test_apps_catalog_update_timeout(mocker):
# Initialize ...
_initialize_apps_catalog_system()
with requests_mock.Mocker() as m:
# Timeout
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL,
exc=requests.exceptions.ConnectTimeout)
with pytest.raises(YunohostError):
mocker.spy(m18n, "n")
_update_apps_catalog()
m18n.n.assert_any_call("apps_catalog_failed_to_download")
def test_apps_catalog_update_sslerror(mocker):
# Initialize ...
_initialize_apps_catalog_system()
with requests_mock.Mocker() as m:
# SSL error
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL,
exc=requests.exceptions.SSLError)
with pytest.raises(YunohostError):
mocker.spy(m18n, "n")
_update_apps_catalog()
m18n.n.assert_any_call("apps_catalog_failed_to_download")
def test_apps_catalog_update_corrupted(mocker):
# Initialize ...
_initialize_apps_catalog_system()
with requests_mock.Mocker() as m:
# Corrupted json
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL,
text=DUMMY_APP_CATALOG[:-2])
with pytest.raises(YunohostError):
mocker.spy(m18n, "n")
_update_apps_catalog()
m18n.n.assert_any_call("apps_catalog_failed_to_download")
def test_apps_catalog_load_with_empty_cache(mocker):
# Initialize ...
_initialize_apps_catalog_system()
# Cache is empty
assert not glob.glob(APPS_CATALOG_CACHE + "/*")
# Update
with requests_mock.Mocker() as m:
# Mock the server response with a dummy apps catalog
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, text=DUMMY_APP_CATALOG)
# Try to load the apps catalog
# This should implicitly trigger an update in the background
mocker.spy(m18n, "n")
app_dict = _load_apps_catalog()
m18n.n.assert_any_call("apps_catalog_obsolete_cache")
m18n.n.assert_any_call("apps_catalog_update_success")
# Cache shouldn't be empty anymore
assert glob.glob(APPS_CATALOG_CACHE + "/*")
assert "foo" in app_dict.keys()
assert "bar" in app_dict.keys()
def test_apps_catalog_load_with_conflicts_between_lists(mocker):
# Initialize ...
_initialize_apps_catalog_system()
conf = [{"id": "default", "url": APPS_CATALOG_DEFAULT_URL},
{"id": "default2", "url": APPS_CATALOG_DEFAULT_URL.replace("yunohost.org", "yolohost.org")}]
write_to_yaml(APPS_CATALOG_CONF, conf)
# Update
with requests_mock.Mocker() as m:
# Mock the server response with a dummy apps catalog
# + the same apps catalog for the second list
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, text=DUMMY_APP_CATALOG)
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL.replace("yunohost.org", "yolohost.org"), text=DUMMY_APP_CATALOG)
# Try to load the apps catalog
# This should implicitly trigger an update in the background
mocker.spy(logger, "warning")
app_dict = _load_apps_catalog()
logger.warning.assert_any_call(AnyStringWith("Duplicate"))
# Cache shouldn't be empty anymore
assert glob.glob(APPS_CATALOG_CACHE + "/*")
assert "foo" in app_dict.keys()
assert "bar" in app_dict.keys()
def test_apps_catalog_load_with_oudated_api_version(mocker):
# Initialize ...
_initialize_apps_catalog_system()
# Update
with requests_mock.Mocker() as m:
mocker.spy(m18n, "n")
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, text=DUMMY_APP_CATALOG)
_update_apps_catalog()
# Cache shouldn't be empty anymore
assert glob.glob(APPS_CATALOG_CACHE + "/*")
# Tweak the cache to replace the from_api_version with a different one
for cache_file in glob.glob(APPS_CATALOG_CACHE + "/*"):
cache_json = read_json(cache_file)
assert cache_json["from_api_version"] == APPS_CATALOG_API_VERSION
cache_json["from_api_version"] = 0
write_to_json(cache_file, cache_json)
# Update
with requests_mock.Mocker() as m:
# Mock the server response with a dummy apps catalog
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, text=DUMMY_APP_CATALOG)
mocker.spy(m18n, "n")
app_dict = _load_apps_catalog()
m18n.n.assert_any_call("apps_catalog_update_success")
assert "foo" in app_dict.keys()
assert "bar" in app_dict.keys()
# Check that we indeed have the new api number in cache
for cache_file in glob.glob(APPS_CATALOG_CACHE + "/*"):
cache_json = read_json(cache_file)
assert cache_json["from_api_version"] == APPS_CATALOG_API_VERSION
def test_apps_catalog_migrate_legacy_explicitly():
open("/etc/yunohost/appslists.json", "w").write('{"yunohost": {"yolo":"swag"}}')
mkdir(APPS_CATALOG_CACHE, 0o750, parents=True)
open(APPS_CATALOG_CACHE+"/yunohost_old.json", "w").write('{"foo":{}, "bar": {}}')
open(APPS_CATALOG_CRON_PATH, "w").write("# Some old cron")
from yunohost.tools import _get_migration_by_name
migration = _get_migration_by_name("futureproof_apps_catalog_system")
with requests_mock.Mocker() as m:
# Mock the server response with a dummy apps catalog
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, text=DUMMY_APP_CATALOG)
migration.migrate()
# Old conf shouldn't be there anymore (got renamed to .old)
assert not os.path.exists("/etc/yunohost/appslists.json")
# Old cache should have been removed
assert not os.path.exists(APPS_CATALOG_CACHE+"/yunohost_old.json")
# Cron should have been changed
assert "/bin/bash" in open(APPS_CATALOG_CRON_PATH, "r").read()
assert cron_job_is_there()
# Reading the apps_catalog should work
app_dict = _load_apps_catalog()
assert "foo" in app_dict.keys()
assert "bar" in app_dict.keys()
def test_apps_catalog_migrate_legacy_implicitly():
open("/etc/yunohost/appslists.json", "w").write('{"yunohost": {"yolo":"swag"}}')
mkdir(APPS_CATALOG_CACHE, 0o750, parents=True)
open(APPS_CATALOG_CACHE+"/yunohost_old.json", "w").write('{"old_foo":{}, "old_bar": {}}')
open(APPS_CATALOG_CRON_PATH, "w").write("# Some old cron")
with requests_mock.Mocker() as m:
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, text=DUMMY_APP_CATALOG)
app_dict = _load_apps_catalog()
assert "foo" in app_dict.keys()
assert "bar" in app_dict.keys()
# Old conf shouldn't be there anymore (got renamed to .old)
assert not os.path.exists("/etc/yunohost/appslists.json")
# Old cache should have been removed
assert not os.path.exists(APPS_CATALOG_CACHE+"/yunohost_old.json")
# Cron should have been changed
assert "/bin/bash" in open(APPS_CATALOG_CRON_PATH, "r").read()
assert cron_job_is_there()

View file

@ -1,389 +0,0 @@
import os
import pytest
import requests
import requests_mock
import glob
import time
from yunohost.utils.error import YunohostError
from yunohost.app import app_fetchlist, app_removelist, app_listlists, _using_legacy_appslist_system, _migrate_appslist_system, _register_new_appslist
URL_OFFICIAL_APP_LIST = "https://app.yunohost.org/official.json"
REPO_PATH = '/var/cache/yunohost/repo'
APPSLISTS_JSON = '/etc/yunohost/appslists.json'
def setup_function(function):
# Clear all appslist
files = glob.glob(REPO_PATH + "/*")
for f in files:
os.remove(f)
# Clear appslist crons
files = glob.glob("/etc/cron.d/yunohost-applist-*")
for f in files:
os.remove(f)
if os.path.exists("/etc/cron.daily/yunohost-fetch-appslists"):
os.remove("/etc/cron.daily/yunohost-fetch-appslists")
if os.path.exists(APPSLISTS_JSON):
os.remove(APPSLISTS_JSON)
def teardown_function(function):
pass
def cron_job_is_there():
r = os.system("run-parts -v --test /etc/cron.daily/ | grep yunohost-fetch-appslists")
return r == 0
#
# Test listing of appslists and registering of appslists #
#
def test_appslist_list_empty():
"""
Calling app_listlists() with no registered list should return empty dict
"""
assert app_listlists() == {}
def test_appslist_list_register():
"""
Register a new list
"""
# Assume we're starting with an empty app list
assert app_listlists() == {}
# Register a new dummy list
_register_new_appslist("https://lol.com/appslist.json", "dummy")
appslist_dict = app_listlists()
assert "dummy" in appslist_dict.keys()
assert appslist_dict["dummy"]["url"] == "https://lol.com/appslist.json"
assert cron_job_is_there()
def test_appslist_list_register_conflict_name():
"""
Attempt to register a new list with conflicting name
"""
_register_new_appslist("https://lol.com/appslist.json", "dummy")
with pytest.raises(YunohostError):
_register_new_appslist("https://lol.com/appslist2.json", "dummy")
appslist_dict = app_listlists()
assert "dummy" in appslist_dict.keys()
assert "dummy2" not in appslist_dict.keys()
def test_appslist_list_register_conflict_url():
"""
Attempt to register a new list with conflicting url
"""
_register_new_appslist("https://lol.com/appslist.json", "dummy")
with pytest.raises(YunohostError):
_register_new_appslist("https://lol.com/appslist.json", "plopette")
appslist_dict = app_listlists()
assert "dummy" in appslist_dict.keys()
assert "plopette" not in appslist_dict.keys()
#
# Test fetching of appslists #
#
def test_appslist_fetch():
"""
Do a fetchlist and test the .json got updated.
"""
assert app_listlists() == {}
_register_new_appslist(URL_OFFICIAL_APP_LIST, "yunohost")
with requests_mock.Mocker() as m:
# Mock the server response with a valid (well, empty, yep) json
m.register_uri("GET", URL_OFFICIAL_APP_LIST, text='{ }')
official_lastUpdate = app_listlists()["yunohost"]["lastUpdate"]
app_fetchlist()
new_official_lastUpdate = app_listlists()["yunohost"]["lastUpdate"]
assert new_official_lastUpdate > official_lastUpdate
def test_appslist_fetch_single_appslist():
"""
Register several lists but only fetch one. Check only one got updated.
"""
assert app_listlists() == {}
_register_new_appslist(URL_OFFICIAL_APP_LIST, "yunohost")
_register_new_appslist("https://lol.com/appslist.json", "dummy")
time.sleep(1)
with requests_mock.Mocker() as m:
# Mock the server response with a valid (well, empty, yep) json
m.register_uri("GET", URL_OFFICIAL_APP_LIST, text='{ }')
official_lastUpdate = app_listlists()["yunohost"]["lastUpdate"]
dummy_lastUpdate = app_listlists()["dummy"]["lastUpdate"]
app_fetchlist(name="yunohost")
new_official_lastUpdate = app_listlists()["yunohost"]["lastUpdate"]
new_dummy_lastUpdate = app_listlists()["dummy"]["lastUpdate"]
assert new_official_lastUpdate > official_lastUpdate
assert new_dummy_lastUpdate == dummy_lastUpdate
def test_appslist_fetch_unknownlist():
"""
Attempt to fetch an unknown list
"""
assert app_listlists() == {}
with pytest.raises(YunohostError):
app_fetchlist(name="swag")
def test_appslist_fetch_url_but_no_name():
"""
Do a fetchlist with url given, but no name given
"""
with pytest.raises(YunohostError):
app_fetchlist(url=URL_OFFICIAL_APP_LIST)
def test_appslist_fetch_badurl():
"""
Do a fetchlist with a bad url
"""
app_fetchlist(url="https://not.a.valid.url/plop.json", name="plop")
def test_appslist_fetch_badfile():
"""
Do a fetchlist and mock a response with a bad json
"""
assert app_listlists() == {}
_register_new_appslist(URL_OFFICIAL_APP_LIST, "yunohost")
with requests_mock.Mocker() as m:
m.register_uri("GET", URL_OFFICIAL_APP_LIST, text='{ not json lol }')
app_fetchlist()
def test_appslist_fetch_404():
"""
Do a fetchlist and mock a 404 response
"""
assert app_listlists() == {}
_register_new_appslist(URL_OFFICIAL_APP_LIST, "yunohost")
with requests_mock.Mocker() as m:
m.register_uri("GET", URL_OFFICIAL_APP_LIST, status_code=404)
app_fetchlist()
def test_appslist_fetch_sslerror():
"""
Do a fetchlist and mock an SSL error
"""
assert app_listlists() == {}
_register_new_appslist(URL_OFFICIAL_APP_LIST, "yunohost")
with requests_mock.Mocker() as m:
m.register_uri("GET", URL_OFFICIAL_APP_LIST,
exc=requests.exceptions.SSLError)
app_fetchlist()
def test_appslist_fetch_timeout():
"""
Do a fetchlist and mock a timeout
"""
assert app_listlists() == {}
_register_new_appslist(URL_OFFICIAL_APP_LIST, "yunohost")
with requests_mock.Mocker() as m:
m.register_uri("GET", URL_OFFICIAL_APP_LIST,
exc=requests.exceptions.ConnectTimeout)
app_fetchlist()
#
# Test remove of appslist #
#
def test_appslist_remove():
"""
Register a new appslist, then remove it
"""
# Assume we're starting with an empty app list
assert app_listlists() == {}
# Register a new dummy list
_register_new_appslist("https://lol.com/appslist.json", "dummy")
app_removelist("dummy")
# Should end up with no list registered
assert app_listlists() == {}
def test_appslist_remove_unknown():
"""
Attempt to remove an unknown list
"""
with pytest.raises(YunohostError):
app_removelist("dummy")
#
# Test migration from legacy appslist system #
#
def add_legacy_cron(name, url):
with open("/etc/cron.d/yunohost-applist-%s" % name, "w") as f:
f.write('00 00 * * * root yunohost app fetchlist -u %s -n %s > /dev/null 2>&1\n' % (url, name))
def test_appslist_check_using_legacy_system_testFalse():
"""
If no legacy cron job is there, the check should return False
"""
assert glob.glob("/etc/cron.d/yunohost-applist-*") == []
assert _using_legacy_appslist_system() is False
def test_appslist_check_using_legacy_system_testTrue():
"""
If there's a legacy cron job, the check should return True
"""
assert glob.glob("/etc/cron.d/yunohost-applist-*") == []
add_legacy_cron("yunohost", "https://app.yunohost.org/official.json")
assert _using_legacy_appslist_system() is True
def test_appslist_system_migration():
"""
Test that legacy cron jobs get migrated correctly when calling app_listlists
"""
# Start with no legacy cron, no appslist registered
assert glob.glob("/etc/cron.d/yunohost-applist-*") == []
assert app_listlists() == {}
assert not os.path.exists("/etc/cron.daily/yunohost-fetch-appslists")
# Add a few legacy crons
add_legacy_cron("yunohost", "https://app.yunohost.org/official.json")
add_legacy_cron("dummy", "https://swiggitty.swaggy.lol/yolo.json")
# Migrate
assert _using_legacy_appslist_system() is True
_migrate_appslist_system()
assert _using_legacy_appslist_system() is False
# No legacy cron job should remain
assert glob.glob("/etc/cron.d/yunohost-applist-*") == []
# Check they are in app_listlists anyway
appslist_dict = app_listlists()
assert "yunohost" in appslist_dict.keys()
assert appslist_dict["yunohost"]["url"] == "https://app.yunohost.org/official.json"
assert "dummy" in appslist_dict.keys()
assert appslist_dict["dummy"]["url"] == "https://swiggitty.swaggy.lol/yolo.json"
assert cron_job_is_there()
def test_appslist_system_migration_badcron():
"""
Test the migration on a bad legacy cron (no url found inside cron job)
"""
# Start with no legacy cron, no appslist registered
assert glob.glob("/etc/cron.d/yunohost-applist-*") == []
assert app_listlists() == {}
assert not os.path.exists("/etc/cron.daily/yunohost-fetch-appslists")
# Add a "bad" legacy cron
add_legacy_cron("wtflist", "ftp://the.fuck.is.this")
# Migrate
assert _using_legacy_appslist_system() is True
_migrate_appslist_system()
assert _using_legacy_appslist_system() is False
# No legacy cron should remain, but it should be backuped in /etc/yunohost
assert glob.glob("/etc/cron.d/yunohost-applist-*") == []
assert os.path.exists("/etc/yunohost/wtflist.oldlist.bkp")
# Appslist should still be empty
assert app_listlists() == {}
def test_appslist_system_migration_conflict():
"""
Test migration of conflicting cron job (in terms of url)
"""
# Start with no legacy cron, no appslist registered
assert glob.glob("/etc/cron.d/yunohost-applist-*") == []
assert app_listlists() == {}
assert not os.path.exists("/etc/cron.daily/yunohost-fetch-appslists")
# Add a few legacy crons
add_legacy_cron("yunohost", "https://app.yunohost.org/official.json")
add_legacy_cron("dummy", "https://app.yunohost.org/official.json")
# Migrate
assert _using_legacy_appslist_system() is True
_migrate_appslist_system()
assert _using_legacy_appslist_system() is False
# No legacy cron job should remain
assert glob.glob("/etc/cron.d/yunohost-applist-*") == []
# Only one among "dummy" and "yunohost" should be listed
appslist_dict = app_listlists()
assert (len(appslist_dict.keys()) == 1)
assert ("dummy" in appslist_dict.keys()) or ("yunohost" in appslist_dict.keys())
assert cron_job_is_there()

View file

@@ -30,23 +30,20 @@ import json
import subprocess
import pwd
import socket
from xmlrpclib import Fault
from importlib import import_module
from collections import OrderedDict
from moulinette import msignals, m18n
from moulinette.utils.log import getActionLogger
from moulinette.utils.process import check_output, call_async_output
from moulinette.utils.filesystem import read_json, write_to_json, read_yaml, write_to_yaml
from yunohost.app import app_fetchlist, app_info, app_upgrade, app_ssowatconf, app_list, _install_appslist_fetch_cron
from yunohost.domain import domain_add, domain_list, _get_maindomain, _set_maindomain
from yunohost.app import _update_apps_catalog, app_info, app_upgrade, app_ssowatconf, app_list
from yunohost.domain import domain_add, domain_list
from yunohost.dyndns import _dyndns_available, _dyndns_provides
from yunohost.firewall import firewall_upnp
from yunohost.service import service_status, service_start, service_enable
from yunohost.service import service_start, service_enable
from yunohost.regenconf import regen_conf
from yunohost.monitor import monitor_disk, monitor_system
from yunohost.utils.packages import ynh_packages_version, _dump_sources_list, _list_upgradable_apt_packages
from yunohost.utils.network import get_public_ip
from yunohost.utils.packages import _dump_sources_list, _list_upgradable_apt_packages
from yunohost.utils.error import YunohostError
from yunohost.log import is_unit_operation, OperationLogger
@@ -164,60 +161,10 @@ def tools_adminpw(new_password, check_strength=True):
logger.success(m18n.n('admin_password_changed'))
@is_unit_operation()
def tools_maindomain(operation_logger, new_domain=None):
"""
Check the current main domain, or change it
Keyword argument:
new_domain -- The new domain to be set as the main domain
"""
# If no new domain specified, we return the current main domain
if not new_domain:
return {'current_main_domain': _get_maindomain()}
# Check domain exists
if new_domain not in domain_list()['domains']:
raise YunohostError('domain_unknown')
operation_logger.related_to.append(('domain', new_domain))
operation_logger.start()
# Apply changes to ssl certs
ssl_key = "/etc/ssl/private/yunohost_key.pem"
ssl_crt = "/etc/ssl/private/yunohost_crt.pem"
new_ssl_key = "/etc/yunohost/certs/%s/key.pem" % new_domain
new_ssl_crt = "/etc/yunohost/certs/%s/crt.pem" % new_domain
try:
if os.path.exists(ssl_key) or os.path.lexists(ssl_key):
os.remove(ssl_key)
if os.path.exists(ssl_crt) or os.path.lexists(ssl_crt):
os.remove(ssl_crt)
os.symlink(new_ssl_key, ssl_key)
os.symlink(new_ssl_crt, ssl_crt)
_set_maindomain(new_domain)
except Exception as e:
logger.warning("%s" % e, exc_info=1)
raise YunohostError('maindomain_change_failed')
_set_hostname(new_domain)
# Generate SSOwat configuration file
app_ssowatconf()
# Regen configurations
try:
with open('/etc/yunohost/installed', 'r'):
regen_conf()
except IOError:
pass
logger.success(m18n.n('maindomain_changed'))
def tools_maindomain(new_main_domain=None):
from yunohost.domain import domain_main_domain
logger.warning(m18n.g("deprecated_command_alias", prog="yunohost", old="tools maindomain", new="domain main-domain"))
return domain_main_domain(new_main_domain=new_main_domain)
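# Illustrative sketch, not part of the module: the shim above keeps legacy calls working
# (they only emit a deprecation warning and are forwarded), so new code is expected to
# call the domain API directly, e.g.:
#
#   from yunohost.domain import domain_main_domain
#   domain_main_domain()                                # check the current main domain
#   domain_main_domain(new_main_domain="example.org")   # "example.org" is a hypothetical, already-added domain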
def _set_hostname(hostname, pretty_hostname=None):
@@ -281,6 +228,7 @@ def tools_postinstall(operation_logger, domain, password, ignore_dyndns=False,
"""
from yunohost.utils.password import assert_password_is_strong_enough
from yunohost.domain import domain_main_domain
dyndns_provider = "dyndns.yunohost.org"
@@ -395,7 +343,7 @@ def tools_postinstall(operation_logger, domain, password, ignore_dyndns=False,
# New domain config
regen_conf(['nsswitch'], force=True)
domain_add(domain, dyndns)
tools_maindomain(domain)
domain_main_domain(domain)
# Change LDAP admin password
tools_adminpw(password, check_strength=not force_password)
@@ -403,15 +351,17 @@ def tools_postinstall(operation_logger, domain, password, ignore_dyndns=False,
# Enable UPnP silently and reload firewall
firewall_upnp('enable', no_refresh=True)
# Setup the default apps list with cron job
# Initialize the apps catalog system
_initialize_apps_catalog_system()
# Try to update the apps catalog ...
# we don't want to fail hard if this fails,
# because it could simply be, for example, an offline installation...
try:
app_fetchlist(name="yunohost",
url="https://app.yunohost.org/apps.json")
_update_apps_catalog()
except Exception as e:
logger.warning(str(e))
_install_appslist_fetch_cron()
# Init migrations (skip them, no need to run them on a fresh system)
_skip_all_migrations()
@@ -502,11 +452,10 @@ def tools_update(apps=False, system=False):
upgradable_apps = []
if apps:
logger.info(m18n.n('updating_app_lists'))
try:
app_fetchlist()
_update_apps_catalog()
except YunohostError as e:
logger.error(m18n.n('tools_update_failed_to_app_fetchlist'), error=e)
logger.error(str(e))
upgradable_apps = list(_list_upgradable_apps())
@@ -721,203 +670,6 @@ def tools_upgrade(operation_logger, apps=None, system=False):
operation_logger.success()
def tools_diagnosis(private=False):
"""
Return global info about current yunohost instance to help debugging
"""
diagnosis = OrderedDict()
# Debian release
try:
with open('/etc/debian_version', 'r') as f:
debian_version = f.read().rstrip()
except IOError as e:
logger.warning(m18n.n('diagnosis_debian_version_error', error=format(e)), exc_info=1)
else:
diagnosis['host'] = "Debian %s" % debian_version
# Kernel version
try:
with open('/proc/sys/kernel/osrelease', 'r') as f:
kernel_version = f.read().rstrip()
except IOError as e:
logger.warning(m18n.n('diagnosis_kernel_version_error', error=format(e)), exc_info=1)
else:
diagnosis['kernel'] = kernel_version
# Packages version
diagnosis['packages'] = ynh_packages_version()
diagnosis["backports"] = check_output("dpkg -l |awk '/^ii/ && $3 ~ /bpo[6-8]/ {print $2}'").split()
# Server basic monitoring
diagnosis['system'] = OrderedDict()
try:
disks = monitor_disk(units=['filesystem'], human_readable=True)
except (YunohostError, Fault) as e:
logger.warning(m18n.n('diagnosis_monitor_disk_error', error=format(e)), exc_info=1)
else:
diagnosis['system']['disks'] = {}
for disk in disks:
if isinstance(disks[disk], str):
diagnosis['system']['disks'][disk] = disks[disk]
else:
diagnosis['system']['disks'][disk] = 'Mounted on %s, %s (%s free)' % (
disks[disk]['mnt_point'],
disks[disk]['size'],
disks[disk]['avail']
)
try:
system = monitor_system(units=['cpu', 'memory'], human_readable=True)
except YunohostError as e:
logger.warning(m18n.n('diagnosis_monitor_system_error', error=format(e)), exc_info=1)
else:
diagnosis['system']['memory'] = {
'ram': '%s (%s free)' % (system['memory']['ram']['total'], system['memory']['ram']['free']),
'swap': '%s (%s free)' % (system['memory']['swap']['total'], system['memory']['swap']['free']),
}
# nginx -t
p = subprocess.Popen("nginx -t".split(),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, _ = p.communicate()
diagnosis["nginx"] = out.strip().split("\n")
if p.returncode != 0:
logger.error(out)
# Services status
services = service_status()
diagnosis['services'] = {}
for service in services:
diagnosis['services'][service] = "%s (%s)" % (services[service]['status'], services[service]['loaded'])
# YNH Applications
try:
applications = app_list()['apps']
except YunohostError as e:
diagnosis['applications'] = m18n.n('diagnosis_no_apps')
else:
diagnosis['applications'] = {}
for application in applications:
if application['installed']:
diagnosis['applications'][application['id']] = application['label'] if application['label'] else application['name']
# Private data
if private:
diagnosis['private'] = OrderedDict()
# Public IP
diagnosis['private']['public_ip'] = {}
diagnosis['private']['public_ip']['IPv4'] = get_public_ip(4)
diagnosis['private']['public_ip']['IPv6'] = get_public_ip(6)
# Domains
diagnosis['private']['domains'] = domain_list()['domains']
diagnosis['private']['regen_conf'] = regen_conf(with_diff=True, dry_run=True)
try:
diagnosis['security'] = {
"CVE-2017-5754": {
"name": "meltdown",
"vulnerable": _check_if_vulnerable_to_meltdown(),
}
}
except Exception as e:
import traceback
traceback.print_exc()
logger.warning("Unable to check for meltdown vulnerability: %s" % e)
return diagnosis
def _check_if_vulnerable_to_meltdown():
# meltdown CVE: https://security-tracker.debian.org/tracker/CVE-2017-5754
# We use a cache file to avoid re-running the script too many times,
# which can be expensive (up to around 5 seconds on ARM)
# and makes the admin interface appear slow (cf. the calls to diagnosis
# from the webadmin)
#
# The cache is in /tmp, so it disappears upon reboot;
# additionally we compare it to dpkg.log's modification time
# so that the check is re-run if there were package upgrades
# (e.g. of yunohost itself)
cache_file = "/tmp/yunohost-meltdown-diagnosis"
dpkg_log = "/var/log/dpkg.log"
if os.path.exists(cache_file):
if not os.path.exists(dpkg_log) or os.path.getmtime(cache_file) > os.path.getmtime(dpkg_log):
logger.debug("Using cached results for meltdown checker, from %s" % cache_file)
return read_json(cache_file)[0]["VULNERABLE"]
# script taken from https://github.com/speed47/spectre-meltdown-checker
# the script commit id is stored directly in the script
file_dir = os.path.split(__file__)[0]
SCRIPT_PATH = os.path.join(file_dir, "./vendor/spectre-meltdown-checker/spectre-meltdown-checker.sh")
# '--variant 3' corresponds to Meltdown
# example output from the script:
# [{"NAME":"MELTDOWN","CVE":"CVE-2017-5754","VULNERABLE":false,"INFOS":"PTI mitigates the vulnerability"}]
try:
logger.debug("Running meltdown vulnerability checker")
call = subprocess.Popen("bash %s --batch json --variant 3" %
SCRIPT_PATH, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# TODO / FIXME : here we are ignoring error messages ...
# in particular on RPi2 and other hardware, the script complains about
# "missing some kernel info (see -v), accuracy might be reduced"
# Dunno what to do about that but we probably don't want to harass
# users with this warning ...
output, err = call.communicate()
assert call.returncode in (0, 2, 3), "Return code: %s" % call.returncode
# If there are multiple lines, it sounds like there were some messages
# in stdout that are not JSON ... Try to get the actual JSON,
# which should be the last line
output = output.strip()
if "\n" in output:
logger.debug("Original meltdown checker output : %s" % output)
output = output.split("\n")[-1]
CVEs = json.loads(output)
assert len(CVEs) == 1
assert CVEs[0]["NAME"] == "MELTDOWN"
except Exception as e:
import traceback
traceback.print_exc()
logger.warning("Something wrong happened when trying to diagnose Meltdown vunerability, exception: %s" % e)
raise Exception("Command output for failed meltdown check: '%s'" % output)
logger.debug("Writing results from meltdown checker to cache file, %s" % cache_file)
write_to_json(cache_file, CVEs)
return CVEs[0]["VULNERABLE"]
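# Descriptive note (added for clarity): the cache above is considered valid as long as
# dpkg.log is absent or older than the cache file, i.e. roughly
#
#   os.path.getmtime(cache_file) > os.path.getmtime(dpkg_log)
#
# so a reboot (which clears /tmp) or any dpkg activity since the last run forces
# the checker script to be executed again.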
def tools_port_available(port):
"""
Check availability of a local port
Keyword argument:
port -- Port to check
"""
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(1)
s.connect(("localhost", int(port)))
s.close()
except socket.error:
return True
else:
return False
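# Minimal usage sketch (the port number below is an arbitrary example):
#
#   if tools_port_available(4242):
#       pass  # nothing answered on localhost:4242, the port can be used
#   else:
#       pass  # something is already listening on that port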
@is_unit_operation()
def tools_shutdown(operation_logger, force=False):
shutdown = force

View file

@@ -33,36 +33,6 @@ logger = logging.getLogger('yunohost.utils.packages')
# Exceptions -----------------------------------------------------------------
class PackageException(Exception):
"""Base exception related to a package
Represent an exception related to the package named `pkgname`. If no
`message` is provided, it will first try to use the translation key
`message_key` if defined by the derived class. Otherwise, a standard
message will be used.
"""
message_key = 'package_unexpected_error'
def __init__(self, pkgname, message=None):
super(PackageException, self).__init__(
message or m18n.n(self.message_key, pkgname=pkgname))
self.pkgname = pkgname
class UnknownPackage(PackageException):
"""The package is not found in the cache."""
message_key = 'package_unknown'
class UninstalledPackage(PackageException):
"""The package is not installed."""
message_key = 'package_not_installed'
class InvalidSpecifier(ValueError):
"""An invalid specifier was found."""
@@ -402,43 +372,43 @@ def get_installed_version(*pkgnames, **kwargs):
"""Get the installed version of package(s)
Retrieve one or more packages named `pkgnames` and return their installed
version as a dict or as a string if only one is requested and `as_dict` is
`False`. If `strict` is `True`, an exception will be raised if a package
is unknown or not installed.
version as a dict or as a string if only one is requested.
"""
versions = OrderedDict()
cache = apt.Cache()
# Retrieve options
as_dict = kwargs.get('as_dict', False)
strict = kwargs.get('strict', False)
with_repo = kwargs.get('with_repo', False)
for pkgname in pkgnames:
try:
pkg = cache[pkgname]
except KeyError:
if strict:
raise UnknownPackage(pkgname)
logger.warning(m18n.n('package_unknown', pkgname=pkgname))
if with_repo:
versions[pkgname] = {
"version": None,
"repo": None,
}
else:
versions[pkgname] = None
continue
try:
version = pkg.installed.version
except AttributeError:
if strict:
raise UninstalledPackage(pkgname)
version = None
try:
# stable, testing, unstable
repo = pkg.installed.origins[0].component
except AttributeError:
if strict:
raise UninstalledPackage(pkgname)
repo = ""
if repo == "now":
repo = "local"
if with_repo:
versions[pkgname] = {
"version": version,
@@ -449,7 +419,7 @@ def get_installed_version(*pkgnames, **kwargs):
else:
versions[pkgname] = version
if len(pkgnames) == 1 and not as_dict:
if len(pkgnames) == 1:
return versions[pkgnames[0]]
return versions
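# Illustrative usage sketch (package names are just examples):
#
#   get_installed_version("moulinette")
#   # -> a single version string (or None if the package is unknown / not installed)
#
#   get_installed_version("moulinette", "yunohost")
#   # -> OrderedDict mapping each package name to its installed version
#
#   get_installed_version("moulinette", with_repo=True)
#   # -> {"version": ..., "repo": ...}, where "repo" is e.g. "stable" or "local"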