Merge pull request #165 from netinvent/email-support

Add Email notification support
Orsiris de Jong 2025-06-13 14:57:25 +02:00 committed by GitHub
commit 8dcdb76401
GPG key ID: B5690EEEBB952194
11 changed files with 524 additions and 198 deletions

View file

@@ -287,7 +287,7 @@ def compile(
NUITKA_OPTIONS += " --plugin-enable=tk-inter"
# So for an unknown reason, some windows builds will not hide the console, see #146
# We replaced this option with a python version in gui\__main__.py
# NUITKA_OPTIONS += " --windows-console-mode=hide"
NUITKA_OPTIONS += " --windows-console-mode=hide"
else:
NUITKA_OPTIONS += " --plugin-disable=tk-inter --nofollow-import-to=FreeSimpleGUI --nofollow-import-to=_tkinter --nofollow-import-to=npbackup.gui"
if onefile:

View file

@@ -7,8 +7,8 @@ __intname__ = "npbackup.configuration"
__author__ = "Orsiris de Jong"
__copyright__ = "Copyright (C) 2022-2025 NetInvent"
__license__ = "GPL-3.0-only"
__build__ = "2025040401"
__version__ = "npbackup 3.0.0+"
__build__ = "2025061301"
__version__ = "npbackup 3.0.3+"
from typing import Tuple, Optional, List, Any, Union
@@ -105,6 +105,8 @@ ENCRYPTED_OPTIONS = [
"repo_opts.repo_password_command",
"global_prometheus.http_username",
"global_prometheus.http_password",
"global_email.smtp_username",
"global_email.smtp_password",
"env.encrypted_env_variables",
"global_options.auto_upgrade_server_username",
"global_options.auto_upgrade_server_password",
@@ -219,6 +221,20 @@ empty_config_dict = {
"additional_labels": [],
"no_cert_verify": False,
},
"global_email": {
"enable": False,
"instance": "${MACHINE_ID}",
"smtp_server": None,
"smtp_port": 587,
"smtp_username": None,
"smtp_password": None,
"sender": None,
"recipients": None,
"on_backup_success": True,
"on_backup_failure": True,
"on_operations_success": False,
"on_operations_failure": True,
},
"global_options": {
"auto_upgrade": False,
"auto_upgrade_percent_chance": 5, # On all runs. On 15m interval runs, this could be 5% (ie once a day), on daily runs, this should be 95% (ie once a day)
@@ -730,33 +746,16 @@ def get_repo_config(
return None, None
# Merge prometheus global settings with repo settings
prometheus_backup_job = None
try:
prometheus_backup_job = repo_config.g("prometheus.backup_job")
except KeyError:
logger.info(
"No prometheus backup job found in repo config. Setting backup job to machine id"
)
prometheus_backup_job = full_config.g("identity.machine_id")
prometheus_group = None
try:
prometheus_group = repo_config.g("prometheus.group")
except KeyError:
logger.info(
"No prometheus group found in repo config. Setting prometheus group to machine group"
)
prometheus_group = full_config.g("identity.machine_group")
try:
repo_config.s("prometheus", deepcopy(full_config.g("global_prometheus")))
except KeyError:
logger.info("No global prometheus settings found")
if prometheus_backup_job:
repo_config.s("prometheus.backup_job", prometheus_backup_job)
if prometheus_group:
repo_config.s("prometheus.group", prometheus_group)
try:
repo_config.s("global_email", deepcopy(full_config.g("global_email")))
except KeyError:
logger.info("No global email settings found")
try:
repo_config.s("global_prometheus", deepcopy(full_config.g("global_prometheus")))
except KeyError:
logger.info("No global prometheus settings found")
try:
repo_group = full_config.g(f"repos.{repo_name}.repo_group")
group_config = full_config.g(f"groups.{repo_group}")

npbackup/core/metrics.py (new file, 337 lines)
View file

@@ -0,0 +1,337 @@
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of npbackup
__intname__ = "npbackup.core.metrics"
__author__ = "Orsiris de Jong"
__copyright__ = "Copyright (C) 2022-2025 NetInvent"
__license__ = "GPL-3.0-only"
__build__ = "2025061301"
import os
from typing import Optional, Tuple, List
from datetime import datetime, timezone
from logging import getLogger
from ofunctions.mailer import Mailer
from npbackup.restic_metrics import (
create_labels_string,
restic_str_output_to_json,
restic_json_to_prometheus,
upload_metrics,
write_metrics_file,
)
from npbackup.__version__ import __intname__ as NAME, version_dict
from npbackup.__debug__ import _DEBUG, fmt_json
from resources.customization import OEM_STRING
logger = getLogger()
def metric_analyser(
repo_config: dict,
restic_result: bool,
result_string: str,
operation: str,
dry_run: bool,
append_metrics_file: bool,
exec_time: Optional[float] = None,
analyze_only: bool = False,
) -> Tuple[bool, bool]:
"""
Tries to derive operation success and backup too small booleans from restic output
Returns op success, backup too small
"""
operation_success = True
backup_too_small = False
timestamp = int(datetime.now(timezone.utc).timestamp())
date = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S UTC")
metrics = []
repo_name = repo_config.g("name")
try:
labels = {
"npversion": f"{NAME}{version_dict['version']}-{version_dict['build_type']}",
"repo_name": repo_name,
"action": operation,
}
if repo_config.g("global_prometheus") and repo_config.g(
"global_prometheus.metrics"
):
labels["backup_job"] = repo_config.g("prometheus.backup_job")
labels["group"] = repo_config.g("prometheus.group")
labels["instance"] = repo_config.g("global_prometheus.instance")
prometheus_additional_labels = repo_config.g(
"global_prometheus.additional_labels"
)
if isinstance(prometheus_additional_labels, dict):
for k, v in prometheus_additional_labels.items():
labels[k] = v
else:
logger.error(
f"Bogus value in configuration for prometheus additional labels: {prometheus_additional_labels}"
)
# We only analyse backup output of restic
if operation == "backup":
minimum_backup_size_error = repo_config.g(
"backup_opts.minimum_backup_size_error"
)
# If result was a str, we need to transform it into json first
if isinstance(result_string, str):
restic_result = restic_str_output_to_json(restic_result, result_string)
operation_success, metrics, backup_too_small = restic_json_to_prometheus(
restic_result=restic_result,
restic_json=restic_result,
labels=labels,
minimum_backup_size_error=minimum_backup_size_error,
)
if not operation_success or not restic_result:
logger.error("Backend finished with errors.")
"""
Add a metric informing whether any warning was raised while executing npbackup tasks
CRITICAL = 50 will be 3 in this metric, but should not really exist
ERROR = 40 will be 2 in this metric
WARNING = 30 will be 1 in this metric
INFO = 20 will be 0
"""
worst_exec_level = logger.get_worst_logger_level()
if worst_exec_level == 50:
exec_state = 3
elif worst_exec_level == 40:
exec_state = 2
elif worst_exec_level == 30:
exec_state = 1
else:
exec_state = 0
# Update exec_state according to the analysis above
if not operation_success or backup_too_small:
exec_state = 2
labels_string = create_labels_string(labels)
metrics.append(
f'npbackup_exec_state{{{labels_string},timestamp="{timestamp}"}} {exec_state}'
)
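# Illustrative result, assuming create_labels_string renders comma-joined key="value" pairs (values hypothetical):
# npbackup_exec_state{npversion="npbackup3.0.3+-release",repo_name="default",action="backup",timestamp="1749822000"} 0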
# Add upgrade state if upgrades activated
upgrade_state = os.environ.get("NPBACKUP_UPGRADE_STATE", None)
try:
upgrade_state = int(upgrade_state)
labels_string = create_labels_string(labels)
metrics.append(
f'npbackup_exec_state{{{labels_string},timestamp="{timestamp}"}} {upgrade_state}'
)
except (ValueError, TypeError):
pass
if isinstance(exec_time, (int, float)):
try:
metrics.append(
f'npbackup_exec_time{{{labels_string},timestamp="{timestamp}"}} {exec_time}'
)
except (ValueError, TypeError):
logger.warning("Cannot get exec time from environment")
if not analyze_only:
logger.debug("Metrics computed:\n{}".format("\n".join(metrics)))
send_prometheus_metrics(
repo_config,
metrics,
dry_run,
append_metrics_file,
operation,
)
send_metrics_mail(
repo_config,
operation,
restic_result=restic_result,
operation_success=operation_success,
backup_too_small=backup_too_small,
exec_state=exec_state,
date=date,
)
except KeyError as exc:
logger.info("Metrics error: {}".format(exc))
logger.debug("Trace:", exc_info=True)
except OSError as exc:
logger.error("Metrics OS error: ".format(exc))
logger.debug("Trace:", exc_info=True)
return operation_success, backup_too_small
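# Typical call-site sketch (the runner's metrics decorator drives this; values hypothetical):
# metric_analyser(repo_config, restic_result, result_string, "backup", dry_run=False, append_metrics_file=False, exec_time=12.3)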
def send_prometheus_metrics(
repo_config: dict,
metrics: List[str],
dry_run: bool = False,
append_metrics_file: bool = False,
operation: Optional[str] = None,
) -> bool:
try:
no_cert_verify = repo_config.g("global_prometheus.no_cert_verify")
if not no_cert_verify:
no_cert_verify = False
destination = repo_config.g("global_prometheus.destination")
repo_name = repo_config.g("name")
if repo_config.g("global_prometheus.metrics") is not True:
logger.debug(
"Metrics not enabled in configuration. Not sending metrics to Prometheus."
)
return False
except KeyError as exc:
logger.error("No prometheus configuration found in config file.")
return False
if destination and dry_run:
logger.info("Dry run mode. Not sending metrics.")
elif destination:
logger.debug("Sending metrics to {}".format(destination))
dest = destination.lower()
if dest.startswith("http"):
if not "metrics" in dest:
logger.error(
"Destination does not contain 'metrics' keyword. Not uploading."
)
return False
if not "job" in dest:
logger.error(
"Destination does not contain 'job' keyword. Not uploading."
)
return False
try:
authentication = (
repo_config.g("prometheus.http_username"),
repo_config.g("prometheus.http_password"),
)
except KeyError:
logger.info("No metrics authentication present.")
authentication = None
# Fix for #150, job name needs to be unique in order to avoid overwriting previous job in push gateway
destination = f"{destination}___repo_name={repo_name}___action={operation}"
upload_metrics(destination, authentication, no_cert_verify, metrics)
else:
write_metrics_file(destination, metrics, append=append_metrics_file)
else:
logger.debug("No metrics destination set. Not sending metrics")
def send_metrics_mail(
repo_config: dict,
operation: str,
restic_result: Optional[dict] = None,
operation_success: Optional[bool] = None,
backup_too_small: Optional[bool] = None,
exec_state: Optional[int] = None,
date: Optional[str] = None,
):
"""
Sends metrics via email.
"""
op_success = bool(operation_success and not backup_too_small and exec_state == 0)
repo_name = repo_config.g("name")
try:
if not repo_config.g("global_email") or repo_config.g("global_email.enable"):
logger.debug(
"Email not enabled in configuration. Not sending notifications."
)
return False
instance = repo_config.g("global_email.instance")
smtp_server = repo_config.g("global_email.smtp_server")
smtp_port = repo_config.g("global_email.smtp_port")
smtp_security = repo_config.g("global_email.smtp_security")
if not smtp_server or not smtp_port or not smtp_security:
logger.warning(
"SMTP server/port or security not set. Not sending notifications via email."
)
return False
smtp_username = repo_config.g("global_email.smtp_username")
smtp_password = repo_config.g("global_email.smtp_password")
sender = repo_config.g("global_email.sender")
recipients = repo_config.g("global_email.recipients")
if not sender or not recipients:
logger.warning(
"Sender or recipients not set. Not sending metrics via email."
)
return False
on_backup_success = repo_config.g("global_email.on_backup_success")
on_backup_failure = repo_config.g("global_email.on_backup_failure")
on_operations_success = repo_config.g("global_email.on_operations_success")
on_operations_failure = repo_config.g("global_email.on_operations_failure")
if operation == "backup":
if not on_backup_success and op_success:
logger.debug("Not sending email for backup success.")
return True
if not on_backup_failure and not op_success:
logger.debug("Not sending email for backup failure.")
return False
elif operation != "test_email":
if not on_operations_success and op_success:
logger.debug("Not sending email for operation success.")
return True
if not on_operations_failure and not op_success:
logger.debug("Not sending email for operation failure.")
return False
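# Gating summary (illustration): backup results are filtered by on_backup_success / on_backup_failure,
# all other operations by on_operations_success / on_operations_failure,
# and operation == "test_email" bypasses the filters and always sends.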
except KeyError as exc:
logger.error(f"Missing email configuration: {exc}")
return False
logger.info(f"Sending metrics via email to {recipients}.")
recipients = [recipient.strip() for recipient in recipients.split(",")]
mailer = Mailer(
smtp_server=smtp_server,
smtp_port=smtp_port,
security=smtp_security,
smtp_user=smtp_username,
smtp_password=smtp_password,
debug=False,  # Make sure we don't send debug info, so we don't leak passwords
)
subject = (
f"{OEM_STRING} failure report for {instance} {operation} on repo {repo_name}"
)
body = f"Operation: {operation}\nRepo: {repo_name}"
if op_success:
body += "\nStatus: Success"
subject = f"{OEM_STRING} success report for {instance} {operation} on repo {repo_name}"
elif backup_too_small:
body += "\nStatus: Backup too small"
elif exec_state == 1:
body += "\nStatus: Warning"
elif exec_state == 2:
body += "\nStatus: Error"
elif exec_state == 3:
body += "\nStatus: Critical error"
body += f"\nDate: {date}"
if isinstance(restic_result, dict):
body += f"\n\nDetail: {fmt_json(restic_result)}"
body += f"\n\nGenerated by {OEM_STRING} {version_dict['version']}\n"
try:
result = mailer.send_email(
sender_mail=sender, recipient_mails=recipients, subject=subject, body=body
)
if result:
logger.info("Metrics sent via email.")
return True
except Exception as exc:
logger.error(f"Failed to send metrics via email: {exc}")
logger.debug("Trace:", exc_info=True)
return False
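Taken together, the email path boils down to the following minimal standalone sketch. Server values and credentials are hypothetical; only the Mailer calls already shown above are used:

from ofunctions.mailer import Mailer

# Hypothetical SMTP settings, for illustration only
mailer = Mailer(
    smtp_server="smtp.example.org",
    smtp_port=587,
    security="tls",
    smtp_user="npbackup",
    smtp_password="secret",
    debug=False,  # keep False so credentials never end up in logs
)
result = mailer.send_email(
    sender_mail="npbackup@example.org",
    recipient_mails=["admin@example.org"],
    subject="npbackup test notification",
    body="Operation: test_email\nRepo: default",
)
print("sent" if result else "failed")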

View file

@@ -10,7 +10,7 @@ __license__ = "GPL-3.0-only"
__build__ = "2025061201"
from typing import Optional, Callable, Union, List, Tuple
from typing import Optional, Callable, Union, List
import os
import logging
import tempfile
@@ -25,19 +25,12 @@ from ofunctions.threading import threaded
from ofunctions.platform import os_arch
from ofunctions.misc import fn_name
import ntplib
from npbackup.restic_metrics import (
create_labels_string,
restic_str_output_to_json,
restic_json_to_prometheus,
upload_metrics,
write_metrics_file,
)
from npbackup.core.metrics import metric_analyser
from npbackup.restic_wrapper import ResticRunner
from npbackup.core.restic_source_binary import get_restic_internal_binary
from npbackup.core import jobs
from npbackup.path_helper import CURRENT_DIR, BASEDIR
import npbackup.pidfile_ng
from npbackup.__version__ import __intname__ as NAME, version_dict
from npbackup.__debug__ import _DEBUG, exception_to_string
from npbackup.__env__ import MAX_ALLOWED_NTP_OFFSET
@@ -88,160 +81,6 @@ non_locking_operations = [
]
def metric_analyser(
repo_config: dict,
restic_result: bool,
result_string: str,
operation: str,
dry_run: bool,
append_metrics_file: bool,
exec_time: Optional[float] = None,
analyze_only: bool = False,
) -> Tuple[bool, bool]:
"""
Tries to derive operation success and backup too small booleans from restic output
Returns op success, backup too small
"""
operation_success = True
backup_too_small = False
metrics = []
try:
repo_name = repo_config.g("name")
labels = {
"npversion": f"{NAME}{version_dict['version']}-{version_dict['build_type']}",
"repo_name": repo_name,
"action": operation,
}
if repo_config.g("prometheus.metrics"):
labels["instance"] = repo_config.g("prometheus.instance")
labels["backup_job"] = repo_config.g("prometheus.backup_job")
labels["group"] = repo_config.g("prometheus.group")
no_cert_verify = repo_config.g("prometheus.no_cert_verify")
destination = repo_config.g("prometheus.destination")
prometheus_additional_labels = repo_config.g("prometheus.additional_labels")
if isinstance(prometheus_additional_labels, dict):
for k, v in prometheus_additional_labels.items():
labels[k] = v
else:
logger.error(
f"Bogus value in configuration for prometheus additional labels: {prometheus_additional_labels}"
)
else:
destination = None
no_cert_verify = False
# We only analyse backup output of restic
if operation == "backup":
minimum_backup_size_error = repo_config.g(
"backup_opts.minimum_backup_size_error"
)
# If result was a str, we need to transform it into json first
if isinstance(result_string, str):
restic_result = restic_str_output_to_json(restic_result, result_string)
operation_success, metrics, backup_too_small = restic_json_to_prometheus(
restic_result=restic_result,
restic_json=restic_result,
labels=labels,
minimum_backup_size_error=minimum_backup_size_error,
)
if not operation_success or not restic_result:
logger.error("Backend finished with errors.")
"""
Add a metric for informing if any warning raised while executing npbackup_tasks
CRITICAL = 50 will be 3 in this metric, but should not really exist
ERROR = 40 will be 2 in this metric
WARNING = 30 will be 1 in this metric
INFO = 20 will be 0
"""
worst_exec_level = logger.get_worst_logger_level()
if worst_exec_level == 50:
exec_state = 3
elif worst_exec_level == 40:
exec_state = 2
elif worst_exec_level == 30:
exec_state = 1
else:
exec_state = 0
# exec_state update according to metric_analyser
if not operation_success or backup_too_small:
exec_state = 2
labels_string = create_labels_string(labels)
metrics.append(
f'npbackup_exec_state{{{labels_string},timestamp="{int(datetime.now(timezone.utc).timestamp())}"}} {exec_state}'
)
# Add upgrade state if upgrades activated
upgrade_state = os.environ.get("NPBACKUP_UPGRADE_STATE", None)
try:
upgrade_state = int(upgrade_state)
labels_string = create_labels_string(labels)
metrics.append(
f'npbackup_exec_state{{{labels_string},timestamp="{int(datetime.now(timezone.utc).timestamp())}"}} {upgrade_state}'
)
except (ValueError, TypeError):
pass
if isinstance(exec_time, (int, float)):
try:
metrics.append(
f'npbackup_exec_time{{{labels_string},timestamp="{int(datetime.now(timezone.utc).timestamp())}"}} {exec_time}'
)
except (ValueError, TypeError):
logger.warning("Cannot get exec time from environment")
if not analyze_only:
logger.debug("Metrics computed:\n{}".format("\n".join(metrics)))
if destination and dry_run:
logger.info("Dry run mode. Not sending metrics.")
elif destination:
logger.debug("Sending metrics to {}".format(destination))
dest = destination.lower()
if dest.startswith("http"):
if not "metrics" in dest:
logger.error(
"Destination does not contain 'metrics' keyword. Not uploading."
)
return backup_too_small
if not "job" in dest:
logger.error(
"Destination does not contain 'job' keyword. Not uploading."
)
return backup_too_small
try:
authentication = (
repo_config.g("prometheus.http_username"),
repo_config.g("prometheus.http_password"),
)
except KeyError:
logger.info("No metrics authentication present.")
authentication = None
# Fix for #150, job name needs to be unique in order to avoid overwriting previous job in push gateway
destination = (
f"{destination}___repo_name={repo_name}___action={operation}"
)
upload_metrics(destination, authentication, no_cert_verify, metrics)
else:
write_metrics_file(destination, metrics, append=append_metrics_file)
else:
logger.debug("No metrics destination set. Not sending metrics")
except KeyError as exc:
logger.info("Metrics error: {}".format(exc))
logger.debug("Trace:", exc_info=True)
except OSError as exc:
logger.error("Metrics OS error: ".format(exc))
logger.debug("Trace:", exc_info=True)
return operation_success, backup_too_small
def get_ntp_offset(ntp_server: str) -> Optional[float]:
"""
Get current time offset from ntp server
@@ -821,7 +660,7 @@ class NPBackupRunner:
def metrics(fn: Callable):
"""
Write prometheus metrics
Analyse metrics and notify
"""
@wraps(fn)

View file

@@ -7,20 +7,21 @@ __intname__ = "npbackup.gui.config"
__author__ = "Orsiris de Jong"
__copyright__ = "Copyright (C) 2022-2025 NetInvent"
__license__ = "GPL-3.0-only"
__build__ = "2025022301"
__build__ = "2025061301"
from typing import List, Tuple
import os
import re
import pathlib
from logging import getLogger
import FreeSimpleGUI as sg
import textwrap
from datetime import datetime, timezone
from ruamel.yaml.comments import CommentedMap
from npbackup import configuration
from ofunctions.misc import get_key_from_value, BytesConverter
from npbackup.core.i18n_helper import _t
from npbackup.core.metrics import send_metrics_mail
from resources.customization import (
INHERITED_ICON,
NON_INHERITED_ICON,
@@ -719,7 +720,12 @@ def config_gui(full_config: dict, config_file: str):
# Only update global options gui with identified global keys
for key in full_config.keys():
if key in ("identity", "global_prometheus", "global_options"):
if key in (
"identity",
"global_prometheus",
"global_email",
"global_options",
):
global_config.s(key, full_config.g(key))
iter_over_config(global_config, None, "group", unencrypted, None)
@@ -835,6 +841,7 @@ def config_gui(full_config: dict, config_file: str):
key.startswith("global_options")
or key.startswith("identity")
or key.startswith("global_prometheus")
or key.startswith("global_email")
):
active_object_key = f"{key}"
current_value = full_config.g(active_object_key)
@@ -2268,7 +2275,7 @@ Google Cloud storage: GOOGLE_PROJECT_ID GOOGLE_APPLICATION_CREDENTIALS\n\
sg.Input(key="global_prometheus.http_password", size=(50, 1)),
],
[
sg.Text(_t("config_gui.instance"), size=(40, 1)),
sg.Text(_t("config_gui.prometheus_instance"), size=(40, 1)),
sg.Input(key="global_prometheus.instance", size=(50, 1)),
],
[
@@ -2306,6 +2313,86 @@ Google Cloud storage: GOOGLE_PROJECT_ID GOOGLE_APPLICATION_CREDENTIALS\n\
],
]
global_email_col = [
[sg.Text(_t("config_gui.available_variables"))],
[
sg.Checkbox(
_t("config_gui.enable_email_notifications"),
key="global_email.enable",
size=(41, 1),
),
],
[
sg.Text(_t("config_gui.email_instance"), size=(40, 1)),
sg.Input(key="global_email.instance", size=(50, 1)),
],
[
sg.Text(_t("config_gui.smtp_server"), size=(40, 1)),
sg.Input(key="global_email.smtp_server", size=(50, 1)),
],
[
sg.Text(_t("config_gui.smtp_port"), size=(40, 1)),
sg.Input(key="global_email.smtp_port", size=(50, 1)),
],
[
sg.Text(_t("config_gui.smtp_security"), size=(40, 1)),
sg.Combo(
["None", "ssl", "tls"],
key="global_email.smtp_security",
size=(50, 1),
),
],
[
sg.Text(_t("config_gui.smtp_username"), size=(40, 1)),
sg.Input(key="global_email.smtp_username", size=(50, 1)),
],
[
sg.Text(_t("config_gui.smtp_password"), size=(40, 1)),
sg.Input(key="global_email.smtp_password", size=(50, 1)),
],
[
sg.Text(_t("config_gui.sender"), size=(40, 1)),
sg.Input(key="global_email.sender", size=(50, 1)),
],
[
sg.Text(_t("config_gui.recipients"), size=(40, 1)),
sg.Input(key="global_email.recipients", size=(50, 1)),
],
[
sg.Checkbox(
_t("config_gui.email_on_backup_success"),
key="global_email.on_backup_success",
size=(41, 1),
),
],
[
sg.Checkbox(
_t("config_gui.email_on_backup_failure"),
key="global_email.on_backup_failure",
size=(41, 1),
),
],
[
sg.Checkbox(
_t("config_gui.email_on_operations_success"),
key="global_email.on_operations_success",
size=(41, 1),
),
],
[
sg.Checkbox(
_t("config_gui.email_on_operations_failure"),
key="global_email.on_operations_failure",
size=(41, 1),
),
],
[
sg.Button(
_t("config_gui.test_email"), key="--TEST-EMAIL--", size=(20, 1)
)
],
]
tab_group_layout = [
[
sg.Tab(
@@ -2331,6 +2418,14 @@ Google Cloud storage: GOOGLE_PROJECT_ID GOOGLE_APPLICATION_CREDENTIALS\n\
key="--tab-global-prometheus--",
)
],
[
sg.Tab(
_t("config_gui.email_config"),
global_email_col,
font="helvetica 16",
key="--tab-global-email--",
)
],
]
_layout = [
@@ -2757,6 +2852,24 @@ Google Cloud storage: GOOGLE_PROJECT_ID GOOGLE_APPLICATION_CREDENTIALS\n\
tree.delete(key)
window[option_key].Update(values=tree)
continue
if event == "--TEST-EMAIL--":
repo_config, _ = configuration.get_repo_config(
full_config, object_name, eval_variables=False
)
if send_metrics_mail(
repo_config=repo_config,
operation="test_email",
restic_result=None,
operation_success=True,
backup_too_small=False,
exec_state=0,
date=datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S UTC"),
):
sg.Popup(_t("config_gui.test_email_success"), keep_on_top=True)
else:
sg.Popup(_t("config_gui.test_email_failure"), keep_on_top=True)
# WIP
continue
if event == "--ACCEPT--":
if object_type != "groups" and not values["repo_uri"]:
sg.PopupError(

View file

@@ -37,8 +37,9 @@ def handle_current_window(action: str = "minimize") -> None:
logger.debug(
"No hwndmain found for current executable, trying foreground window"
)
# This will hide the console and the main gui
# pylint: disable=I1101 (c-extension-no-member)
hwndMain = win32gui.GetForegroundWindow()
# hwndMain = win32gui.GetForegroundWindow()
if hwndMain:
if action == "minimize":
# pylint: disable=I1101 (c-extension-no-member)

View file

@@ -8,6 +8,7 @@ ofunctions.threading>=2.2.0
ofunctions.platform>=1.5.1
ofunctions.random
ofunctions.requestor>=1.2.2
ofunctions.mailer>=1.3.0
python-pidfile>=3.0.0
# pysimplegui 5 has gone commercial, let's switch to freesimplegui
# keep in mind that freesimplegui might require a higher python version in the future

View file

@@ -49,7 +49,7 @@ en:
no_cert_verify: Do not verify SSL certificate
metrics_username: HTTP metrics username
metrics_password: HTTP metrics password
instance: Prometheus instance
prometheus_instance: Prometheus instance
additional_labels: Additional labels
no_config_available: No configuration file found. Please use --config-file "path" to specify one or copy a config file next to the NPBackup binary
@@ -192,4 +192,22 @@ en:
repo_uri_cloud_hint: Cloud repo URI requires to set encrypted environment variables (see environment tab)
full_concurrency: Don't check for concurrency
repo_aware_concurrency: Allow concurrent runs on different repos
repo_aware_concurrency: Allow concurrent runs on different repos
email_config: Email configuration
enable_email_notifications: Enable email notifications
email_instance: Instance name
smtp_server: SMTP server
smtp_port: SMTP port
smtp_security: SMTP security (none, tls, ssl)
smtp_username: SMTP username
smtp_password: SMTP password
sender: Sender email address
recipients: Comma separated recipient addresses
email_on_backup_success: Email on backup success
email_on_backup_failure: Email on backup failure
email_on_operations_success: Email on all operations success
email_on_operations_failure: Email on all operations failure
test_email: Send test notification email
test_email_success: Test email sent successfully
test_email_failure: Test email failed, see logs for details

View file

@@ -50,7 +50,7 @@ fr:
no_cert_verify: Ne pas vérifier le certificat SSL
metrics_username: Nom d'utilisateur métriques HTTP
metrics_password: Mot de passe métriques HTTP
instance: Instance Prometheus
prometheus_instance: Instance Prometheus
additional_labels: Etiquettes supplémentaires
no_config_available: Aucun fichier de configuration trouvé. Merci d'utiliser --config-file "chemin" pour spécifier un fichier, ou copier un fichier de configuration a côté du binaire NPBackup.
@@ -194,4 +194,22 @@ fr:
repo_uri_cloud_hint: L'URI du dépot Cloud nécessite de définir des variables d'environnement chiffrées (voir l'onglet environnement)
full_concurrency: Ne pas vérifier la concurrence
repo_aware_concurrency: Autoriser la concurrence sur dépôts différents
repo_aware_concurrency: Autoriser la concurrence sur dépôts différents
email_config: Configuration email
enable_email_notifications: Activer les notifications email
email_instance: Nom d'instance
smtp_server: Serveur SMTP
smtp_port: Port SMTP
smtp_security: Sécurité SMTP (none, tls, ssl)
smtp_username: Nom d'utilisateur SMTP
smtp_password: Mot de passe SMTP
sender: Email expéditeur
recipients: Emails destinataires séparés par des virgules
email_on_backup_success: Email sur succès de sauvegarde
email_on_backup_failure: Email sur échec de sauvegarde
email_on_operations_success: Email sur succès des opérations
email_on_operations_failure: Email sur échec des opérations
test_email: Envoyer un email de test
test_email_success: Email de test envoyé avec succès
test_email_failure: Échec de l'envoi du test email, veuillez consulter les journaux pour plus de détails

View file

@@ -54,7 +54,7 @@ groups:
repo_password:
repo_password_command:
minimum_backup_age: 1435
random_delay_before_backup: 200
random_delay_before_backup: 3
upload_speed: 100 Mib
download_speed: 0 Mib
backend_connections: 0

View file

@@ -55,7 +55,7 @@ groups:
repo_password:
repo_password_command:
minimum_backup_age: 1435
random_delay_before_backup: 200
random_delay_before_backup: 3
upload_speed: 100 Mib
download_speed: 0 Mib
backend_connections: 0