WIP: Refactor metric analyzer and add email support

deajan 2025-06-13 11:54:41 +02:00
parent 9806a36140
commit 0e550d6f51
7 changed files with 433 additions and 191 deletions

View file

@ -105,6 +105,7 @@ ENCRYPTED_OPTIONS = [
"repo_opts.repo_password_command",
"global_prometheus.http_username",
"global_prometheus.http_password",
"global_email.smtp_username" "global_email.smtp_password",
"env.encrypted_env_variables",
"global_options.auto_upgrade_server_username",
"global_options.auto_upgrade_server_password",
@ -219,6 +220,20 @@ empty_config_dict = {
"additional_labels": [],
"no_cert_verify": False,
},
"global_email": {
"enable": False,
"instance": "${MACHINE_ID}",
"smtp_server": None,
"smtp_port": 587,
"smtp_username": None,
"smtp_password": None,
"sender": None,
"recipients": None,
"on_backup_success": True,
"on_backup_failure": True,
"on_operations_success": True,
"on_operations_failure": True,
},
"global_options": {
"auto_upgrade": False,
"auto_upgrade_percent_chance": 5, # On all runs. On 15m interval runs, this could be 5% (ie once a day), on daily runs, this should be 95% (ie once a day)
@ -730,33 +745,16 @@ def get_repo_config(
return None, None
# Merge prometheus global settings with repo settings
- prometheus_backup_job = None
try:
- prometheus_backup_job = repo_config.g("prometheus.backup_job")
+ repo_config.s("global_email", deepcopy(full_config.g("global_email")))
except KeyError:
- logger.info(
- "No prometheus backup job found in repo config. Setting backup job to machine id"
- )
- prometheus_backup_job = full_config.g("identity.machine_id")
- prometheus_group = None
- try:
- prometheus_group = repo_config.g("prometheus.group")
- except KeyError:
- logger.info(
- "No prometheus group found in repo config. Setting prometheus group to machine group"
- )
- prometheus_group = full_config.g("identity.machine_group")
+ logger.info("No global email settings found")
try:
- repo_config.s("prometheus", deepcopy(full_config.g("global_prometheus")))
+ repo_config.s("global_prometheus", deepcopy(full_config.g("global_prometheus")))
except KeyError:
logger.info("No global prometheus settings found")
- if prometheus_backup_job:
- repo_config.s("prometheus.backup_job", prometheus_backup_job)
- if prometheus_group:
- repo_config.s("prometheus.group", prometheus_group)
try:
repo_group = full_config.g(f"repos.{repo_name}.repo_group")
group_config = full_config.g(f"groups.{repo_group}")

npbackup/core/metrics.py Normal file (260 lines added)
View file

@ -0,0 +1,260 @@
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of npbackup
__intname__ = "npbackup.core.metrics"
__author__ = "Orsiris de Jong"
__copyright__ = "Copyright (C) 2022-2025 NetInvent"
__license__ = "GPL-3.0-only"
__build__ = "2025061201"
import os
from typing import Optional, Tuple, List
from datetime import datetime, timezone
from logging import getLogger
from ofunctions.mailer import Mailer
from npbackup.restic_metrics import (
create_labels_string,
restic_str_output_to_json,
restic_json_to_prometheus,
upload_metrics,
write_metrics_file,
)
from npbackup.__version__ import __intname__ as NAME, version_dict
from npbackup.__debug__ import _DEBUG
logger = getLogger()
def metric_analyser(
repo_config: dict,
restic_result: bool,
result_string: str,
operation: str,
dry_run: bool,
append_metrics_file: bool,
exec_time: Optional[float] = None,
analyze_only: bool = False,
) -> Tuple[bool, bool]:
"""
Tries to derive operation success and backup-too-small booleans from restic output
Returns (operation_success, backup_too_small)
"""
operation_success = True
backup_too_small = False
metrics = []
try:
repo_name = repo_config.g("name")
labels = {
"npversion": f"{NAME}{version_dict['version']}-{version_dict['build_type']}",
"repo_name": repo_name,
"action": operation,
}
if repo_config.g("global_prometheus.metrics"):
labels["backup_job"] = repo_config.g("prometheus.backup_job")
labels["group"] = repo_config.g("prometheus.group")
labels["instance"] = repo_config.g("global_prometheus.instance")
no_cert_verify = repo_config.g("global_prometheus.no_cert_verify")
destination = repo_config.g("global_prometheus.destination")
prometheus_additional_labels = repo_config.g(
"global_prometheus.additional_labels"
)
if isinstance(prometheus_additional_labels, dict):
for k, v in prometheus_additional_labels.items():
labels[k] = v
else:
logger.error(
f"Bogus value in configuration for prometheus additional labels: {prometheus_additional_labels}"
)
else:
destination = None
no_cert_verify = False
# We only analyse backup output of restic
if operation == "backup":
minimum_backup_size_error = repo_config.g(
"backup_opts.minimum_backup_size_error"
)
# If result was a str, we need to transform it into json first
if isinstance(result_string, str):
restic_result = restic_str_output_to_json(restic_result, result_string)
operation_success, metrics, backup_too_small = restic_json_to_prometheus(
restic_result=restic_result,
restic_json=restic_result,
labels=labels,
minimum_backup_size_error=minimum_backup_size_error,
)
if not operation_success or not restic_result:
logger.error("Backend finished with errors.")
"""
Add a metric for informing if any warning raised while executing npbackup_tasks
CRITICAL = 50 will be 3 in this metric, but should not really exist
ERROR = 40 will be 2 in this metric
WARNING = 30 will be 1 in this metric
INFO = 20 will be 0
"""
worst_exec_level = logger.get_worst_logger_level()
if worst_exec_level == 50:
exec_state = 3
elif worst_exec_level == 40:
exec_state = 2
elif worst_exec_level == 30:
exec_state = 1
else:
exec_state = 0
# exec_state update according to metric_analyser
if not operation_success or backup_too_small:
exec_state = 2
labels_string = create_labels_string(labels)
metrics.append(
f'npbackup_exec_state{{{labels_string},timestamp="{int(datetime.now(timezone.utc).timestamp())}"}} {exec_state}'
)
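# Illustrative sample of the line appended above (label values are placeholders):
# npbackup_exec_state{npversion="npbackup3.0.0-release",repo_name="default",action="backup",timestamp="1718272481"} 0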
# Add upgrade state if upgrades activated
upgrade_state = os.environ.get("NPBACKUP_UPGRADE_STATE", None)
try:
upgrade_state = int(upgrade_state)
labels_string = create_labels_string(labels)
# Use a distinct metric name so the upgrade state does not collide with npbackup_exec_state above
metrics.append(
f'npbackup_upgrade_state{{{labels_string},timestamp="{int(datetime.now(timezone.utc).timestamp())}"}} {upgrade_state}'
)
except (ValueError, TypeError):
pass
if isinstance(exec_time, (int, float)):
try:
metrics.append(
f'npbackup_exec_time{{{labels_string},timestamp="{int(datetime.now(timezone.utc).timestamp())}"}} {exec_time}'
)
except (ValueError, TypeError):
logger.warning("Cannot get exec time from environment")
if not analyze_only:
logger.debug("Metrics computed:\n{}".format("\n".join(metrics)))
send_prometheus_metrics(
repo_config,
metrics,
destination,
no_cert_verify,
dry_run,
append_metrics_file,
repo_name,
operation,
)
send_metrics_mail(repo_config, metrics)
except KeyError as exc:
logger.info("Metrics error: {}".format(exc))
logger.debug("Trace:", exc_info=True)
except OSError as exc:
logger.error("Metrics OS error: ".format(exc))
logger.debug("Trace:", exc_info=True)
return operation_success, backup_too_small
def send_prometheus_metrics(
repo_config: dict,
metrics: List[str],
destination: Optional[str] = None,
no_cert_verify: bool = False,
dry_run: bool = False,
append_metrics_file: bool = False,
repo_name: Optional[str] = None,
operation: Optional[str] = None,
) -> bool:
if destination and dry_run:
logger.info("Dry run mode. Not sending metrics.")
elif destination:
logger.debug("Sending metrics to {}".format(destination))
dest = destination.lower()
if dest.startswith("http"):
if not "metrics" in dest:
logger.error(
"Destination does not contain 'metrics' keyword. Not uploading."
)
return False
if not "job" in dest:
logger.error(
"Destination does not contain 'job' keyword. Not uploading."
)
return False
try:
authentication = (
repo_config.g("prometheus.http_username"),
repo_config.g("prometheus.http_password"),
)
except KeyError:
logger.info("No metrics authentication present.")
authentication = None
# Fix for #150, job name needs to be unique in order to avoid overwriting previous job in push gateway
destination = f"{destination}___repo_name={repo_name}___action={operation}"
upload_metrics(destination, authentication, no_cert_verify, metrics)
else:
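# Non-HTTP destinations are treated as a file path, e.g. /var/lib/npbackup/metrics.prom
# (illustrative path, usable with node_exporter's textfile collector)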
write_metrics_file(destination, metrics, append=append_metrics_file)
else:
logger.debug("No metrics destination set. Not sending metrics")
def send_metrics_mail(repo_config: dict, metrics: List[str]):
"""
Sends metrics via email.
"""
if not metrics:
logger.warning("No metrics to send via email.")
return False
if not repo_config.g("global_email.enable"):
logger.debug(
"Metrics not enabled in configuration. Not sending metrics via email."
)
return False
smtp_server = repo_config.g("global_email.smtp_server")
smtp_port = repo_config.g("global_email.smtp_port")
smtp_security = repo_config.g("global_email.smtp_security")
if not smtp_server or not smtp_port or not smtp_security:
logger.warning(
"SMTP server/port or security not set. Not sending metrics via email."
)
return False
smtp_username = repo_config.g("global_email.smtp_username")
smtp_password = repo_config.g("global_email.smtp_password")
sender = repo_config.g("global_email.sender")
recipients = repo_config.g("global_email.recipients")
if not sender or not recipients:
logger.warning("Sender or recipients not set. Not sending metrics via email.")
return False
mailer = Mailer(
smtp_server=smtp_server,
smtp_port=smtp_port,
security=smtp_security,
smtp_user=smtp_username,
smtp_password=smtp_password,
debug=_DEBUG,
)
subject = (
f"Metrics for {NAME} {version_dict['version']}-{version_dict['build_type']}"
)
body = "\n".join(metrics)
try:
result = mailer.send_email(
sender_mail=sender, recipient_mails=recipients, subject=subject, body=body
)
if result:
logger.info("Metrics sent via email.")
return True
except Exception as exc:
logger.error(f"Failed to send metrics via email: {exc}")
logger.debug("Trace:", exc_info=True)
return False
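For a quick manual test outside the GUI (which wires this to the --TEST-EMAIL-- button further down), one could drive send_metrics_mail with a minimal stand-in config. A sketch assuming the DotDict helper outlined alongside the configuration hunks above, with placeholder server and addresses:

from npbackup.core.metrics import send_metrics_mail

if __name__ == "__main__":
    fake_repo_config = DotDict(
        {
            "global_email": {
                "enable": True,
                "smtp_server": "smtp.example.org",  # placeholder
                "smtp_port": 587,
                "smtp_security": "tls",
                "smtp_username": None,  # no authentication in this sketch
                "smtp_password": None,
                "sender": "npbackup@example.org",  # placeholder
                "recipients": "admin@example.org",  # placeholder
            }
        }
    )
    send_metrics_mail(fake_repo_config, ["This is a test email"])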

View file

@ -10,7 +10,7 @@ __license__ = "GPL-3.0-only"
__build__ = "2025061201"
- from typing import Optional, Callable, Union, List, Tuple
+ from typing import Optional, Callable, Union, List
import os
import logging
import tempfile
@ -25,19 +25,12 @@ from ofunctions.threading import threaded
from ofunctions.platform import os_arch
from ofunctions.misc import fn_name
import ntplib
- from npbackup.restic_metrics import (
- create_labels_string,
- restic_str_output_to_json,
- restic_json_to_prometheus,
- upload_metrics,
- write_metrics_file,
- )
+ from npbackup.core.metrics import metric_analyser
from npbackup.restic_wrapper import ResticRunner
from npbackup.core.restic_source_binary import get_restic_internal_binary
from npbackup.core import jobs
from npbackup.path_helper import CURRENT_DIR, BASEDIR
import npbackup.pidfile_ng
from npbackup.__version__ import __intname__ as NAME, version_dict
from npbackup.__debug__ import _DEBUG, exception_to_string
from npbackup.__env__ import MAX_ALLOWED_NTP_OFFSET
@ -88,160 +81,6 @@ non_locking_operations = [
]
- def metric_analyser(
- repo_config: dict,
- restic_result: bool,
- result_string: str,
- operation: str,
- dry_run: bool,
- append_metrics_file: bool,
- exec_time: Optional[float] = None,
- analyze_only: bool = False,
- ) -> Tuple[bool, bool]:
- """
- Tries to get operation success and backup to small booleans from restic output
- Returns op success, backup too small
- """
- operation_success = True
- backup_too_small = False
- metrics = []
- try:
- repo_name = repo_config.g("name")
- labels = {
- "npversion": f"{NAME}{version_dict['version']}-{version_dict['build_type']}",
- "repo_name": repo_name,
- "action": operation,
- }
- if repo_config.g("prometheus.metrics"):
- labels["instance"] = repo_config.g("prometheus.instance")
- labels["backup_job"] = repo_config.g("prometheus.backup_job")
- labels["group"] = repo_config.g("prometheus.group")
- no_cert_verify = repo_config.g("prometheus.no_cert_verify")
- destination = repo_config.g("prometheus.destination")
- prometheus_additional_labels = repo_config.g("prometheus.additional_labels")
- if isinstance(prometheus_additional_labels, dict):
- for k, v in prometheus_additional_labels.items():
- labels[k] = v
- else:
- logger.error(
- f"Bogus value in configuration for prometheus additional labels: {prometheus_additional_labels}"
- )
- else:
- destination = None
- no_cert_verify = False
- # We only analyse backup output of restic
- if operation == "backup":
- minimum_backup_size_error = repo_config.g(
- "backup_opts.minimum_backup_size_error"
- )
- # If result was a str, we need to transform it into json first
- if isinstance(result_string, str):
- restic_result = restic_str_output_to_json(restic_result, result_string)
- operation_success, metrics, backup_too_small = restic_json_to_prometheus(
- restic_result=restic_result,
- restic_json=restic_result,
- labels=labels,
- minimum_backup_size_error=minimum_backup_size_error,
- )
- if not operation_success or not restic_result:
- logger.error("Backend finished with errors.")
- """
- Add a metric for informing if any warning raised while executing npbackup_tasks
- CRITICAL = 50 will be 3 in this metric, but should not really exist
- ERROR = 40 will be 2 in this metric
- WARNING = 30 will be 1 in this metric
- INFO = 20 will be 0
- """
- worst_exec_level = logger.get_worst_logger_level()
- if worst_exec_level == 50:
- exec_state = 3
- elif worst_exec_level == 40:
- exec_state = 2
- elif worst_exec_level == 30:
- exec_state = 1
- else:
- exec_state = 0
- # exec_state update according to metric_analyser
- if not operation_success or backup_too_small:
- exec_state = 2
- labels_string = create_labels_string(labels)
- metrics.append(
- f'npbackup_exec_state{{{labels_string},timestamp="{int(datetime.now(timezone.utc).timestamp())}"}} {exec_state}'
- )
- # Add upgrade state if upgrades activated
- upgrade_state = os.environ.get("NPBACKUP_UPGRADE_STATE", None)
- try:
- upgrade_state = int(upgrade_state)
- labels_string = create_labels_string(labels)
- metrics.append(
- f'npbackup_exec_state{{{labels_string},timestamp="{int(datetime.now(timezone.utc).timestamp())}"}} {upgrade_state}'
- )
- except (ValueError, TypeError):
- pass
- if isinstance(exec_time, (int, float)):
- try:
- metrics.append(
- f'npbackup_exec_time{{{labels_string},timestamp="{int(datetime.now(timezone.utc).timestamp())}"}} {exec_time}'
- )
- except (ValueError, TypeError):
- logger.warning("Cannot get exec time from environment")
- if not analyze_only:
- logger.debug("Metrics computed:\n{}".format("\n".join(metrics)))
- if destination and dry_run:
- logger.info("Dry run mode. Not sending metrics.")
- elif destination:
- logger.debug("Sending metrics to {}".format(destination))
- dest = destination.lower()
- if dest.startswith("http"):
- if not "metrics" in dest:
- logger.error(
- "Destination does not contain 'metrics' keyword. Not uploading."
- )
- return backup_too_small
- if not "job" in dest:
- logger.error(
- "Destination does not contain 'job' keyword. Not uploading."
- )
- return backup_too_small
- try:
- authentication = (
- repo_config.g("prometheus.http_username"),
- repo_config.g("prometheus.http_password"),
- )
- except KeyError:
- logger.info("No metrics authentication present.")
- authentication = None
- # Fix for #150, job name needs to be unique in order to avoid overwriting previous job in push gateway
- destination = (
- f"{destination}___repo_name={repo_name}___action={operation}"
- )
- upload_metrics(destination, authentication, no_cert_verify, metrics)
- else:
- write_metrics_file(destination, metrics, append=append_metrics_file)
- else:
- logger.debug("No metrics destination set. Not sending metrics")
- except KeyError as exc:
- logger.info("Metrics error: {}".format(exc))
- logger.debug("Trace:", exc_info=True)
- except OSError as exc:
- logger.error("Metrics OS error: ".format(exc))
- logger.debug("Trace:", exc_info=True)
- return operation_success, backup_too_small
def get_ntp_offset(ntp_server: str) -> Optional[float]:
"""
Get current time offset from ntp server
@ -821,7 +660,7 @@ class NPBackupRunner:
def metrics(fn: Callable):
"""
- Write prometheus metrics
+ Analyse metrics and notify
"""
@wraps(fn)
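The decorator body is cut off in this view; as a rough sketch of the pattern (not the commit's actual code, and the attribute names on the runner are assumptions), the wrapper times the operation and hands its result to metric_analyser:

from functools import wraps
from time import monotonic
from typing import Callable

from npbackup.core.metrics import metric_analyser

def metrics(fn: Callable):
    """
    Analyse metrics and notify (sketch of the wrapping pattern only)
    """
    @wraps(fn)
    def wrapper(self, *args, **kwargs):
        start = monotonic()
        result = fn(self, *args, **kwargs)
        metric_analyser(
            repo_config=self.repo_config,  # assumed runner attribute
            restic_result=result,
            result_string="",
            operation=fn.__name__,
            dry_run=self.dry_run,  # assumed runner attribute
            append_metrics_file=False,
            exec_time=monotonic() - start,
        )
        return result
    return wrapper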

View file

@ -13,7 +13,6 @@ __build__ = "2025022301"
from typing import List, Tuple
import os
import re
- import pathlib
from logging import getLogger
import FreeSimpleGUI as sg
import textwrap
@ -21,6 +20,7 @@ from ruamel.yaml.comments import CommentedMap
from npbackup import configuration
from ofunctions.misc import get_key_from_value, BytesConverter
from npbackup.core.i18n_helper import _t
from npbackup.core.metrics import send_metrics_mail
from resources.customization import (
INHERITED_ICON,
NON_INHERITED_ICON,
@ -38,6 +38,7 @@ from resources.customization import (
INHERITED_SYMLINK_ICON,
)
from npbackup.task import create_scheduled_task
from npbackup.__debug__ import fmt_json
logger = getLogger()
@ -719,7 +720,12 @@ def config_gui(full_config: dict, config_file: str):
# Only update global options gui with identified global keys
for key in full_config.keys():
if key in ("identity", "global_prometheus", "global_options"):
if key in (
"identity",
"global_prometheus",
"global_email",
"global_options",
):
global_config.s(key, full_config.g(key))
iter_over_config(global_config, None, "group", unencrypted, None)
@ -835,6 +841,7 @@ def config_gui(full_config: dict, config_file: str):
key.startswith("global_options")
or key.startswith("identity")
or key.startswith("global_prometheus")
or key.startswith("global_email")
):
active_object_key = f"{key}"
current_value = full_config.g(active_object_key)
@ -2268,7 +2275,7 @@ Google Cloud storage: GOOGLE_PROJECT_ID GOOGLE_APPLICATION_CREDENTIALS\n\
sg.Input(key="global_prometheus.http_password", size=(50, 1)),
],
[
sg.Text(_t("config_gui.instance"), size=(40, 1)),
sg.Text(_t("config_gui.prometheus_instance"), size=(40, 1)),
sg.Input(key="global_prometheus.instance", size=(50, 1)),
],
[
@ -2306,6 +2313,86 @@ Google Cloud storage: GOOGLE_PROJECT_ID GOOGLE_APPLICATION_CREDENTIALS\n\
],
]
global_email_col = [
[sg.Text(_t("config_gui.available_variables"))],
[
sg.Checkbox(
_t("config_gui.enable_email_notifications"),
key="global_email.enable",
size=(41, 1),
),
],
[
sg.Text(_t("config_gui.email_instance"), size=(40, 1)),
sg.Input(key="global_email.instance", size=(50, 1)),
],
[
sg.Text(_t("config_gui.smtp_server"), size=(40, 1)),
sg.Input(key="global_email.smtp_server", size=(50, 1)),
],
[
sg.Text(_t("config_gui.smtp_port"), size=(40, 1)),
sg.Input(key="global_email.smtp_port", size=(41, 1)),
],
[
sg.Text(_t("config_gui.smtp_security"), size=(40, 1)),
sg.Combo(
["None", "ssl", "tls"],
key="global_email.smtp_security",
size=(50, 1),
),
],
[
sg.Text(_t("config_gui.smtp_username"), size=(40, 1)),
sg.Input(key="global_email.smtp_username", size=(50, 1)),
],
[
sg.Text(_t("config_gui.smtp_password"), size=(40, 1)),
sg.Input(key="global_email.smtp_password", size=(50, 1)),
],
[
sg.Text(_t("config_gui.sender"), size=(40, 1)),
sg.Input(key="global_email.sender", size=(50, 1)),
],
[
sg.Text(_t("config_gui.recipients"), size=(40, 1)),
sg.Input(key="global_email.recipients", size=(50, 1)),
],
[
sg.Checkbox(
_t("config_gui.email_on_backup_success"),
key="global_email.on_backup_success",
size=(41, 1),
),
],
[
sg.Checkbox(
_t("config_gui.email_on_backup_failure"),
key="global_email.on_backup_failure",
size=(41, 1),
),
],
[
sg.Checkbox(
_t("config_gui.email_on_operations_success"),
key="global_email.on_operations_success",
size=(41, 1),
),
],
[
sg.Checkbox(
_t("config_gui.email_on_operations_failure"),
key="global_email.on_operations_failure",
size=(41, 1),
),
],
[
sg.Button(
_t("config_gui.test_email"), key="--TEST-EMAIL--", size=(20, 1)
)
],
]
tab_group_layout = [
[
sg.Tab(
@ -2331,6 +2418,14 @@ Google Cloud storage: GOOGLE_PROJECT_ID GOOGLE_APPLICATION_CREDENTIALS\n\
key="--tab-global-prometheus--",
)
],
[
sg.Tab(
_t("config_gui.email_config"),
global_email_col,
font="helvetica 16",
key="--tab-global-email--",
)
],
]
_layout = [
@ -2757,6 +2852,19 @@ Google Cloud storage: GOOGLE_PROJECT_ID GOOGLE_APPLICATION_CREDENTIALS\n\
tree.delete(key)
window[option_key].Update(values=tree)
continue
if event == "--TEST-EMAIL--":
repo_config, _ = configuration.get_repo_config(
full_config, object_name, eval_variables=False
)
print(fmt_json(repo_config))
if send_metrics_mail(
repo_config=repo_config, metrics=["Thisis a test email"]
):
sg.Popup(_t("config_gui.test_email_success"), keep_on_top=True)
else:
sg.Popup(_t("config_gui.test_email_failure"), keep_on_top=True)
# WIP
continue
if event == "--ACCEPT--":
if object_type != "groups" and not values["repo_uri"]:
sg.PopupError(

View file

@ -8,6 +8,7 @@ ofunctions.threading>=2.2.0
ofunctions.platform>=1.5.1
ofunctions.random
ofunctions.requestor>=1.2.2
ofunctions.mailer>=1.3.0
python-pidfile>=3.0.0
# pysimplegui 5 has gone commercial, let's switch to freesimplegui
# keep in mind that freesimplegui might raise the required python version in the future
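For reference, a minimal sketch of the two ofunctions.mailer calls that npbackup/core/metrics.py relies on (host, credentials and addresses are placeholders):

from ofunctions.mailer import Mailer

mailer = Mailer(
    smtp_server="smtp.example.org",  # placeholder
    smtp_port=587,
    security="tls",
    smtp_user=None,  # optional authentication
    smtp_password=None,
    debug=False,
)
mailer.send_email(
    sender_mail="npbackup@example.org",  # placeholder
    recipient_mails="admin@example.org",  # placeholder
    subject="NPBackup test",
    body="This is a test email",
)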

View file

@ -49,7 +49,7 @@ en:
no_cert_verify: Do not verify SSL certificate
metrics_username: HTTP metrics username
metrics_password: HTTP metrics password
- instance: Prometheus instance
+ prometheus_instance: Prometheus instance
additional_labels: Additional labels
no_config_available: No configuration file found. Please use --config-file "path" to specify one or copy a config file next to the NPBackup binary
@ -192,4 +192,22 @@ en:
repo_uri_cloud_hint: Cloud repo URI requires to set encrypted environment variables (see environment tab)
full_concurrency: Don't check for concurrency
- repo_aware_concurrency: Allow concurrent runs on different repos
+ repo_aware_concurrency: Allow concurrent runs on different repos
email_config: Email configuration
enable_email_notifications: Enable email notifications
email_instance: Email instance name
smtp_server: SMTP server
smtp_port: SMTP port
smtp_security: SMTP security (none, tls, ssl)
smtp_username: SMTP username
smtp_password: SMTP password
sender: Sender email address
recipients: Comma separated recipient addresses
email_on_backup_success: Email on backup success
email_on_backup_failure: Email on backup failure
email_on_operations_success: Email on all operations success
email_on_operations_failure: Email on all operations failure
test_email: Send test notification email
test_email_success: Test email sent successfully
test_email_failure: Test email failed, see logs for details

View file

@ -50,7 +50,7 @@ fr:
no_cert_verify: Ne pas vérifier le certificat SSL
metrics_username: Nom d'utilisateur métriques HTTP
metrics_password: Mot de passe métriques HTTP
- instance: Instance Prometheus
+ prometheus_instance: Instance Prometheus
additional_labels: Etiquettes supplémentaires
no_config_available: Aucun fichier de configuration trouvé. Merci d'utiliser --config-file "chemin" pour spécifier un fichier, ou copier un fichier de configuration a côté du binaire NPBackup.
@ -194,4 +194,22 @@ fr:
repo_uri_cloud_hint: L'URI du dépot Cloud nécessite de définir des variables d'environnement chiffrées (voir l'onglet environnement)
full_concurrency: Ne pas vérifier la concurrence
- repo_aware_concurrency: Autoriser concurrencesur dépots différents
+ repo_aware_concurrency: Autoriser la concurrence sur des dépôts différents
email_config: Configuration email
enable_email_notifications: Activer les notifications email
email_instance: Instance email
smtp_server: Serveur SMTP
smtp_port: Port SMTP
smtp_security: Sécurité SMTP (none, tls, ssl)
smtp_username: Nom d'utilisateur SMTP
smtp_password: Mot de passe SMTP
sender: Email expéditeur
recipients: Emails destinataires séparés par des virgules
email_on_backup_success: Email sur succès de sauvegarde
email_on_backup_failure: Email sur échec de sauvegarde
email_on_operations_success: Email sur succès des opérations
email_on_operations_failure: Email sur échec des opérations
test_email: Envoyer un email de test
test_email_success: Email de test envoyé avec succès
test_email_failure: Échec de l'envoi du test email, veuillez consulter les journaux pour plus de détails