Reformat files with black

Orsiris de Jong 2024-02-25 19:59:16 +01:00
parent 25541b31eb
commit 36fb6b3231
12 changed files with 747 additions and 368 deletions
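The hunks below are mechanical: black joins statements that fit within its default 88-character line length, splits those that do not (adding a trailing comma to the last element), and normalizes quotes to double quotes. A minimal sketch of reproducing the first hunk through black's Python API, assuming only its documented entry points format_str and Mode:

import black

# The pre-commit spelling of the first hunk: one call split over three lines.
src = 'logger.error(\n    f"Function {operation} failed with: {exc}", level="error"\n)\n'

# Mode() defaults to 88-character lines; the joined call fits, so black collapses it.
print(black.format_str(src, mode=black.Mode()))
# logger.error(f"Function {operation} failed with: {exc}", level="error")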

View file

@@ -48,10 +48,8 @@ def catch_exceptions(fn: Callable):
except Exception as exc:
# pylint: disable=E1101 (no-member)
operation = fn.__name__
logger.error(
f"Function {operation} failed with: {exc}", level="error"
)
logger.error(f"Function {operation} failed with: {exc}", level="error")
logger.error("Trace:", exc_info=True)
return None
return wrapper
return wrapper

View file

@@ -42,10 +42,7 @@ logger = logging.getLogger()
def json_error_logging(result: bool, msg: str, level: str):
if _JSON:
js = {
"result": result,
"reason": msg
}
js = {"result": result, "reason": msg}
print(json.dumps(js))
logger.__getattribute__(level)(msg)
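
The last line above dispatches on the level string: logger.__getattribute__(level) fetches the logging method of that name, so level="error" ends up calling logger.error(msg). The equivalent, more idiomatic spelling:

getattr(logger, level)(msg)  # same dynamic dispatch as logger.__getattribute__(level)(msg)
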
@@ -151,13 +148,10 @@ This is free software, and you are welcome to redistribute it under certain cond
type=str,
default=None,
required=False,
help="Dump a specific file to stdout"
help="Dump a specific file to stdout",
)
parser.add_argument(
"--stats",
action="store_true",
help="Get repository statistics"
"--stats", action="store_true", help="Get repository statistics"
)
parser.add_argument(
"--raw",
@@ -192,15 +186,13 @@ This is free software, and you are welcome to redistribute it under certain cond
help="Run in JSON API mode. Nothing else than JSON will be printed to stdout",
)
parser.add_argument(
"--stdin",
action="store_true",
help="Backup using data from stdin input"
"--stdin", action="store_true", help="Backup using data from stdin input"
)
parser.add_argument(
"--stdin-filename",
type=str,
default=None,
help="Alternate filename for stdin, defaults to 'stdin.data'"
help="Alternate filename for stdin, defaults to 'stdin.data'",
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="Show verbose output"
@@ -230,7 +222,7 @@ This is free software, and you are welcome to redistribute it under certain cond
type=str,
default=None,
required=False,
help="Optional path for logfile"
help="Optional path for logfile",
)
args = parser.parse_args()
@@ -249,10 +241,7 @@ This is free software, and you are welcome to redistribute it under certain cond
if args.version:
if _JSON:
print(json.dumps({
"result": True,
"version": version_dict
}))
print(json.dumps({"result": True, "version": version_dict}))
else:
print(version_string)
sys.exit(0)
@@ -318,7 +307,7 @@ This is free software, and you are welcome to redistribute it under certain cond
cli_args["op_args"] = {
"force": True,
"read_from_stdin": True,
"stdin_filename": args.stdin_filename if args.stdin_filename else None
"stdin_filename": args.stdin_filename if args.stdin_filename else None,
}
elif args.backup:
cli_args["operation"] = "backup"
@@ -394,7 +383,9 @@ def main():
cli_interface()
sys.exit(logger.get_worst_logger_level())
except KeyboardInterrupt as exc:
json_error_logging(False, f"Program interrupted by keyboard: {exc}", level="error")
json_error_logging(
False, f"Program interrupted by keyboard: {exc}", level="error"
)
logger.info("Trace:", exc_info=True)
# EXIT_CODE 200 = keyboard interrupt
sys.exit(200)

View file

@@ -19,13 +19,13 @@ from npbackup.configuration import IS_PRIV_BUILD
version_string = f"{__intname__} v{__version__}-{'priv' if IS_PRIV_BUILD else 'pub'}-{sys.version_info[0]}.{sys.version_info[1]}-{python_arch()} {__build__} - {__copyright__}"
version_dict = {
'name': __intname__,
'version': __version__,
'buildtype': "priv" if IS_PRIV_BUILD else "pub",
'os': get_os_identifier(),
'arch': python_arch(),
'pv': sys.version_info,
'comp': "__compiled__" in globals(),
'build': __build__,
'copyright': __copyright__
"name": __intname__,
"version": __version__,
"buildtype": "priv" if IS_PRIV_BUILD else "pub",
"os": get_os_identifier(),
"arch": python_arch(),
"pv": sys.version_info,
"comp": "__compiled__" in globals(),
"build": __build__,
"copyright": __copyright__,
}

View file

@@ -201,10 +201,7 @@ empty_config_dict = {
"backup_job": "${MACHINE_ID}",
"group": "${MACHINE_GROUP}",
},
"env": {
"env_variables": {},
"encrypted_env_variables": {}
},
"env": {"env_variables": {}, "encrypted_env_variables": {}},
},
"identity": {
"machine_id": "${HOSTNAME}__${RANDOM}[4]",
@@ -383,7 +380,9 @@ def evaluate_variables(repo_config: dict, full_config: dict) -> dict:
count = 0
maxcount = 4 * 2 * 2
while count < maxcount:
repo_config = replace_in_iterable(repo_config, _evaluate_variables, callable_wants_key=True)
repo_config = replace_in_iterable(
repo_config, _evaluate_variables, callable_wants_key=True
)
count += 1
return repo_config
@@ -394,8 +393,14 @@ def expand_units(object_config: dict, unexpand: bool = False) -> dict:
eg 50 KB to 50000
and 50000 to 50 KB in unexpand mode
"""
def _expand_units(key, value):
if key in ("minimum_backup_size_error", "exclude_files_larger_than", "upload_speed", "download_speed"):
if key in (
"minimum_backup_size_error",
"exclude_files_larger_than",
"upload_speed",
"download_speed",
):
if unexpand:
return BytesConverter(value).human_iec_bytes
return BytesConverter(value)
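
For context on the docstring above: BytesConverter comes from ofunctions.misc, and both directions of the conversion lean on properties this commit already uses (.bytes and .human_iec_bytes). A rough sketch of the round trip, assuming those properties behave as their names suggest; the exact parsing rules are the library's:

from ofunctions.misc import BytesConverter

raw = int(BytesConverter("50 KB").bytes)     # expand: human-readable size -> byte count
human = BytesConverter(raw).human_iec_bytes  # unexpand: byte count -> readable string
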
@@ -404,7 +409,6 @@ def expand_units(object_config: dict, unexpand: bool = False) -> dict:
return replace_in_iterable(object_config, _expand_units, callable_wants_key=True)
def extract_permissions_from_full_config(full_config: dict) -> dict:
"""
Extract permissions and manager password from repo_uri tuple
@@ -435,16 +439,28 @@ def inject_permissions_into_full_config(full_config: dict) -> Tuple[bool, dict]:
repo_uri = full_config.g(f"repos.{repo}.repo_uri")
manager_password = full_config.g(f"repos.{repo}.manager_password")
permissions = full_config.g(f"repos.{repo}.permissions")
__saved_manager_password = full_config.g(f"repos.{repo}.__saved_manager_password")
__saved_manager_password = full_config.g(
f"repos.{repo}.__saved_manager_password"
)
if __saved_manager_password and manager_password and __saved_manager_password == manager_password:
if (
__saved_manager_password
and manager_password
and __saved_manager_password == manager_password
):
updated_full_config = True
full_config.s(f"repos.{repo}.repo_uri", (repo_uri, permissions, manager_password))
full_config.s(
f"repos.{repo}.repo_uri", (repo_uri, permissions, manager_password)
)
full_config.s(f"repos.{repo}.is_protected", True)
else:
logger.info(f"Permissions are already set for repo {repo}. Will not update them unless manager password is given")
full_config.d(f"repos.{repo}.__saved_manager_password") # Don't keep decrypted manager password
logger.info(
f"Permissions are already set for repo {repo}. Will not update them unless manager password is given"
)
full_config.d(
f"repos.{repo}.__saved_manager_password"
) # Don't keep decrypted manager password
full_config.d(f"repos.{repo}.permissions")
full_config.d(f"repos.{repo}.manager_password")
return updated_full_config, full_config
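
Per the extract/inject pair above, permissions and the manager password travel packed inside the repo_uri config value as a three-element tuple; the field order comes from the full_config.s(...) call in this hunk (values below are hypothetical):

repo_uri = ("rest:https://user@backup.example.org/repo", "full", "manager-secret")
uri, permissions, manager_password = repo_uri  # what the extract side unpacks
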
@@ -476,7 +492,7 @@ def get_repo_config(
_group_config = deepcopy(group_config)
_config_inheritance = deepcopy(repo_config)
# Make sure we make the initial config inheritance values False
_config_inheritance = replace_in_iterable(_config_inheritance, lambda _ : False)
_config_inheritance = replace_in_iterable(_config_inheritance, lambda _: False)
def _inherit_group_settings(
_repo_config: dict, _group_config: dict, _config_inheritance: dict
@@ -497,14 +513,13 @@ def get_repo_config(
_config_inheritance.s(key, __config_inheritance)
elif isinstance(value, list):
if isinstance(_repo_config.g(key), list):
merged_lists = _repo_config.g(key) + value
# Case where repo config already contains non list info but group config has list
elif _repo_config.g(key):
merged_lists = [_repo_config.g(key)] + value
else:
merged_lists = value
# Special case when merged lists contain multiple dicts, we'll need to merge dicts
# unless lists have other object types than dicts
merged_items_dict = {}
@@ -541,7 +556,7 @@ def get_repo_config(
# Case where repo_config contains list but group info has single str
elif isinstance(_repo_config.g(key), list) and value:
merged_lists = _repo_config.g(key) + [value]
# Special case when merged lists contain multiple dicts, we'll need to merge dicts
# unless lists have other object types than dicts
merged_items_dict = {}
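
The comments in these two hunks describe the same list-merge rules from both directions. Condensed into a standalone sketch (hypothetical helper, not the actual inheritance code; the dict-merging special case is omitted):

def merge_group_value(repo_value, group_value):
    # Group settings append to repo settings; repo values always come first.
    if isinstance(repo_value, list) and isinstance(group_value, list):
        return repo_value + group_value
    if repo_value and isinstance(group_value, list):
        # Repo holds a scalar but the group supplies a list
        return [repo_value] + group_value
    if isinstance(repo_value, list) and group_value:
        # Repo holds a list but the group supplies a scalar
        return repo_value + [group_value]
    return repo_value if repo_value else group_value  # scalars: repo wins when set (assumed)
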
@@ -667,13 +682,13 @@ def load_config(config_file: Path) -> Optional[dict]:
"exclude_files",
"pre_exec_commands",
"post_exec_commands",
"additional_labels"
"env_variables",
"encrypted_env_variables"
):
"additional_labels" "env_variables",
"encrypted_env_variables",
):
if not isinstance(value, list):
value = [value]
return value
iter_over_keys(full_config, _make_list)
# Check if we need to encrypt some variables
@@ -759,6 +774,9 @@ def get_repos_by_group(full_config: dict, group: str) -> List[str]:
repo_list = []
if full_config:
for repo in list(full_config.g("repos").keys()):
if full_config.g(f"repos.{repo}.repo_group") == group and group not in repo_list:
if (
full_config.g(f"repos.{repo}.repo_group") == group
and group not in repo_list
):
repo_list.append(repo)
return repo_list
return repo_list

View file

@@ -24,7 +24,11 @@ from command_runner import command_runner
from ofunctions.threading import threaded
from ofunctions.platform import os_arch
from ofunctions.misc import BytesConverter
from npbackup.restic_metrics import restic_str_output_to_json, restic_json_to_prometheus, upload_metrics
from npbackup.restic_metrics import (
restic_str_output_to_json,
restic_json_to_prometheus,
upload_metrics,
)
from npbackup.restic_wrapper import ResticRunner
from npbackup.core.restic_source_binary import get_restic_internal_binary
from npbackup.path_helper import CURRENT_DIR, BASEDIR
@@ -41,9 +45,7 @@ def metric_writer(
backup_too_small = False
minimum_backup_size_error = repo_config.g("backup_opts.minimum_backup_size_error")
try:
labels = {
"npversion": f"{NAME}{VERSION}"
}
labels = {"npversion": f"{NAME}{VERSION}"}
if repo_config.g("prometheus.metrics"):
labels["instance"] = repo_config.g("prometheus.instance")
labels["backup_job"] = repo_config.g("prometheus.backup_job")
@@ -51,7 +53,7 @@ def metric_writer(
no_cert_verify = repo_config.g("prometheus.no_cert_verify")
destination = repo_config.g("prometheus.destination")
prometheus_additional_labels = repo_config.g("prometheus.additional_labels")
if not isinstance(prometheus_additional_labels, list):
prometheus_additional_labels = [prometheus_additional_labels]
@@ -78,7 +80,10 @@ def metric_writer(
restic_result = restic_str_output_to_json(restic_result, result_string)
errors, metrics, backup_too_small = restic_json_to_prometheus(
restic_result=restic_result, restic_json=restic_result, labels=labels, minimum_backup_size_error=minimum_backup_size_error
restic_result=restic_result,
restic_json=restic_result,
labels=labels,
minimum_backup_size_error=minimum_backup_size_error,
)
if errors or not restic_result:
logger.error("Restic finished with errors.")
@@ -94,9 +99,7 @@ def metric_writer(
logger.info("No metrics authentication present.")
authentication = None
if not dry_run:
upload_metrics(
destination, authentication, no_cert_verify, metrics
)
upload_metrics(destination, authentication, no_cert_verify, metrics)
else:
logger.info("Not uploading metrics in dry run mode")
else:
@@ -179,7 +182,6 @@ class NPBackupRunner:
self.write_logs(msg, level="critical", raise_error="ValueError")
self._verbose = value
@property
def json_output(self):
return self._json_output
@@ -279,7 +281,11 @@ class NPBackupRunner:
result = fn(self, *args, **kwargs)
self.exec_time = (datetime.utcnow() - start_time).total_seconds()
# Optional patch result with exec time
if self.restic_runner and self.restic_runner.json_output and isinstance(result, dict):
if (
self.restic_runner
and self.restic_runner.json_output
and isinstance(result, dict)
):
result["exec_time"] = self.exec_time
# pylint: disable=E1101 (no-member)
self.write_logs(
@@ -329,7 +335,7 @@ class NPBackupRunner:
js = {
"result": False,
"operation": operation,
"reason": "backend not ready"
"reason": "backend not ready",
}
return js
self.write_logs(
@@ -375,13 +381,13 @@ class NPBackupRunner:
else:
# pylint: disable=E1101 (no-member)
operation = fn.__name__
current_permissions = self.repo_config.g("permissions")
self.write_logs(
f"Permissions required are {required_permissions[operation]}, current permissions are {current_permissions}",
level="info",
)
has_permissions = True # TODO: enforce permissions
has_permissions = True # TODO: enforce permissions
if not has_permissions:
raise PermissionError
except (IndexError, KeyError, PermissionError):
@@ -390,7 +396,7 @@ class NPBackupRunner:
js = {
"result": False,
"operation": operation,
"reason": "Not enough permissions"
"reason": "Not enough permissions",
}
return js
return False
@@ -481,7 +487,7 @@ class NPBackupRunner:
js = {
"result": False,
"operation": operation,
"reason": f"Exception: {exc}"
"reason": f"Exception: {exc}",
}
return js
return False
@@ -677,8 +683,14 @@ class NPBackupRunner:
self.restic_runner.stderr = self.stderr
return True
def convert_to_json_output(self, result: bool, output: str = None, backend_js: dict = None, warnings: str = None):
def convert_to_json_output(
self,
result: bool,
output: str = None,
backend_js: dict = None,
warnings: str = None,
):
if self.json_output:
if backend_js:
js = backend_js
@@ -696,7 +708,7 @@ class NPBackupRunner:
js["reason"] = output
return js
return result
###########################
# ACTUAL RUNNER FUNCTIONS #
###########################
@@ -796,9 +808,7 @@ class NPBackupRunner:
)
# Temporarily disable verbose and enable json result
self.restic_runner.verbose = False
data = self.restic_runner.has_recent_snapshot(
self.minimum_backup_age
)
data = self.restic_runner.has_recent_snapshot(self.minimum_backup_age)
self.restic_runner.verbose = self.verbose
if self.json_output:
return data
@@ -835,7 +845,12 @@ class NPBackupRunner:
@is_ready
@apply_config_to_restic_runner
@catch_exceptions
def backup(self, force: bool = False, read_from_stdin: bool = False, stdin_filename: str = "stdin.data") -> bool:
def backup(
self,
force: bool = False,
read_from_stdin: bool = False,
stdin_filename: str = "stdin.data",
) -> bool:
"""
Run backup after checking if no recent backup exists, unless force == True
"""
@@ -846,7 +861,9 @@ class NPBackupRunner:
if not read_from_stdin:
paths = self.repo_config.g("backup_opts.paths")
if not paths:
msg = f"No paths to backup defined for repo {self.repo_config.g('name')}"
msg = (
f"No paths to backup defined for repo {self.repo_config.g('name')}"
)
self.write_logs(msg, level="critical")
return self.convert_to_json_output(False, msg)
@@ -877,9 +894,11 @@ class NPBackupRunner:
if not isinstance(exclude_files, list):
exclude_files = [exclude_files]
excludes_case_ignore = self.repo_config.g("backup_opts.excludes_case_ignore")
excludes_case_ignore = self.repo_config.g(
"backup_opts.excludes_case_ignore"
)
exclude_caches = self.repo_config.g("backup_opts.exclude_caches")
exclude_files_larger_than = self.repo_config.g(
"backup_opts.exclude_files_larger_than"
)
@@ -888,7 +907,7 @@ class NPBackupRunner:
BytesConverter(exclude_files_larger_than)
except ValueError:
warning = f"Bogus unit for exclude_files_larger_than value given: {exclude_files_larger_than}"
self.write_logs( warning, level="warning")
self.write_logs(warning, level="warning")
warnings.append(warning)
exclude_files_larger_than = None
exclude_files_larger_than = None
@@ -934,7 +953,9 @@ class NPBackupRunner:
self.json_output = False
# Since we don't want to close queues nor create a subthread, we need to change behavior here
# pylint: disable=E1123 (unexpected-keyword-arg)
has_recent_snapshots, backup_tz = self.has_recent_snapshot(__close_queues=False, __no_threads=True)
has_recent_snapshots, backup_tz = self.has_recent_snapshot(
__close_queues=False, __no_threads=True
)
self.json_output = json_output
# We also need to "reapply" the json setting to backend
self.restic_runner.json_output = json_output
@@ -957,7 +978,10 @@ class NPBackupRunner:
level="info",
)
else:
self.write_logs(f"Running backup of piped stdin data as name {stdin_filename} to repo {self.repo_config.g('name')}", level="info")
self.write_logs(
f"Running backup of piped stdin data as name {stdin_filename} to repo {self.repo_config.g('name')}",
level="info",
)
pre_exec_commands_success = True
if pre_exec_commands:
@@ -999,16 +1023,21 @@ class NPBackupRunner:
read_from_stdin=read_from_stdin,
stdin_filename=stdin_filename,
tags=tags,
additional_backup_only_parameters=additional_backup_only_parameters
additional_backup_only_parameters=additional_backup_only_parameters,
)
self.write_logs(f"Restic output:\n{self.restic_runner.backup_result_content}", level="debug")
self.write_logs(
f"Restic output:\n{self.restic_runner.backup_result_content}", level="debug"
)
# Extract backup size from result_string
# Metrics will not be in json format, since we need to diag cloud issues until
# there is a fix for https://github.com/restic/restic/issues/4155
backup_too_small = metric_writer(
self.repo_config, result, self.restic_runner.backup_result_content, self.restic_runner.dry_run
self.repo_config,
result,
self.restic_runner.backup_result_content,
self.restic_runner.dry_run,
)
if backup_too_small:
self.write_logs("Backup is smaller than expected", level="error")
@@ -1034,10 +1063,15 @@ class NPBackupRunner:
)
operation_result = (
result and pre_exec_commands_success and post_exec_commands_success and not backup_too_small
result
and pre_exec_commands_success
and post_exec_commands_success
and not backup_too_small
)
msg = f"Operation finished with {'success' if operation_result else 'failure'}"
self.write_logs(msg, level="info" if operation_result else "error",
self.write_logs(
msg,
level="info" if operation_result else "error",
)
if not operation_result:
# patch result if json
@@ -1192,7 +1226,9 @@ class NPBackupRunner:
@apply_config_to_restic_runner
@catch_exceptions
def dump(self, path: str) -> bool:
self.write_logs(f"Dumping {path} from {self.repo_config.g('name')}", level="info")
self.write_logs(
f"Dumping {path} from {self.repo_config.g('name')}", level="info"
)
result = self.restic_runner.dump(path)
return result
@@ -1205,9 +1241,11 @@ class NPBackupRunner:
@apply_config_to_restic_runner
@catch_exceptions
def stats(self) -> bool:
self.write_logs(f"Getting stats of repo {self.repo_config.g('name')}", level="info")
self.write_logs(
f"Getting stats of repo {self.repo_config.g('name')}", level="info"
)
result = self.restic_runner.stats()
return result
return result
@threaded
@close_queues

View file

@@ -731,4 +731,4 @@ may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.
"""
"""

View file

@@ -223,7 +223,12 @@ def _make_treedata_from_json(ls_result: List[dict]) -> sg.TreeData:
def ls_window(repo_config: dict, snapshot_id: str) -> bool:
result = gui_thread_runner(
repo_config, "ls", snapshot=snapshot_id, __stdout=False, __autoclose=True, __compact=True
repo_config,
"ls",
snapshot=snapshot_id,
__stdout=False,
__autoclose=True,
__compact=True,
)
if not result["result"]:
sg.Popup("main_gui.snapshot_is_empty")
@@ -397,12 +402,12 @@ def _main_gui(viewer_mode: bool):
global logger
parser = ArgumentParser(
prog=f"{__intname__}",
description="""Portable Network Backup Client\n
prog=f"{__intname__}",
description="""Portable Network Backup Client\n
This program is distributed under the GNU General Public License and comes with ABSOLUTELY NO WARRANTY.\n
This is free software, and you are welcome to redistribute it under certain conditions; Please type --license for more info.""",
)
)
parser.add_argument(
"-c",
"--config-file",
@@ -425,7 +430,7 @@ def _main_gui(viewer_mode: bool):
type=str,
default=None,
required=False,
help="Optional path for logfile"
help="Optional path for logfile",
)
args = parser.parse_args()
if args.log_file:
@@ -602,7 +607,7 @@ def _main_gui(viewer_mode: bool):
return None, None
def get_config(config_file: str = None, window: sg.Window = None):
full_config, config_file = get_config_file(config_file = config_file)
full_config, config_file = get_config_file(config_file=config_file)
if full_config and config_file:
repo_config, config_inheritance = npbackup.configuration.get_repo_config(
full_config
@@ -621,12 +626,12 @@ def _main_gui(viewer_mode: bool):
if config_file:
window.set_title(f"{SHORT_PRODUCT_NAME} - {config_file}")
if not viewer_mode and config_file:
window['--LAUNCH-BACKUP--'].Update(disabled=False)
window['--OPERATIONS--'].Update(disabled=False)
window['--FORGET--'].Update(disabled=False)
window['--CONFIGURE--'].Update(disabled=False)
window["--LAUNCH-BACKUP--"].Update(disabled=False)
window["--OPERATIONS--"].Update(disabled=False)
window["--FORGET--"].Update(disabled=False)
window["--CONFIGURE--"].Update(disabled=False)
if repo_list:
window['-active_repo-'].Update(values=repo_list, value=repo_list[0])
window["-active_repo-"].Update(values=repo_list, value=repo_list[0])
return (
full_config,
config_file,
@@ -646,7 +651,7 @@ def _main_gui(viewer_mode: bool):
backend_type,
repo_uri,
repo_list,
) = get_config(config_file = config_file)
) = get_config(config_file=config_file)
else:
# Let's try to read standard restic repository env variables
viewer_repo_uri = os.environ.get("RESTIC_REPOSITORY", None)
@@ -681,7 +686,10 @@ def _main_gui(viewer_mode: bool):
[sg.Text(_t("main_gui.viewer_mode"))]
if viewer_mode
else [],
[sg.Text("{} ".format(_t("main_gui.backup_state"))), sg.Text("", key="-backend_type-")],
[
sg.Text("{} ".format(_t("main_gui.backup_state"))),
sg.Text("", key="-backend_type-"),
],
[
sg.Button(
_t("generic.unknown"),
@@ -696,8 +704,15 @@ def _main_gui(viewer_mode: bool):
),
],
[
sg.Text(_t("main_gui.no_config"), font=("Arial", 14), text_color="red", key="-NO-CONFIG-", visible=False)
] if not viewer_mode
sg.Text(
_t("main_gui.no_config"),
font=("Arial", 14),
text_color="red",
key="-NO-CONFIG-",
visible=False,
)
]
if not viewer_mode
else [],
[
sg.Text(_t("main_gui.backup_list_to")),
@ -706,7 +721,7 @@ def _main_gui(viewer_mode: bool):
key="-active_repo-",
default_value=repo_list[0] if repo_list else None,
enable_events=True,
size=(20, 1)
size=(20, 1),
),
]
if not viewer_mode
@ -719,7 +734,7 @@ def _main_gui(viewer_mode: bool):
justification="left",
key="snapshot-list",
select_mode="extended",
size=(None, 10)
size=(None, 10),
)
],
[
@@ -731,23 +746,31 @@ def _main_gui(viewer_mode: bool):
sg.Button(
_t("main_gui.launch_backup"),
key="--LAUNCH-BACKUP--",
disabled=viewer_mode or (not viewer_mode and not config_file),
),
sg.Button(_t("main_gui.see_content"), key="--SEE-CONTENT--",
disabled=not viewer_mode and not config_file
disabled=viewer_mode
or (not viewer_mode and not config_file),
),
sg.Button(
_t("generic.forget"), key="--FORGET--", disabled=viewer_mode or (not viewer_mode and not config_file)
_t("main_gui.see_content"),
key="--SEE-CONTENT--",
disabled=not viewer_mode and not config_file,
),
sg.Button(
_t("generic.forget"),
key="--FORGET--",
disabled=viewer_mode
or (not viewer_mode and not config_file),
), # TODO , visible=False if repo_config.g("permissions") != "full" else True),
sg.Button(
_t("main_gui.operations"),
key="--OPERATIONS--",
disabled=viewer_mode or (not viewer_mode and not config_file),
disabled=viewer_mode
or (not viewer_mode and not config_file),
),
sg.Button(
_t("generic.configure"),
key="--CONFIGURE--",
disabled=viewer_mode or (not viewer_mode and not config_file),
disabled=viewer_mode
or (not viewer_mode and not config_file),
),
sg.Button(
_t("main_gui.load_config"),
@@ -793,7 +816,7 @@ def _main_gui(viewer_mode: bool):
backup_tz = None
snapshot_list = []
gui_update_state()
while True:
event, values = window.read(timeout=60000)
@@ -856,9 +879,13 @@ def _main_gui(viewer_mode: bool):
# Make sure we trigger a GUI refresh when configuration is changed
event = "--STATE-BUTTON--"
if event == "--OPEN-REPO--":
viewer_repo_uri, viewer_repo_password = viewer_repo_gui(viewer_repo_uri, viewer_repo_password)
viewer_repo_uri, viewer_repo_password = viewer_repo_gui(
viewer_repo_uri, viewer_repo_password
)
if not viewer_repo_uri or not viewer_repo_password:
sg.Popup(_t("main_gui.repo_and_password_cannot_be_empty"), keep_on_top=True)
sg.Popup(
_t("main_gui.repo_and_password_cannot_be_empty"), keep_on_top=True
)
continue
repo_config = viewer_create_repo(viewer_repo_uri, viewer_repo_password)
event = "--STATE-BUTTON--"

File diff suppressed because it is too large

View file

@@ -112,7 +112,7 @@ def gui_thread_runner(
# So we don't always init repo_config, since runner.group_runner would do that itself
if __repo_config:
runner.repo_config = __repo_config
fn = getattr(runner, __fn_name)
logger.debug(
f"gui_thread_runner runs {fn.__name__} {'with' if USE_THREADING else 'without'} threads"
@@ -194,7 +194,7 @@ def gui_thread_runner(
_t("generic.close"),
key="--EXIT--",
button_color=(TXT_COLOR_LDR, BG_COLOR_LDR),
disabled=True
disabled=True,
)
],
]

View file

@@ -168,7 +168,10 @@ def restic_str_output_to_json(
def restic_json_to_prometheus(
restic_result: bool, restic_json: dict, labels: dict = None, minimum_backup_size_error: str = None,
restic_result: bool,
restic_json: dict,
labels: dict = None,
minimum_backup_size_error: str = None,
) -> Tuple[bool, List[str], bool]:
"""
Transform a restic JSON result into prometheus metrics
@@ -225,8 +228,9 @@ def restic_json_to_prometheus(
backup_too_small = False
if minimum_backup_size_error:
if not restic_json["data_added"] or \
restic_json["data_added"] < int(BytesConverter(str(minimum_backup_size_error).replace(" ", "")).bytes):
if not restic_json["data_added"] or restic_json["data_added"] < int(
BytesConverter(str(minimum_backup_size_error).replace(" ", "")).bytes
):
backup_too_small = True
good_backup = restic_result and not backup_too_small
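
Restated outside the diff, the size guard above marks a backup as too small when nothing was added or the added volume falls below the configured floor (hypothetical standalone function; the real code reads data_added from restic's JSON summary):

from ofunctions.misc import BytesConverter

def is_backup_too_small(data_added, minimum_backup_size_error):
    if not minimum_backup_size_error:
        return False
    floor = int(BytesConverter(str(minimum_backup_size_error).replace(" ", "")).bytes)
    return not data_added or data_added < floor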

View file

@@ -30,7 +30,8 @@ from npbackup.__env__ import FAST_COMMANDS_TIMEOUT, CHECK_INTERVAL
logger = getLogger()
fn_name = lambda n=0: sys._getframe(n + 1).f_code.co_name # TODO go to ofunctions.misc
fn_name = lambda n=0: sys._getframe(n + 1).f_code.co_name # TODO go to ofunctions.misc
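
A gloss on the fn_name lambda: sys._getframe(n + 1).f_code.co_name resolves the function name n frames above the caller, so the default n=0 yields the name of whichever function invokes it:

def executor():
    return fn_name()  # -> "executor"
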
class ResticRunner:
def __init__(
@@ -186,7 +187,7 @@ class ResticRunner:
self._dry_run = value
else:
raise ValueError("Bogus dry run value given")
@property
def json_output(self) -> bool:
return self._json_output
@@ -256,8 +257,7 @@ class ResticRunner:
errors_allowed: bool = False,
no_output_queues: bool = False,
timeout: int = None,
stdin: sys.stdin = None
stdin: sys.stdin = None,
) -> Tuple[bool, str]:
"""
Executes restic with given command
@@ -271,7 +271,7 @@ class ResticRunner:
else ""
)
_cmd = f'"{self._binary}" {additional_parameters}{cmd}{self.generic_arguments}'
self._executor_running = True
self.write_logs(f"Running command: [{_cmd}]", level="debug")
self._make_env()
@@ -512,7 +512,9 @@ class ResticRunner:
self.is_init = True
return True
else:
if re.search(".*already exists|.*already initialized", output, re.IGNORECASE):
if re.search(
".*already exists|.*already initialized", output, re.IGNORECASE
):
self.write_logs("Repo is already initialized.", level="info")
self.is_init = True
return True
@@ -528,7 +530,9 @@ class ResticRunner:
We'll just check if snapshots can be read
"""
cmd = "snapshots"
self._is_init, output = self.executor(cmd, timeout=FAST_COMMANDS_TIMEOUT, errors_allowed=True)
self._is_init, output = self.executor(
cmd, timeout=FAST_COMMANDS_TIMEOUT, errors_allowed=True
)
if not self._is_init:
self.write_logs(output, level="error")
return self._is_init
@@ -558,7 +562,8 @@ class ResticRunner:
if fn.__name__ == "backup":
if not self.init():
self.write_logs(
f"Could not initialize repo for backup operation", level="critical"
f"Could not initialize repo for backup operation",
level="critical",
)
return None
else:
@@ -572,7 +577,7 @@ class ResticRunner:
return fn(self, *args, **kwargs)
return wrapper
def convert_to_json_output(self, result, output, msg=None, **kwargs):
"""
result, output = command_runner results
@@ -589,7 +594,7 @@ class ResticRunner:
"result": result,
"operation": operation,
"args": kwargs,
"output": None
"output": None,
}
if result:
if output:
@@ -612,7 +617,7 @@ class ResticRunner:
try:
js["output"] = json.loads(line)
except json.decoder.JSONDecodeError:
js["output"] = {'data': line}
js["output"] = {"data": line}
if msg:
self.write_logs(msg, level="info")
else:
@@ -622,7 +627,7 @@ class ResticRunner:
else:
js["reason"] = output
return js
if result:
if msg:
self.write_logs(msg, level="info")
@@ -631,7 +636,6 @@ class ResticRunner:
self.write_logs(msg, level="error")
return False
@check_if_init
def list(self, subject: str) -> Union[bool, str, dict]:
"""
@@ -650,7 +654,6 @@ class ResticRunner:
msg = f"Failed to list {subject} objects:\n{output}"
return self.convert_to_json_output(result, output, msg=msg, **kwargs)
@check_if_init
def ls(self, snapshot: str) -> Union[bool, str, dict]:
"""
@@ -660,7 +663,7 @@ class ResticRunner:
# snapshot db125b40 of [C:\\GIT\\npbackup] filtered by [] at 2023-01-03 09:41:30.9104257 +0100 CET):
return output.split("\n", 2)[2]
Using --json here does not return actual json content, but lines with each file being a json...
Using --json here does not return actual json content, but lines with each file being a json...
"""
kwargs = locals()
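
As the docstring notes, restic's ls --json output is line-delimited: each line is a self-contained JSON object rather than one document. A sketch of consuming it under that assumption (output stands for the raw text the executor returns):

import json

nodes = []
for line in output.splitlines():
    line = line.strip()
    if line:
        nodes.append(json.loads(line))  # one JSON node (file or directory) per line
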
@@ -674,7 +677,6 @@ class ResticRunner:
msg = f"Could not list snapshot {snapshot} content:\n{output}"
return self.convert_to_json_output(result, output, msg=msg, **kwargs)
# @check_if_init # We don't need to run if init before checking snapshots since if init searches for snapshots
def snapshots(self) -> Union[bool, str, dict]:
"""
@@ -683,7 +685,7 @@ class ResticRunner:
"""
kwargs = locals()
kwargs.pop("self")
cmd = "snapshots"
result, output = self.executor(cmd, timeout=FAST_COMMANDS_TIMEOUT)
if result:
@@ -770,7 +772,9 @@ class ResticRunner:
if exclude_caches:
cmd += " --exclude-caches"
if exclude_files_larger_than:
exclude_files_larger_than = int(BytesConverter(exclude_files_larger_than).bytes)
exclude_files_larger_than = int(
BytesConverter(exclude_files_larger_than).bytes
)
cmd += f" --exclude-larger-than {exclude_files_larger_than}"
if one_file_system:
cmd += " --one-file-system"
@@ -799,7 +803,8 @@ class ResticRunner:
result, output = self.executor(cmd)
if (
not read_from_stdin and use_fs_snapshot
not read_from_stdin
and use_fs_snapshot
and not result
and re.search("VSS Error", output, re.IGNORECASE)
):
@@ -833,9 +838,10 @@ class ResticRunner:
msg = f"Could not find path {path}:\n{output}"
return self.convert_to_json_output(result, output, msg=msg, **kwargs)
@check_if_init
def restore(self, snapshot: str, target: str, includes: List[str] = None) -> Union[bool, str, dict]:
def restore(
self, snapshot: str, target: str, includes: List[str] = None
) -> Union[bool, str, dict]:
"""
Restore given snapshot to directory
"""
@@ -858,7 +864,6 @@ class ResticRunner:
msg = f"Data not restored:\n{output}"
return self.convert_to_json_output(result, output, msg=msg, **kwargs)
@check_if_init
def forget(
self,
@@ -938,7 +943,6 @@ class ResticRunner:
msg = "Could not prune repository"
return self.convert_to_json_output(result, output=output, msg=msg, **kwargs)
@check_if_init
def check(self, read_data: bool = True) -> Union[bool, str, dict]:
"""
@@ -955,7 +959,6 @@ class ResticRunner:
msg = "Repo check failed"
return self.convert_to_json_output(result, output, msg=msg, **kwargs)
@check_if_init
def repair(self, subject: str) -> Union[bool, str, dict]:
"""
@@ -990,7 +993,7 @@ class ResticRunner:
else:
msg = f"Repo unlock failed:\n{output}"
return self.convert_to_json_output(result, output, msg=msg, **kwargs)
@check_if_init
def dump(self, path: str) -> Union[bool, str, dict]:
"""
@@ -1088,7 +1091,7 @@ class ResticRunner:
if not delta:
if self.json_output:
msg = "No delta given"
self.convert_to_json_output(False, None, msg=msg **kwargs)
self.convert_to_json_output(False, None, msg=msg, **kwargs)
return False, None
try:
# Make sure we run with json support for this one

View file

@@ -27,9 +27,9 @@ def serialize_datetime(obj):
By default, datetime objects aren't serialisable to json directly
Here's a quick converter from https://www.geeksforgeeks.org/how-to-fix-datetime-datetime-not-json-serializable-in-python/
"""
if isinstance(obj, datetime.datetime):
return obj.isoformat()
raise TypeError("Type not serializable")
if isinstance(obj, datetime.datetime):
return obj.isoformat()
raise TypeError("Type not serializable")
def entrypoint(*args, **kwargs):
@@ -48,6 +48,7 @@ def entrypoint(*args, **kwargs):
print(json.dumps(result, default=serialize_datetime))
sys.exit(0)
def auto_upgrade(full_config: dict):
pass
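
A self-contained demonstration of the converter above, mirroring the json.dumps(..., default=serialize_datetime) call in entrypoint (example timestamp is made up):

import datetime
import json

def serialize_datetime(obj):
    if isinstance(obj, datetime.datetime):
        return obj.isoformat()
    raise TypeError("Type not serializable")

print(json.dumps({"taken_at": datetime.datetime(2024, 2, 25, 19, 59)}, default=serialize_datetime))
# {"taken_at": "2024-02-25T19:59:00"}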