diff --git a/app/mail_sender.py b/app/mail_sender.py
index 28849271..8a59e787 100644
--- a/app/mail_sender.py
+++ b/app/mail_sender.py
@@ -187,7 +187,7 @@ class MailSender:
             TimeoutError,
         ) as e:
             if retries > 0:
-                LOG.warn(
+                LOG.warning(
                     f"Retrying sending email due to error {e}. {retries} retries left. Will wait {0.3*retries} seconds."
                 )
                 time.sleep(0.3 * retries)
diff --git a/cron.py b/cron.py
index 8162d9d1..b74da0dc 100644
--- a/cron.py
+++ b/cron.py
@@ -920,7 +920,7 @@ def check_custom_domain():
     ):
         alias_count = Alias.filter(Alias.custom_domain_id == custom_domain.id).count()
         if alias_count > 0:
-            LOG.warn(
+            LOG.warning(
                 f"Custom Domain {custom_domain} has {alias_count} aliases. Won't delete"
             )
         else:
diff --git a/events/event_source.py b/events/event_source.py
index 2b5920c3..d440988b 100644
--- a/events/event_source.py
+++ b/events/event_source.py
@@ -34,7 +34,7 @@ class PostgresEventSource(EventSource):
             try:
                 self.__listen(on_event)
             except Exception as e:
-                LOG.warn(f"Error listening to events: {e}")
+                LOG.warning(f"Error listening to events: {e}")
                 sleep(_POSTGRES_RECONNECT_INTERVAL_SECONDS)
                 self.__connect()
@@ -68,7 +68,7 @@ class PostgresEventSource(EventSource):
                 else:
                     LOG.info(f"Could not find event with id={notify.payload}")
             except Exception as e:
-                LOG.warn(f"Error getting event: {e}")
+                LOG.warning(f"Error getting event: {e}")
         Session.close()  # Ensure we get a new connection and we don't leave a dangling tx

     def __connect(self):
@@ -112,5 +112,5 @@ class DeadLetterEventSource(EventSource):
                     LOG.debug("No dead letter events")
                     sleep(_DEAD_LETTER_INTERVAL_SECONDS)
             except Exception as e:
-                LOG.warn(f"Error getting dead letter event: {e}")
+                LOG.warning(f"Error getting dead letter event: {e}")
                 sleep(_DEAD_LETTER_INTERVAL_SECONDS)
diff --git a/events/runner.py b/events/runner.py
index 2fa47306..3c39d7a3 100644
--- a/events/runner.py
+++ b/events/runner.py
@@ -46,5 +46,5 @@ class Runner:
                 event.retry_count = event.retry_count + 1
                 Session.commit()
             except Exception as e:
-                LOG.warn(f"Exception processing event [id={event.id}]: {e}")
+                LOG.warning(f"Exception processing event [id={event.id}]: {e}")
                 newrelic.agent.record_custom_metric("Custom/sync_event_failed", 1)
diff --git a/job_runner.py b/job_runner.py
index 4cea7811..36b763d9 100644
--- a/job_runner.py
+++ b/job_runner.py
@@ -372,13 +372,13 @@ def execute():
             jobs_done += 1
             LOG.d("Processed job %s", job)
         except Exception as e:
-            LOG.warn(f"Error processing job (id={job.id} name={job.name}): {e}")
+            LOG.warning(f"Error processing job (id={job.id} name={job.name}): {e}")
             # Increment manually, as the attempts increment is done by the take_job but not
             # updated in our instance
             job_attempts = job.attempts + 1
             if job_attempts >= config.JOB_MAX_ATTEMPTS:
-                LOG.warn(
+                LOG.warning(
                     f"Marking job (id={job.id} name={job.name} attempts={job_attempts}) as ERROR"
                 )
                 job.state = JobState.error.value
diff --git a/monitor/metric_exporter.py b/monitor/metric_exporter.py
index 164f25ca..875c7f31 100644
--- a/monitor/metric_exporter.py
+++ b/monitor/metric_exporter.py
@@ -17,4 +17,4 @@ class MetricExporter:
             self.__newrelic.send(metrics)
             LOG.info("Upcloud metrics sent to NewRelic")
         except Exception as e:
-            LOG.warn(f"Could not export metrics: {e}")
+            LOG.warning(f"Could not export metrics: {e}")
diff --git a/monitor/upcloud.py b/monitor/upcloud.py
index c7f1aecc..bf751c83 100644
--- a/monitor/upcloud.py
+++ b/monitor/upcloud.py
@@ -35,7 +35,7 @@ def get_metric(json: Any, metric: str) -> UpcloudMetric:
                     UpcloudRecord(time=time, db_role=db_role, label=label, value=value)
                 )
             else:
-                LOG.warn(f"Could not get value for metric {metric}")
+                LOG.warning(f"Could not get value for metric {metric}")

     return UpcloudMetric(metric_name=metric, records=records)
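
Reviewer context, not part of the patch: `Logger.warn` is a deprecated alias for `Logger.warning` in Python's standard `logging` module, so this rename silences `DeprecationWarning`s without changing what gets logged. A minimal sketch of that behavior, using a hypothetical throwaway logger named "demo":

# Sketch only: shows that Logger.warn emits a DeprecationWarning while
# Logger.warning logs normally. The "demo" logger name is arbitrary.
import logging
import warnings

logging.basicConfig(level=logging.WARNING)
LOG = logging.getLogger("demo")

# Escalate DeprecationWarning to an exception so the deprecated alias is visible.
warnings.simplefilter("error", DeprecationWarning)

LOG.warning("preferred spelling")  # logs normally
try:
    LOG.warn("deprecated alias")  # raises DeprecationWarning under the filter above
except DeprecationWarning as exc:
    print(f"caught: {exc}")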