Mirror of https://github.com/simple-login/app.git (synced 2025-09-05 06:04:18 +08:00)
fix: migrate to logger.warning usage (#2456)
Signed-off-by: Emmanuel Ferdman <emmanuelferdman@gmail.com>
parent a3d7732ed8
commit abc981fa08
7 changed files with 10 additions and 10 deletions
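Every change in this commit is the same one-line substitution: the deprecated Logger.warn alias is replaced by the canonical Logger.warning. A minimal sketch of the migration, using the standard logging module directly rather than the project's LOG wrapper (that substitution is an assumption here): warn has been a deprecated alias since Python 3.3 and was removed in Python 3.13, so only warning is safe going forward.

import logging

logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger("simple_login_sketch")  # stand-in for the project's LOG object

# Logger.warn is a deprecated alias for Logger.warning (deprecated since Python 3.3,
# removed in Python 3.13), so the canonical spelling is used everywhere:
LOG.warning("Retrying sending email due to error %s. %d retries left.", "timeout", 2)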
@@ -187,7 +187,7 @@ class MailSender:
            TimeoutError,
        ) as e:
            if retries > 0:
-               LOG.warn(
+               LOG.warning(
                    f"Retrying sending email due to error {e}. {retries} retries left. Will wait {0.3*retries} seconds."
                )
                time.sleep(0.3 * retries)
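For context, this hunk sits in MailSender's retry path: a transient SMTP or timeout error is logged as a warning and the send is retried after a linear backoff of 0.3 * retries seconds. A self-contained sketch of that pattern, with a hypothetical deliver() callable standing in for the real SMTP send:

import logging
import time
from smtplib import SMTPException

LOG = logging.getLogger(__name__)


def send_with_retries(deliver, retries: int = 3) -> bool:
    # Retry transient failures, waiting 0.3 * retries seconds between attempts.
    while True:
        try:
            deliver()
            return True
        except (SMTPException, ConnectionError, TimeoutError) as e:
            if retries > 0:
                LOG.warning(
                    f"Retrying sending email due to error {e}. {retries} retries left. "
                    f"Will wait {0.3 * retries} seconds."
                )
                time.sleep(0.3 * retries)
                retries -= 1
            else:
                return False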
cron.py (2 changed lines)
@@ -920,7 +920,7 @@ def check_custom_domain():
        ):
            alias_count = Alias.filter(Alias.custom_domain_id == custom_domain.id).count()
            if alias_count > 0:
-               LOG.warn(
+               LOG.warning(
                    f"Custom Domain {custom_domain} has {alias_count} aliases. Won't delete"
                )
            else:
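The cron.py hunk is a safety guard inside check_custom_domain(): an expired custom domain is only deleted when no alias still points at it; otherwise the job logs a warning and moves on. A schematic sketch of that guard, with a plain count and a delete callback standing in for the SQLAlchemy Alias/CustomDomain models:

import logging

LOG = logging.getLogger(__name__)


def maybe_delete_custom_domain(custom_domain, alias_count: int, delete) -> None:
    # alias_count would come from something like
    # Alias.filter(Alias.custom_domain_id == custom_domain.id).count()
    if alias_count > 0:
        LOG.warning(
            f"Custom Domain {custom_domain} has {alias_count} aliases. Won't delete"
        )
    else:
        delete(custom_domain)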
@@ -34,7 +34,7 @@ class PostgresEventSource(EventSource):
            try:
                self.__listen(on_event)
            except Exception as e:
-               LOG.warn(f"Error listening to events: {e}")
+               LOG.warning(f"Error listening to events: {e}")
                sleep(_POSTGRES_RECONNECT_INTERVAL_SECONDS)
                self.__connect()

@@ -68,7 +68,7 @@ class PostgresEventSource(EventSource):
                else:
                    LOG.info(f"Could not find event with id={notify.payload}")
            except Exception as e:
-               LOG.warn(f"Error getting event: {e}")
+               LOG.warning(f"Error getting event: {e}")
                Session.close()  # Ensure we get a new connection and we don't leave a dangling tx

    def __connect(self):
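Both PostgresEventSource hunks belong to a LISTEN/NOTIFY loop: the outer loop reconnects and keeps listening after any error, and the notification handler looks the event up by the notify payload, logging a warning and closing the session on failure so no transaction is left dangling. A rough, self-contained sketch of that shape using psycopg2; the DSN, channel name, and reconnect interval are placeholders, and the payload handling is simplified to a callback rather than the project's Session/Event lookup:

import logging
import select
from time import sleep

import psycopg2

LOG = logging.getLogger(__name__)

_POSTGRES_RECONNECT_INTERVAL_SECONDS = 30  # placeholder value


class PostgresEventSourceSketch:
    def __init__(self, dsn: str, channel: str = "events"):  # channel name is a guess
        self.__dsn = dsn
        self.__channel = channel
        self.__connect()

    def run(self, on_event) -> None:
        while True:
            try:
                self.__listen(on_event)
            except Exception as e:
                LOG.warning(f"Error listening to events: {e}")
                sleep(_POSTGRES_RECONNECT_INTERVAL_SECONDS)
                self.__connect()

    def __connect(self) -> None:
        self.__connection = psycopg2.connect(self.__dsn)
        self.__connection.autocommit = True
        with self.__connection.cursor() as cursor:
            cursor.execute(f"LISTEN {self.__channel};")

    def __listen(self, on_event) -> None:
        while True:
            # Wait until the connection is readable, then drain pending notifications
            if select.select([self.__connection], [], [], 30) == ([], [], []):
                continue
            self.__connection.poll()
            while self.__connection.notifies:
                notify = self.__connection.notifies.pop(0)
                try:
                    on_event(notify.payload)
                except Exception as e:
                    LOG.warning(f"Error getting event: {e}")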
@@ -112,5 +112,5 @@ class DeadLetterEventSource(EventSource):
                    LOG.debug("No dead letter events")
                    sleep(_DEAD_LETTER_INTERVAL_SECONDS)
            except Exception as e:
-               LOG.warn(f"Error getting dead letter event: {e}")
+               LOG.warning(f"Error getting dead letter event: {e}")
                sleep(_DEAD_LETTER_INTERVAL_SECONDS)
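The DeadLetterEventSource hunk is the error branch of a polling loop: whether there is nothing to process or fetching fails, the loop sleeps for a fixed interval before polling again, so a transient failure never busy-loops. A minimal sketch with a hypothetical fetch function and a placeholder interval:

import logging
from time import sleep

LOG = logging.getLogger(__name__)

_DEAD_LETTER_INTERVAL_SECONDS = 30  # placeholder value


def run_dead_letter_loop(fetch_dead_letter_events, on_event) -> None:
    while True:
        try:
            events = fetch_dead_letter_events()
            if events:
                for event in events:
                    on_event(event)
            else:
                LOG.debug("No dead letter events")
                sleep(_DEAD_LETTER_INTERVAL_SECONDS)
        except Exception as e:
            LOG.warning(f"Error getting dead letter event: {e}")
            sleep(_DEAD_LETTER_INTERVAL_SECONDS)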
@@ -46,5 +46,5 @@ class Runner:
                event.retry_count = event.retry_count + 1
                Session.commit()
            except Exception as e:
-               LOG.warn(f"Exception processing event [id={event.id}]: {e}")
+               LOG.warning(f"Exception processing event [id={event.id}]: {e}")
                newrelic.agent.record_custom_metric("Custom/sync_event_failed", 1)
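The Runner hunk shows the failure path for a sync event: the retry counter is bumped and committed, and an unexpected exception is logged as a warning and recorded as a New Relic custom metric so failures show up in monitoring. A trimmed sketch of that path; the event object, handler, and commit callback are stand-ins, and only newrelic.agent.record_custom_metric mirrors the call from the diff:

import logging

import newrelic.agent

LOG = logging.getLogger(__name__)


def process_sync_event(event, handle, commit) -> None:
    try:
        handle(event)
        event.retry_count = event.retry_count + 1
        commit()
    except Exception as e:
        LOG.warning(f"Exception processing event [id={event.id}]: {e}")
        # Surface the failure in New Relic as a custom metric
        newrelic.agent.record_custom_metric("Custom/sync_event_failed", 1)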
@@ -372,13 +372,13 @@ def execute():
                jobs_done += 1
                LOG.d("Processed job %s", job)
            except Exception as e:
-               LOG.warn(f"Error processing job (id={job.id} name={job.name}): {e}")
+               LOG.warning(f"Error processing job (id={job.id} name={job.name}): {e}")

                # Increment manually, as the attempts increment is done by the take_job but not
                # updated in our instance
                job_attempts = job.attempts + 1
                if job_attempts >= config.JOB_MAX_ATTEMPTS:
-                   LOG.warn(
+                   LOG.warning(
                        f"Marking job (id={job.id} name={job.name} attempts={job_attempts}) as ERROR"
                    )
                    job.state = JobState.error.value
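The execute() hunk caps how often a failing job is retried: the attempt counter is re-incremented locally (take_job already bumped it in the database but not on this instance), and once it reaches config.JOB_MAX_ATTEMPTS the job is marked as errored instead of being retried forever. A schematic sketch; the Job dataclass, the JobState values, and the max-attempts constant are placeholders that only mirror the names visible in the diff:

import logging
from dataclasses import dataclass
from enum import Enum

LOG = logging.getLogger(__name__)

JOB_MAX_ATTEMPTS = 5  # placeholder for config.JOB_MAX_ATTEMPTS


class JobState(Enum):
    # Enum values are illustrative, not the project's actual ones
    ready = 0
    taken = 1
    done = 2
    error = 3


@dataclass
class Job:
    id: int
    name: str
    attempts: int
    state: int = JobState.taken.value


def handle_job_failure(job: Job, error: Exception) -> None:
    LOG.warning(f"Error processing job (id={job.id} name={job.name}): {error}")

    # Increment manually, as the attempts increment is done by take_job but not
    # updated in our instance
    job_attempts = job.attempts + 1
    if job_attempts >= JOB_MAX_ATTEMPTS:
        LOG.warning(
            f"Marking job (id={job.id} name={job.name} attempts={job_attempts}) as ERROR"
        )
        job.state = JobState.error.value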
@@ -17,4 +17,4 @@ class MetricExporter:
                self.__newrelic.send(metrics)
                LOG.info("Upcloud metrics sent to NewRelic")
            except Exception as e:
-               LOG.warn(f"Could not export metrics: {e}")
+               LOG.warning(f"Could not export metrics: {e}")
@@ -35,7 +35,7 @@ def get_metric(json: Any, metric: str) -> UpcloudMetric:
                    UpcloudRecord(time=time, db_role=db_role, label=label, value=value)
                )
            else:
-               LOG.warn(f"Could not get value for metric {metric}")
+               LOG.warning(f"Could not get value for metric {metric}")

    return UpcloudMetric(metric_name=metric, records=records)
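Finally, get_metric collects one UpcloudRecord per data point and logs a warning whenever a point has no value, then wraps the records in an UpcloudMetric. A rough reconstruction with simplified dataclasses; the field names come from the constructor calls in the diff, but the JSON layout here is an assumption, not Upcloud's real response schema:

import logging
from dataclasses import dataclass
from typing import Any, List, Optional

LOG = logging.getLogger(__name__)


@dataclass
class UpcloudRecord:
    time: str
    db_role: str
    label: str
    value: float


@dataclass
class UpcloudMetric:
    metric_name: str
    records: List[UpcloudRecord]


def get_metric(json: Any, metric: str) -> UpcloudMetric:
    records: List[UpcloudRecord] = []
    # Assumed, simplified layout: {"cpu": {"data": [{"time": ..., "db_role": ..., "label": ..., "value": ...}]}}
    for point in json.get(metric, {}).get("data", []):
        value: Optional[float] = point.get("value")
        if value is not None:
            records.append(
                UpcloudRecord(
                    time=point.get("time", ""),
                    db_role=point.get("db_role", ""),
                    label=point.get("label", ""),
                    value=value,
                )
            )
        else:
            LOG.warning(f"Could not get value for metric {metric}")

    return UpcloudMetric(metric_name=metric, records=records)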