This commit is contained in:
mdecimus 2024-08-01 17:09:39 +02:00
parent ed214fd087
commit 3cb8918d2e
34 changed files with 500 additions and 388 deletions

View file

@ -2,6 +2,20 @@
All notable changes to this project will be documented in this file. This project adheres to [Semantic Versioning](http://semver.org/).
## [0.9.0] - 2024-08-01
To upgrade replace the `stalwart-mail` binary and then upgrade to the latest web-admin. This version includes breaking changes to the Webhooks configuration and produces a slightly different log output, read [UPGRADING.md](UPGRADING.md) for details.
### Added
- Improved and faster tracing and logging.
- Customizable event logging levels.
### Changed
### Fixed
- ManageSieve: Return capabilities after successful `STARTTLS`
- Do not provide `{auth_authen}` Milter macro unless the user is authenticated
## [0.8.5] - 2024-07-07
To upgrade replace the `stalwart-mail` binary.

607
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -1,3 +1,14 @@
Upgrading from `v0.8.x` to `v0.9.0`
-----------------------------------
Version `0.9.0` introduces significant internal improvements while maintaining compatibility with existing database layouts and configuration file formats from version `0.8.0`. As a result, no data or configuration migration is necessary. This release focuses on enhancing performance and functionality, particularly in logging and tracing capabilities.
To upgrade to Stalwart Mail Server version `0.9.0` from `0.8.x`, begin by downloading the latest version of the `stalwart-mail` binary. Once downloaded, replace the existing binary with the new version. Additionally, it's important to update the WebAdmin interface to the latest version to ensure compatibility and to access new features introduced in this release.
In terms of breaking changes, this release brings significant updates to webhooks. All webhook event names have been modified, requiring a thorough review and adjustment of existing webhook configurations. Furthermore, the update introduces hundreds of new event types, enhancing the granularity and specificity of event handling capabilities. Users should familiarize themselves with these changes to effectively integrate them into their systems.
The reason for this release being classified as a major version, despite the absence of changes to the database or configuration formats, is the complete rewrite of the logging and tracing layer. This overhaul substantially improves the efficiency and speed of generating detailed tracing and logging events, making the system more robust and facilitating easier debugging and monitoring.
Upgrading from `v0.7.3` to `v0.8.0`
-----------------------------------

View file

@ -174,6 +174,7 @@ pub enum Response<T> {
#[derive(Deserialize)]
#[serde(tag = "error")]
#[serde(rename_all = "camelCase")]
pub enum ManagementApiError {
FieldAlreadyExists { field: String, value: String },
FieldMissing { field: String },
@ -181,7 +182,6 @@ pub enum ManagementApiError {
Unsupported { details: String },
AssertFailed,
Other { details: String },
UnsupportedDirectoryOperation { class: String },
}
impl Client {
@ -301,9 +301,6 @@ impl Display for ManagementApiError {
ManagementApiError::Other { details } => {
write!(f, "{}", details)
}
ManagementApiError::UnsupportedDirectoryOperation { class } => {
write!(f, "This operation is only available on internal directories. Your current directory is {class}.")
}
}
}
}

View file

@ -91,7 +91,7 @@ impl Tracers {
// Parse custom logging levels
let mut custom_levels = AHashMap::new();
for event_name in config
.sub_keys("tracing.level", "")
.prefix("tracing.level")
.map(|s| s.to_string())
.collect::<Vec<_>>()
{
@ -176,17 +176,30 @@ impl Tracers {
continue;
}
}
"console" | "stdout" | "stderr" => TracerType::Console(ConsoleTracer {
ansi: config
.property_or_default(("tracer", id, "ansi"), "true")
.unwrap_or(true),
multiline: config
.property_or_default(("tracer", id, "multiline"), "false")
.unwrap_or(false),
buffered: config
.property_or_default(("tracer", id, "buffered"), "true")
.unwrap_or(true),
}),
"console" | "stdout" | "stderr" => {
if !tracers
.iter()
.any(|t| matches!(t.typ, TracerType::Console(_)))
{
TracerType::Console(ConsoleTracer {
ansi: config
.property_or_default(("tracer", id, "ansi"), "true")
.unwrap_or(true),
multiline: config
.property_or_default(("tracer", id, "multiline"), "false")
.unwrap_or(false),
buffered: config
.property_or_default(("tracer", id, "buffered"), "true")
.unwrap_or(true),
})
} else {
config.new_build_error(
("tracer", id, "type"),
"Only one console tracer is allowed".to_string(),
);
continue;
}
}
"otel" | "open-telemetry" => {
let timeout = config
.property::<Duration>(("tracer", id, "timeout"))
@ -477,6 +490,7 @@ impl Tracers {
}
// Add default tracer if none were found
#[cfg(not(feature = "test_mode"))]
if tracers.is_empty() {
for event_type in EventType::variants() {
let event_level = custom_levels
@ -493,7 +507,7 @@ impl Tracers {
interests: global_interests.clone(),
typ: TracerType::Console(ConsoleTracer {
ansi: true,
multiline: true,
multiline: false,
buffered: true,
}),
lossy: false,

View file

@ -133,7 +133,7 @@ impl TcpAcceptor {
let key = core.build_acme_certificate(domain).await;
trc::event!(
Acme(trc::AcmeEvent::ClientSuppliedSNI),
Acme(trc::AcmeEvent::ClientSuppliedSni),
ListenerId = instance.id.clone(),
Protocol = instance.protocol,
Name = domain.to_string(),
@ -144,7 +144,7 @@ impl TcpAcceptor {
}
None => {
trc::event!(
Acme(trc::AcmeEvent::ClientMissingSNI),
Acme(trc::AcmeEvent::ClientMissingSni),
ListenerId = instance.id.clone(),
Protocol = instance.protocol,
);

View file

@ -299,12 +299,12 @@ impl Subscriber {
// https://systemd.io/JOURNAL_NATIVE_PROTOCOL/
use std::os::unix::prelude::AsRawFd;
// Write the whole payload to a memfd
let mut mem = memfd::create_sealable()?;
let mut mem = create_sealable()?;
mem.write_all(payload)?;
// Fully seal the memfd to signal journald that its backing data won't resize anymore
// and so is safe to mmap.
memfd::seal_fully(mem.as_raw_fd())?;
socket::send_one_fd_to(&self.socket, mem.as_raw_fd(), JOURNALD_PATH)
seal_fully(mem.as_raw_fd())?;
send_one_fd_to(&self.socket, mem.as_raw_fd(), JOURNALD_PATH)
}
}

View file

@ -152,7 +152,7 @@ impl Serialize for MethodErrorWrapper {
),
),
trc::JmapEvent::UnknownCapability
| trc::JmapEvent::NotJSON
| trc::JmapEvent::NotJson
| trc::JmapEvent::NotRequest => (
"serverUnavailable",
concat!(

View file

@ -41,20 +41,20 @@ impl<'x> Parser<'x> {
}
pub fn error(&self, message: &str) -> trc::Error {
trc::JmapEvent::NotJSON
trc::JmapEvent::NotJson
.into_err()
.details(format!("{message} at position {}.", self.pos))
}
pub fn error_unterminated(&self) -> trc::Error {
trc::JmapEvent::NotJSON.into_err().details(format!(
trc::JmapEvent::NotJson.into_err().details(format!(
"Unterminated string at position {pos}.",
pos = self.pos
))
}
pub fn error_utf8(&self) -> trc::Error {
trc::JmapEvent::NotJSON.into_err().details(format!(
trc::JmapEvent::NotJson.into_err().details(format!(
"Invalid UTF-8 sequence at position {pos}.",
pos = self.pos
))

View file

@ -707,7 +707,7 @@ impl ToRequestError for trc::Error {
match self.as_ref() {
trc::EventType::Jmap(cause) => match cause {
trc::JmapEvent::UnknownCapability => RequestError::unknown_capability(details),
trc::JmapEvent::NotJSON => RequestError::not_json(details),
trc::JmapEvent::NotJson => RequestError::not_json(details),
trc::JmapEvent::NotRequest => RequestError::not_request(details),
_ => RequestError::invalid_parameters(),
},

View file

@ -418,7 +418,11 @@ impl JMAP {
};
Err(manage::unsupported(format!(
"Requested action is unsupported for {class} directories.",
concat!(
"{} directory cannot be managed. ",
"Only internal directories support inserts and update operations."
),
class
)))
}
}

View file

@ -160,25 +160,29 @@ impl JMAP {
}
pub async fn get_access_token(&self, account_id: u32) -> trc::Result<AccessToken> {
match self
let err = match self
.core
.storage
.directory
.query(QueryBy::Id(account_id), true)
.await
{
Ok(Some(principal)) => self.update_access_token(AccessToken::new(principal)).await,
Ok(Some(principal)) => {
return self.update_access_token(AccessToken::new(principal)).await
}
Ok(None) => Err(trc::AuthEvent::Error
.into_err()
.details("Account not found.")
.caused_by(trc::location!())),
Err(err) => match &self.core.jmap.fallback_admin {
Some((_, secret)) if account_id == u32::MAX => {
self.update_access_token(AccessToken::new(Principal::fallback_admin(secret)))
.await
}
_ => Err(err),
},
Err(err) => Err(err),
};
match &self.core.jmap.fallback_admin {
Some((_, secret)) if account_id == u32::MAX => {
self.update_access_token(AccessToken::new(Principal::fallback_admin(secret)))
.await
}
_ => err,
}
}
}

View file

@ -280,7 +280,7 @@ impl<T: SessionStream> Session<T> {
if self.is_allowed().await {
trc::event!(
Smtp(SmtpEvent::RelayNotAllowed),
Smtp(SmtpEvent::RcptTo),
SpanId = self.data.session_id,
To = self.data.rcpt_to.last().unwrap().address_lcase.clone(),
);

View file

@ -500,7 +500,7 @@ impl DeliveryAttempt {
remote_hosts = remote_hosts_;
} else {
trc::event!(
Delivery(DeliveryEvent::NullMX),
Delivery(DeliveryEvent::NullMx),
SpanId = message.span_id,
Domain = domain.domain.clone(),
Elapsed = time.elapsed(),

View file

@ -111,7 +111,7 @@ impl ElasticSearchStore {
.exists(IndicesExistsParts::Index(&[INDEX_NAMES[0]]))
.send()
.await
.map_err(|err| trc::StoreEvent::ElasticSearchError.reason(err))?;
.map_err(|err| trc::StoreEvent::ElasticsearchError.reason(err))?;
if exists.status_code() == StatusCode::NOT_FOUND {
let response = self
@ -183,11 +183,11 @@ pub(crate) async fn assert_success(response: Result<Response, Error>) -> trc::Re
if status.is_success() {
Ok(response)
} else {
Err(trc::StoreEvent::ElasticSearchError
Err(trc::StoreEvent::ElasticsearchError
.reason(response.text().await.unwrap_or_default())
.ctx(trc::Key::Code, status.as_u16()))
}
}
Err(err) => Err(trc::StoreEvent::ElasticSearchError.reason(err)),
Err(err) => Err(trc::StoreEvent::ElasticsearchError.reason(err)),
}
}

View file

@ -107,14 +107,14 @@ impl ElasticSearchStore {
let json: Value = response
.json()
.await
.map_err(|err| trc::StoreEvent::ElasticSearchError.reason(err))?;
.map_err(|err| trc::StoreEvent::ElasticsearchError.reason(err))?;
let mut results = RoaringBitmap::new();
for hit in json["hits"]["hits"].as_array().ok_or_else(|| {
trc::StoreEvent::ElasticSearchError.reason("Invalid response from ElasticSearch")
trc::StoreEvent::ElasticsearchError.reason("Invalid response from ElasticSearch")
})? {
results.insert(hit["_source"]["document_id"].as_u64().ok_or_else(|| {
trc::StoreEvent::ElasticSearchError.reason("Invalid response from ElasticSearch")
trc::StoreEvent::ElasticsearchError.reason("Invalid response from ElasticSearch")
})? as u32);
}

View file

@ -77,7 +77,7 @@ impl TimedTransaction {
#[inline(always)]
fn into_error(error: FdbError) -> trc::Error {
trc::StoreEvent::FoundationDBError
trc::StoreEvent::FoundationdbError
.reason(error.message())
.ctx(trc::Key::Code, error.code())
}

View file

@ -94,7 +94,7 @@ impl FdbStore {
*key.last_mut().unwrap() += 1;
} else {
trx.cancel();
return Err(trc::StoreEvent::FoundationDBError
return Err(trc::StoreEvent::FoundationdbError
.ctx(
trc::Key::Reason,
"Value is too large",

View file

@ -20,5 +20,5 @@ pub struct MysqlStore {
#[inline(always)]
fn into_error(err: impl Display) -> trc::Error {
trc::StoreEvent::MySQLError.reason(err)
trc::StoreEvent::MysqlError.reason(err)
}

View file

@ -21,5 +21,5 @@ pub struct PostgresStore {
#[inline(always)]
fn into_error(err: impl Display) -> trc::Error {
trc::StoreEvent::PostgreSQLError.reason(err)
trc::StoreEvent::PostgresqlError.reason(err)
}

View file

@ -38,5 +38,5 @@ pub struct RocksDbStore {
#[inline(always)]
fn into_error(err: rocksdb::Error) -> trc::Error {
trc::StoreEvent::RocksDBError.reason(err)
trc::StoreEvent::RocksdbError.reason(err)
}

View file

@ -24,5 +24,5 @@ pub struct SqliteStore {
#[inline(always)]
fn into_error(err: impl Display) -> trc::Error {
trc::StoreEvent::SQLiteError.reason(err)
trc::StoreEvent::SqliteError.reason(err)
}

View file

@ -275,7 +275,7 @@ impl From<mail_auth::Error> for Event<EventType> {
mail_auth::Error::ArcInvalidInstance(instance) => {
EventType::Arc(ArcEvent::InvalidInstance).ctx(Key::Id, instance)
}
mail_auth::Error::ArcInvalidCV => EventType::Arc(ArcEvent::InvalidCV).into_err(),
mail_auth::Error::ArcInvalidCV => EventType::Arc(ArcEvent::InvalidCv).into_err(),
mail_auth::Error::ArcHasHeaderTag => EventType::Arc(ArcEvent::HasHeaderTag).into_err(),
mail_auth::Error::ArcBrokenChain => EventType::Arc(ArcEvent::BrokenChain).into_err(),
mail_auth::Error::NotAligned => {

View file

@ -333,9 +333,18 @@ mod tests {
assert!(!Level::Error.is_contained(Level::Trace));
assert!(!Level::Debug.is_contained(Level::Trace));
let mut names = Vec::with_capacity(100);
for event in EventType::variants() {
println!("{}", event.name());
names.push(event.name());
assert_eq!(EventType::try_parse(event.name()).unwrap(), event);
}
// sort
names.sort();
for name in names {
println!("{:?},", name);
}
}
}

View file

@ -146,7 +146,7 @@ impl Event<EventType> {
!matches!(
self.inner,
EventType::Jmap(
JmapEvent::UnknownCapability | JmapEvent::NotJSON | JmapEvent::NotRequest
JmapEvent::UnknownCapability | JmapEvent::NotJson | JmapEvent::NotRequest
)
)
}
@ -256,13 +256,13 @@ impl StoreEvent {
match self {
Self::AssertValueFailed => "Another process has modified the value",
Self::BlobMissingMarker => "Blob is missing marker",
Self::FoundationDBError => "FoundationDB error",
Self::MySQLError => "MySQL error",
Self::PostgreSQLError => "PostgreSQL error",
Self::RocksDBError => "RocksDB error",
Self::SQLiteError => "SQLite error",
Self::FoundationdbError => "FoundationDB error",
Self::MysqlError => "MySQL error",
Self::PostgresqlError => "PostgreSQL error",
Self::RocksdbError => "RocksDB error",
Self::SqliteError => "SQLite error",
Self::LdapError => "LDAP error",
Self::ElasticSearchError => "ElasticSearch error",
Self::ElasticsearchError => "ElasticSearch error",
Self::RedisError => "Redis error",
Self::S3Error => "S3 error",
Self::FilesystemError => "Filesystem error",
@ -388,7 +388,7 @@ impl JmapEvent {
Self::CannotCalculateChanges => "Cannot calculate changes",
Self::UnknownDataType => "Unknown data type",
Self::UnknownCapability => "Unknown capability",
Self::NotJSON => "Not JSON",
Self::NotJson => "Not JSON",
Self::NotRequest => "Not a request",
_ => "Other message",
}
@ -858,13 +858,13 @@ impl EventType {
StoreEvent::Ingest | StoreEvent::IngestDuplicate => Level::Info,
StoreEvent::IngestError
| StoreEvent::AssertValueFailed
| StoreEvent::FoundationDBError
| StoreEvent::MySQLError
| StoreEvent::PostgreSQLError
| StoreEvent::RocksDBError
| StoreEvent::SQLiteError
| StoreEvent::FoundationdbError
| StoreEvent::MysqlError
| StoreEvent::PostgresqlError
| StoreEvent::RocksdbError
| StoreEvent::SqliteError
| StoreEvent::LdapError
| StoreEvent::ElasticSearchError
| StoreEvent::ElasticsearchError
| StoreEvent::RedisError
| StoreEvent::S3Error
| StoreEvent::FilesystemError
@ -1089,7 +1089,7 @@ impl EventType {
EventType::Arc(event) => match event {
ArcEvent::ChainTooLong
| ArcEvent::InvalidInstance
| ArcEvent::InvalidCV
| ArcEvent::InvalidCv
| ArcEvent::HasHeaderTag
| ArcEvent::BrokenChain => Level::Debug,
ArcEvent::SealerNotFound => Level::Warn,
@ -1109,10 +1109,9 @@ impl EventType {
| PurgeEvent::TombstoneCleanup => Level::Debug,
},
EventType::Eval(event) => match event {
EvalEvent::Error => Level::Debug,
EvalEvent::Result => Level::Trace,
EvalEvent::Error | EvalEvent::DirectoryNotFound | EvalEvent::StoreNotFound => {
Level::Warn
}
EvalEvent::DirectoryNotFound | EvalEvent::StoreNotFound => Level::Warn,
},
EventType::Server(event) => match event {
ServerEvent::Startup | ServerEvent::Shutdown | ServerEvent::Licensing => {
@ -1144,18 +1143,18 @@ impl EventType {
| AcmeEvent::DnsRecordCreationFailed => Level::Warn,
AcmeEvent::RenewBackoff
| AcmeEvent::DnsRecordDeletionFailed
| AcmeEvent::ClientSuppliedSNI
| AcmeEvent::ClientMissingSNI
| AcmeEvent::ClientSuppliedSni
| AcmeEvent::ClientMissingSni
| AcmeEvent::DnsRecordNotPropagated
| AcmeEvent::DnsRecordLookupFailed => Level::Debug,
},
EventType::Tls(event) => match event {
TlsEvent::Handshake => Level::Info,
TlsEvent::HandshakeError => Level::Debug,
TlsEvent::HandshakeError | TlsEvent::CertificateNotFound => Level::Debug,
TlsEvent::NotConfigured => Level::Error,
TlsEvent::CertificateNotFound
| TlsEvent::NoCertificatesAvailable
| TlsEvent::MultipleCertificatesAvailable => Level::Warn,
TlsEvent::NoCertificatesAvailable | TlsEvent::MultipleCertificatesAvailable => {
Level::Warn
}
},
EventType::Sieve(event) => match event {
SieveEvent::NotSupported
@ -1182,8 +1181,8 @@ impl EventType {
SpamEvent::ListUpdated => Level::Info,
},
EventType::Http(event) => match event {
HttpEvent::Error | HttpEvent::XForwardedMissing => Level::Warn,
HttpEvent::RequestUrl => Level::Debug,
HttpEvent::XForwardedMissing => Level::Warn,
HttpEvent::Error | HttpEvent::RequestUrl => Level::Debug,
HttpEvent::RequestBody | HttpEvent::ResponseBody => Level::Trace,
},
EventType::PushSubscription(event) => match event {
@ -1269,7 +1268,7 @@ impl EventType {
| DeliveryEvent::DomainDeliveryStart
| DeliveryEvent::MxLookupFailed
| DeliveryEvent::IpLookupFailed
| DeliveryEvent::NullMX
| DeliveryEvent::NullMx
| DeliveryEvent::Connect
| DeliveryEvent::ConnectError
| DeliveryEvent::GreetingFailed
@ -1616,7 +1615,7 @@ impl DeliveryEvent {
DeliveryEvent::MxLookupFailed => "MX record lookup failed",
DeliveryEvent::IpLookup => "IP address lookup",
DeliveryEvent::IpLookupFailed => "IP address lookup failed",
DeliveryEvent::NullMX => "Null MX record found",
DeliveryEvent::NullMx => "Null MX record found",
DeliveryEvent::Connect => "Connecting to remote server",
DeliveryEvent::ConnectError => "Connection error",
DeliveryEvent::MissingOutboundHostname => "Missing outbound hostname in configuration",
@ -1849,9 +1848,9 @@ impl NetworkEvent {
match self {
NetworkEvent::ConnectionStart => "Network connection started",
NetworkEvent::ConnectionEnd => "Network connection ended",
NetworkEvent::ListenStart => "Network listening started",
NetworkEvent::ListenStop => "Network listening stopped",
NetworkEvent::ListenError => "Network listening error",
NetworkEvent::ListenStart => "Network listener started",
NetworkEvent::ListenStop => "Network listener stopped",
NetworkEvent::ListenError => "Network listener error",
NetworkEvent::BindError => "Network bind error",
NetworkEvent::ReadError => "Network read error",
NetworkEvent::WriteError => "Network write error",
@ -1920,8 +1919,8 @@ impl AcmeEvent {
AcmeEvent::DnsRecordLookupFailed => "ACME DNS record lookup failed",
AcmeEvent::DnsRecordPropagated => "ACME DNS record propagated",
AcmeEvent::DnsRecordPropagationTimeout => "ACME DNS record propagation timeout",
AcmeEvent::ClientSuppliedSNI => "ACME client supplied SNI",
AcmeEvent::ClientMissingSNI => "ACME client missing SNI",
AcmeEvent::ClientSuppliedSni => "ACME client supplied SNI",
AcmeEvent::ClientMissingSni => "ACME client missing SNI",
AcmeEvent::TlsAlpnReceived => "ACME TLS ALPN received",
AcmeEvent::TlsAlpnError => "ACME TLS ALPN error",
AcmeEvent::TokenNotFound => "ACME token not found",
@ -1980,7 +1979,7 @@ impl ArcEvent {
match self {
ArcEvent::ChainTooLong => "ARC chain too long",
ArcEvent::InvalidInstance => "Invalid ARC instance",
ArcEvent::InvalidCV => "Invalid ARC CV",
ArcEvent::InvalidCv => "Invalid ARC CV",
ArcEvent::HasHeaderTag => "ARC has header tag",
ArcEvent::BrokenChain => "Broken ARC chain",
ArcEvent::SealerNotFound => "ARC sealer not found",
@ -2073,13 +2072,13 @@ impl StoreEvent {
match self {
StoreEvent::IngestError => "Message ingestion error",
StoreEvent::AssertValueFailed => "Another process modified the record",
StoreEvent::FoundationDBError => "FoundationDB error",
StoreEvent::MySQLError => "MySQL error",
StoreEvent::PostgreSQLError => "PostgreSQL error",
StoreEvent::RocksDBError => "RocksDB error",
StoreEvent::SQLiteError => "SQLite error",
StoreEvent::FoundationdbError => "FoundationDB error",
StoreEvent::MysqlError => "MySQL error",
StoreEvent::PostgresqlError => "PostgreSQL error",
StoreEvent::RocksdbError => "RocksDB error",
StoreEvent::SqliteError => "SQLite error",
StoreEvent::LdapError => "LDAP error",
StoreEvent::ElasticSearchError => "ElasticSearch error",
StoreEvent::ElasticsearchError => "ElasticSearch error",
StoreEvent::RedisError => "Redis error",
StoreEvent::S3Error => "S3 error",
StoreEvent::FilesystemError => "Filesystem error",
@ -2122,7 +2121,7 @@ impl JmapEvent {
JmapEvent::CannotCalculateChanges => "Cannot calculate JMAP changes",
JmapEvent::UnknownDataType => "Unknown JMAP data type",
JmapEvent::UnknownCapability => "Unknown JMAP capability",
JmapEvent::NotJSON => "JMAP request is not JSON",
JmapEvent::NotJson => "JMAP request is not JSON",
JmapEvent::NotRequest => "JMAP input is not a request",
JmapEvent::WebsocketStart => "JMAP WebSocket connection started",
JmapEvent::WebsocketStop => "JMAP WebSocket connection stopped",

View file

@ -444,7 +444,7 @@ pub enum DeliveryEvent {
MxLookupFailed,
IpLookup,
IpLookupFailed,
NullMX,
NullMx,
Connect,
ConnectError,
MissingOutboundHostname,
@ -693,8 +693,8 @@ pub enum AcmeEvent {
DnsRecordLookupFailed,
DnsRecordPropagated,
DnsRecordPropagationTimeout,
ClientSuppliedSNI,
ClientMissingSNI,
ClientSuppliedSni,
ClientMissingSni,
TlsAlpnReceived,
TlsAlpnError,
TokenNotFound,
@ -741,7 +741,7 @@ pub enum ConfigEvent {
pub enum ArcEvent {
ChainTooLong,
InvalidInstance,
InvalidCV,
InvalidCv,
HasHeaderTag,
BrokenChain,
SealerNotFound,
@ -817,13 +817,13 @@ pub enum StoreEvent {
// Errors
IngestError,
AssertValueFailed,
FoundationDBError,
MySQLError,
PostgreSQLError,
RocksDBError,
SQLiteError,
FoundationdbError,
MysqlError,
PostgresqlError,
RocksdbError,
SqliteError,
LdapError,
ElasticSearchError,
ElasticsearchError,
RedisError,
S3Error,
FilesystemError,
@ -874,7 +874,7 @@ pub enum JmapEvent {
// Request errors
UnknownCapability,
NotJSON,
NotJson,
NotRequest,
// Not JMAP standard

View file

@ -45,7 +45,10 @@ impl Subscriber {
pub fn send_batch(&mut self) -> Result<(), ChannelError> {
if !self.batch.is_empty() {
match self.tx.try_send(std::mem::take(&mut self.batch)) {
match self
.tx
.try_send(std::mem::replace(&mut self.batch, Vec::with_capacity(128)))
{
Ok(_) => Ok(()),
Err(TrySendError::Full(mut events)) => {
if self.lossy && events.len() > MAX_BATCH_SIZE {

View file

@ -130,6 +130,13 @@ impl Config {
})
}
pub fn prefix<'x, 'y: 'x>(&'y self, prefix: impl AsKey) -> impl Iterator<Item = &str> + 'x {
let prefix = prefix.as_prefix();
self.keys
.keys()
.filter_map(move |key| key.strip_prefix(&prefix))
}
pub fn set_values<'x, 'y: 'x>(&'y self, prefix: impl AsKey) -> impl Iterator<Item = &str> + 'x {
let prefix = prefix.as_prefix();

View file

@ -0,0 +1,3 @@
#!/bin/sh
# Start a local glauth LDAP test server (darwin-arm64 build) using the
# repository's test LDAP configuration. Assumes the glauth binary was
# downloaded to ~/utils/glauth/ — TODO confirm path on other dev machines.
~/utils/glauth/glauth-darwin-arm64 -c tests/resources/ldap/ldap.cfg

View file

@ -2,6 +2,7 @@ From: "Mail Delivery Subsystem" <MAILER-DAEMON@example.org>
To: sender@foobar.org
Auto-Submitted: auto-generated
Subject: Warning: Delay in message delivery
MIME-Version: 1.0
Content-Type: multipart/report; report-type="delivery-status";
boundary="mime_boundary"

View file

@ -2,6 +2,7 @@ From: "Mail Delivery Subsystem" <MAILER-DAEMON@example.org>
To: sender@foobar.org
Auto-Submitted: auto-generated
Subject: Failed to deliver message
MIME-Version: 1.0
Content-Type: multipart/report; report-type="delivery-status";
boundary="mime_boundary"

View file

@ -2,6 +2,7 @@ From: "Mail Delivery Subsystem" <MAILER-DAEMON@example.org>
To: sender@foobar.org
Auto-Submitted: auto-generated
Subject: Partially delivered message
MIME-Version: 1.0
Content-Type: multipart/report; report-type="delivery-status";
boundary="mime_boundary"

View file

@ -2,6 +2,7 @@ From: "Mail Delivery Subsystem" <MAILER-DAEMON@example.org>
To: sender@foobar.org
Auto-Submitted: auto-generated
Subject: Successfully delivered message
MIME-Version: 1.0
Content-Type: multipart/report; report-type="delivery-status";
boundary="mime_boundary"