Mirror of https://github.com/stalwartlabs/mail-server.git (synced 2025-10-10 20:45:48 +08:00)

Commit c19d2ceb43 (parent 2eb99ed3bd): Fixed tests for FDB and SQL stores

16 changed files with 113 additions and 42 deletions
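In outline: the scattered `batch.len() >= 1000` size checks in the store code and tests are replaced by a new `BatchBuilder::is_large_batch()` helper (added in the `impl BatchBuilder` hunk below); the MySQL and PostgreSQL stores attach `caused_by(trc::location!())` to their `AssertValueFailed` error paths and update the per-transaction assert cache on `ValueOp::Clear`; HTTP request parsing is wrapped in `Box::pin`; the previously commented-out JMAP tests are re-enabled; and the WebDAV lock tests now accept either `Second-456` or `Second-455` as the remaining lock timeout. A minimal sketch of the flush pattern the converted loops share (assembled from the hunks below, not a verbatim excerpt; the `clear` call is shown with a generic key):

    let mut batch = BatchBuilder::new();
    for key in delete_keys {
        // Flush once the builder reports it has grown too large
        // (over 5_000_000 bytes or 1000 ops; see is_large_batch below).
        if batch.is_large_batch() {
            self.write(batch.build_all()).await?;
            batch = BatchBuilder::new();
        }
        batch.clear(key);
    }
    // Write whatever remains after the loop.
    if !batch.is_empty() {
        self.write(batch.build_all()).await?;
    }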
@@ -328,7 +328,7 @@ async fn restore_file(store: Store, blob_store: BlobStore, path: &Path) {
                 set: true,
             });

-            if batch.len() >= 1000 {
+            if batch.is_large_batch() {
                 store
                     .write(batch.build_all())
                     .await
@@ -296,7 +296,7 @@ impl TracingStore for Store {
         let mut batch = BatchBuilder::new();

         for key in delete_keys {
-            if batch.len() >= 1000 {
+            if batch.is_large_batch() {
                 self.write(batch.build_all()).await?;
                 batch = BatchBuilder::new();
             }
@@ -196,7 +196,7 @@ impl ManageReports for Server {

                             batch.clear(ValueClass::Report(report_id));

-                            if batch.len() > 1000 {
+                            if batch.is_large_batch() {
                                 if let Err(err) =
                                     server.core.storage.data.write(batch.build_all()).await
                                 {
@@ -716,20 +716,19 @@ async fn handle_session<T: SessionStream>(inner: Arc<Inner>, session: SessionDat
         };

         // Parse HTTP request
-        let response = match server
-            .parse_http_request(
-                req,
-                HttpSessionData {
-                    instance,
-                    local_ip: session.local_ip,
-                    local_port: session.local_port,
-                    remote_ip,
-                    remote_port: session.remote_port,
-                    is_tls,
-                    session_id: session.session_id,
-                },
-            )
-            .await
+        let response = match Box::pin(server.parse_http_request(
+            req,
+            HttpSessionData {
+                instance,
+                local_ip: session.local_ip,
+                local_port: session.local_port,
+                remote_ip,
+                remote_port: session.remote_port,
+                is_tls,
+                session_id: session.session_id,
+            },
+        ))
+        .await
         {
             Ok(response) => response,
             Err(err) => {
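The only functional change above is the `Box::pin` wrapper; presumably it moves the large `parse_http_request` state machine to the heap so that the enclosing session-handling future stays small. A generic illustration of the pattern (hypothetical functions, not from this codebase):

    // Boxing a large future: the caller's own future then holds a
    // heap pointer instead of embedding big_work's entire state.
    async fn big_work() -> u64 {
        // ...imagine large locals held alive across an .await here...
        42
    }

    async fn caller() -> u64 {
        Box::pin(big_work()).await
    }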
@@ -50,7 +50,9 @@ impl MysqlStore {
                     && start.elapsed() < MAX_COMMIT_TIME => {}
                 CommitError::Retry => {
                     if retry_count > MAX_COMMIT_ATTEMPTS || start.elapsed() > MAX_COMMIT_TIME {
-                        return Err(trc::StoreEvent::AssertValueFailed.into());
+                        return Err(trc::StoreEvent::AssertValueFailed
+                            .into_err()
+                            .caused_by(trc::location!()));
                     }
                 }
                 CommitError::Mysql(err) => {
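This and the following hunks apply one pattern across both SQL stores: the bare `.into()` conversion of `AssertValueFailed` is upgraded to the `trc` crate's `.into_err().caused_by(trc::location!())` form, which records the file and line where the failure was observed. Restated side by side:

    // Before: the event alone, with no capture site.
    return Err(trc::StoreEvent::AssertValueFailed.into());

    // After: the same event, now tagged with the exact spot where the
    // commit-retry loop gave up, via trc::location!().
    return Err(trc::StoreEvent::AssertValueFailed
        .into_err()
        .caused_by(trc::location!()));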
@@ -168,6 +170,7 @@ impl MysqlStore {
                             trx.rollback().await?;
                             return Err(trc::StoreEvent::AssertValueFailed
                                 .into_err()
+                                .caused_by(trc::location!())
                                 .into());
                         }
                     }
@@ -219,6 +222,11 @@ impl MysqlStore {
                             );
                         }
                         ValueOp::Clear => {
+                            // Update asserted value
+                            if let Some(exists) = asserted_values.get_mut(&key) {
+                                *exists = false;
+                            }
+
                             let s = trx
                                 .prep(format!("DELETE FROM {} WHERE k = ?", table))
                                 .await?;
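The new `ValueOp::Clear` bookkeeping appears to close a gap in the per-transaction assert cache: once a key is deleted inside the transaction, any later assertion on it in the same batch should see the key as absent. A standalone sketch of the invariant, with a plain `HashMap` standing in for `asserted_values` (hypothetical simplification):

    use std::collections::HashMap;

    fn main() {
        // key -> "does this key exist, from this transaction's view?"
        let mut asserted_values: HashMap<Vec<u8>, bool> = HashMap::new();
        let key = b"k1".to_vec();

        asserted_values.insert(key.clone(), true); // asserted present earlier
        // ... the DELETE FROM ... WHERE k = ? statement executes ...
        if let Some(exists) = asserted_values.get_mut(&key) {
            *exists = false; // later asserts must now treat the key as gone
        }
        assert_eq!(asserted_values.get(&key), Some(&false));
    }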
@@ -304,7 +312,10 @@ impl MysqlStore {
                         .unwrap_or_else(|| (false, assert_value.is_none()));
                     if !matches {
                         trx.rollback().await?;
-                        return Err(trc::StoreEvent::AssertValueFailed.into_err().into());
+                        return Err(trc::StoreEvent::AssertValueFailed
+                            .into_err()
+                            .caused_by(trc::location!())
+                            .into());
                     }
                     asserted_values.insert(key, exists);
                 }
@@ -48,7 +48,9 @@ impl PostgresStore {
                 ) if retry_count < MAX_COMMIT_ATTEMPTS
                     && start.elapsed() < MAX_COMMIT_TIME => {}
                 Some(&SqlState::UNIQUE_VIOLATION) => {
-                    return Err(trc::StoreEvent::AssertValueFailed.into());
+                    return Err(trc::StoreEvent::AssertValueFailed
+                        .into_err()
+                        .caused_by(trc::location!()));
                 }
                 _ => return Err(into_error(err)),
             },
@@ -57,7 +59,9 @@ impl PostgresStore {
                     if retry_count > MAX_COMMIT_ATTEMPTS
                         || start.elapsed() > MAX_COMMIT_TIME
                     {
-                        return Err(trc::StoreEvent::AssertValueFailed.into());
+                        return Err(trc::StoreEvent::AssertValueFailed
+                            .into_err()
+                            .caused_by(trc::location!()));
                     }
                 }
             }
@@ -165,7 +169,10 @@ impl PostgresStore {
                     };

                     if trx.execute(&s, &[&key, &(*value)]).await? == 0 {
-                        return Err(trc::StoreEvent::AssertValueFailed.into_err().into());
+                        return Err(trc::StoreEvent::AssertValueFailed
+                            .into_err()
+                            .caused_by(trc::location!())
+                            .into());
                     }
                 }
                 ValueOp::AtomicAdd(by) => {
@@ -210,6 +217,11 @@ impl PostgresStore {
                             .prepare_cached(&format!("DELETE FROM {} WHERE k = $1", table))
                             .await?;
                         trx.execute(&s, &[&key]).await?;
+
+                        // Update asserted value
+                        if let Some(exists) = asserted_values.get_mut(&key) {
+                            *exists = false;
+                        }
                     }
                 }
             }
@@ -298,7 +310,10 @@ impl PostgresStore {
                 })
                 .unwrap_or_else(|| (false, assert_value.is_none()));
             if !matches {
-                return Err(trc::StoreEvent::AssertValueFailed.into_err().into());
+                return Err(trc::StoreEvent::AssertValueFailed
+                    .into_err()
+                    .caused_by(trc::location!())
+                    .into());
             }
             asserted_values.insert(key, exists);
         }
@@ -398,7 +398,7 @@ impl InMemoryStore {
                 class: ValueClass::InMemory(InMemoryClass::Key(key)),
                 op: ValueOp::Clear,
             });
-            if batch.len() >= 1000 {
+            if batch.is_large_batch() {
                 store
                     .write(batch.build_all())
                     .await
@@ -425,7 +425,7 @@ impl InMemoryStore {
                 class: ValueClass::InMemory(InMemoryClass::Key(key)),
                 op: ValueOp::Clear,
             });
-            if batch.len() >= 1000 {
+            if batch.is_large_batch() {
                 store
                     .write(batch.build_all())
                     .await
@@ -360,7 +360,7 @@ impl Store {
         let mut batch = BatchBuilder::new();

         for key in delete_keys {
-            if batch.len() >= 1000 {
+            if batch.is_large_batch() {
                 self.write(std::mem::take(&mut batch).build_all())
                     .await
                     .caused_by(trc::location!())?;
@@ -638,7 +638,7 @@ impl Store {
                 class: ValueClass::InMemory(InMemoryClass::Key(key)),
                 op: ValueOp::Clear,
             });
-            if batch.len() >= 1000 {
+            if batch.is_large_batch() {
                 self.write(batch.build_all()).await.unwrap();
                 batch = BatchBuilder::new();
             }
@@ -659,7 +659,7 @@ impl Store {
                 class: ValueClass::InMemory(InMemoryClass::Key(key)),
                 op: ValueOp::Clear,
             });
-            if batch.len() >= 1000 {
+            if batch.is_large_batch() {
                 self.write(batch.build_all()).await.unwrap();
                 batch = BatchBuilder::new();
             }
@@ -217,7 +217,7 @@ impl Store {
             .update_document(document.document_id);

         for key in keys.into_iter() {
-            if batch.len() >= 1000 {
+            if batch.is_large_batch() {
                 self.write(batch.build_all()).await?;
                 batch = BatchBuilder::new();
                 batch
@@ -311,8 +311,10 @@ impl Store {
         batch.update_document(document_id);

         for key in keys {
-            if batch.len() >= 1000 {
-                self.write(batch.build_all()).await?;
+            if batch.is_large_batch() {
+                self.write(batch.build_all())
+                    .await
+                    .caused_by(trc::location!())?;
                 batch = BatchBuilder::new();
                 batch
                     .with_account_id(account_id)
@@ -327,7 +329,9 @@ impl Store {
         }

         if !batch.is_empty() {
-            self.write(batch.build_all()).await?;
+            self.write(batch.build_all())
+                .await
+                .caused_by(trc::location!())?;
         }

         Ok(())
@@ -116,7 +116,7 @@ impl Store {
         batch.with_account_id(account_id);
         let mut last_collection = u8::MAX;
         for (revoke_account_id, acl_item) in delete_keys.into_iter() {
-            if batch.len() >= 1000 {
+            if batch.is_large_batch() {
                 self.write(batch.build_all())
                     .await
                     .caused_by(trc::location!())?;
@@ -355,7 +355,7 @@ impl BatchBuilder {
     }

     pub fn commit_point(&mut self) -> &mut Self {
-        if self.batch_size > 5_000_000 || self.batch_ops > 1000 {
+        if self.is_large_batch() {
             self.serialize_changes();
             self.commit_points.push(self.ops.len());
             self.batch_ops = 0;
@@ -370,6 +370,11 @@ impl BatchBuilder {
         self
     }

+    #[inline]
+    pub fn is_large_batch(&self) -> bool {
+        self.batch_size > 5_000_000 || self.batch_ops > 1000
+    }
+
     pub fn any_op(&mut self, op: Operation) -> &mut Self {
         self.ops.push(op);
         self.batch_ops += 1;
@@ -215,7 +215,7 @@ impl Store {
         let mut batch = BatchBuilder::new();
         let mut last_account_id = u32::MAX;
         for (account_id, op) in delete_keys.into_iter() {
-            if batch.len() >= 1000 {
+            if batch.is_large_batch() {
                 last_account_id = u32::MAX;
                 self.write(batch.build_all())
                     .await
@@ -290,7 +290,7 @@ impl Store {
         batch.with_account_id(account_id);
         let mut last_collection = u8::MAX;
         for (collection, document_id, op) in delete_keys.into_iter() {
-            if batch.len() >= 1000 {
+            if batch.is_large_batch() {
                 self.write(batch.build_all())
                     .await
                     .caused_by(trc::location!())?;
@@ -90,7 +90,7 @@ async fn jmap_tests() {
     .await;

     webhooks::test(&mut params).await;
-    /*email_query::test(&mut params, delete).await;
+    email_query::test(&mut params, delete).await;
     email_get::test(&mut params).await;
     email_set::test(&mut params).await;
     email_parse::test(&mut params).await;
@@ -105,7 +105,7 @@ async fn jmap_tests() {
     auth_acl::test(&mut params).await;
     auth_limits::test(&mut params).await;
     auth_oauth::test(&mut params).await;
-    event_source::test(&mut params).await;*/
+    event_source::test(&mut params).await;
     push_subscription::test(&mut params).await;
     sieve_script::test(&mut params).await;
     vacation_response::test(&mut params).await;
@@ -58,9 +58,9 @@ pub async fn test(test: &WebDavTest) {
             "D:prop.D:lockdiscovery.D:activelock.D:owner.D:href",
             "super-owner",
         )
-        .with_value(
+        .with_any_value(
             "D:prop.D:lockdiscovery.D:activelock.D:timeout",
-            "Second-456",
+            ["Second-456", "Second-455"],
         );

     // Test 3: Creating a collection under an unmapped resource with a lock token should fail
@@ -116,14 +116,17 @@ pub async fn test(test: &WebDavTest) {
         let props = response.properties(&href);
         props
             .get(DavProperty::WebDav(WebDavProperty::LockDiscovery))
-            .with_values([
+            .with_some_values([
                 "D:activelock.D:owner.D:href:super-owner",
-                "D:activelock.D:timeout:Second-456",
                 "D:activelock.D:depth:infinity",
                 format!("D:activelock.D:locktoken.D:href:{lock_token}").as_str(),
                 format!("D:activelock.D:lockroot.D:href:{path}").as_str(),
                 "D:activelock.D:locktype.D:write",
                 "D:activelock.D:lockscope.D:exclusive",
+            ])
+            .with_any_values([
+                "D:activelock.D:timeout:Second-456",
+                "D:activelock.D:timeout:Second-455",
             ]);
     }

@@ -641,6 +641,24 @@ impl DavResponse {
         self
     }

+    pub fn with_any_value<'x>(
+        self,
+        query: &str,
+        expect: impl IntoIterator<Item = &'x str>,
+    ) -> Self {
+        let expect = expect.into_iter().collect::<AHashSet<_>>();
+        if let Some(value) = self.find_keys(query).next() {
+            if !expect.contains(value) {
+                self.dump_response();
+                panic!("Expected {query} = {expect:?} but got {value:?}");
+            }
+        } else {
+            self.dump_response();
+            panic!("Key {query} not found.");
+        }
+        self
+    }
+
     pub fn with_values<I, T>(self, query: &str, expect: I) -> Self
     where
         I: IntoIterator<Item = T>,
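`with_any_value` passes when the single extracted value matches any of the listed alternatives. The lock test earlier in this commit uses it because the lock's remaining timeout can tick from 456 down to 455 seconds between the LOCK request and the assertion, which made the exact match flaky:

    response
        .with_value(
            "D:prop.D:lockdiscovery.D:activelock.D:owner.D:href",
            "super-owner",
        )
        .with_any_value(
            "D:prop.D:lockdiscovery.D:activelock.D:timeout",
            ["Second-456", "Second-455"], // either tick is acceptable
        );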
@@ -837,6 +837,22 @@ impl<'x> DavQueryResult<'x> {
         self
     }

+    pub fn with_any_values(&self, expected_values: impl IntoIterator<Item = &'x str>) -> &Self {
+        let values = self
+            .values
+            .iter()
+            .map(|s| s.as_str())
+            .collect::<AHashSet<_>>();
+        let expected_values = AHashSet::from_iter(expected_values);
+
+        if values.is_disjoint(&expected_values) {
+            self.response.dump_response();
+            panic!("Expected at least one of {expected_values:?} values, but got {values:?}",);
+        }
+
+        self
+    }
+
     pub fn without_values(&self, expected_values: impl IntoIterator<Item = &'x str>) -> &Self {
         let expected_values = AHashSet::from_iter(expected_values);
         let values = self
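`with_any_values` is the multi-value counterpart: it fails only when the extracted values and the expected set are disjoint, so at least one match suffices, while `with_some_values` (used at the call site above; its implementation is not shown in this diff) presumably keeps a stricter per-value requirement. A standalone sketch of the set logic, with std `HashSet` standing in for `AHashSet`:

    use std::collections::HashSet;

    fn main() {
        let got = HashSet::from(["D:activelock.D:timeout:Second-455"]);
        let want = HashSet::from([
            "D:activelock.D:timeout:Second-456",
            "D:activelock.D:timeout:Second-455",
        ]);
        // with_any_values passes: the sets share at least one element.
        assert!(!got.is_disjoint(&want));
    }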