Calendar alarm migration implementation

mdecimus 2025-06-07 18:02:37 +02:00
parent 7f90cc6abc
commit d48418b258
10 changed files with 503 additions and 220 deletions
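In outline (as reconstructed from the diff below): DATABASE_SCHEMA_VERSION is raised from 1 to 2, and a new migrate_v0_12_0 step re-keys pending IndexEmail task-queue entries so they become due immediately and rewrites stored calendar events from the legacy CalendarEventV1 layout, re-expanding each event with CalendarEventData::new so the next email alarm is recomputed and queued. The previous migration logic moves, unchanged, into its own migrate_v0_11 function. The alarm email switches to the minified calendar-alarm.html.min template, the MJML source drops inline styles in favor of a style block with color-scheme: light only, and the WebDAV alarm test now parses the generated MIME message instead of scanning the raw blob, expecting HTML-escaped body text.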

Cargo.lock (generated): 3 changes
View file

@@ -4476,10 +4476,13 @@ version = "0.12.4"
dependencies = [
"base64 0.22.1",
"bincode 1.3.3",
"calcard",
"common",
"compact_str",
"dav-proto",
"directory",
"email",
"groupware",
"jmap_proto",
"lz4_flex",
"mail-auth",

View file

@@ -120,7 +120,7 @@ impl GroupwareConfig {
.map(|s| s.to_string()),
alarms_template: Template::parse(include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/../../resources/email-templates/calendar-alarm.html"
"/../../resources/email-templates/calendar-alarm.html.min"
)))
.expect("Failed to parse calendar template"),
}

View file

@@ -72,7 +72,7 @@ pub static USER_AGENT: &str = "Stalwart/1.0.0";
pub static DAEMON_NAME: &str = concat!("Stalwart v", env!("CARGO_PKG_VERSION"),);
pub static PROD_ID: &str = "-//Stalwart Labs Ltd.//Stalwart Server//EN";
-pub const DATABASE_SCHEMA_VERSION: u32 = 1;
+pub const DATABASE_SCHEMA_VERSION: u32 = 2;
pub const LONG_1D_SLUMBER: Duration = Duration::from_secs(60 * 60 * 24);
pub const LONG_1Y_SLUMBER: Duration = Duration::from_secs(60 * 60 * 24 * 365);
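For orientation, this version bump is what routes existing stores into the new migration path. A minimal, self-contained sketch of the dispatch (a hypothetical stub with strings in place of the real async calls; the actual match lives in the migration mod.rs hunk further down):

// Schematic model of the schema-version dispatch added in this commit;
// storage reads are stubbed out, only the shape of the decision is mirrored.
const DATABASE_SCHEMA_VERSION: u32 = 2;

fn plan_migration(stored: Option<u32>, new_install: bool) -> &'static str {
    match stored {
        // Already on the current schema: nothing to do.
        Some(DATABASE_SCHEMA_VERSION) => "up to date",
        // v0.12.0 data (schema 1): rewrite task queue and calendar events.
        Some(1) => "run migrate_v0_12_0",
        // Anything newer than we understand: abort loudly.
        Some(_) => "panic: unknown schema version",
        // No version key at all: fresh install or pre-v0.12 data.
        None if new_install => "nothing to migrate",
        None => "run migrate_v0_11",
    }
}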

View file

@@ -14,9 +14,12 @@ common = { path = "../common" }
email = { path = "../email" }
directory = { path = "../directory" }
smtp = { path = "../smtp" }
+groupware = { path = "../groupware" }
+dav-proto = { path = "../dav-proto" }
mail-parser = { version = "0.11", features = ["full_encoding"] }
mail-auth = { version = "0.7.1", features = ["rkyv"] }
sieve-rs = { version = "0.7", features = ["rkyv"] }
+calcard = { version = "0.1.2", features = ["rkyv"] }
tokio = { version = "1.45", features = ["net", "macros"] }
serde = { version = "1.0", features = ["derive"]}
serde_json = "1.0"

View file

@@ -0,0 +1,152 @@
/*
* SPDX-FileCopyrightText: 2020 Stalwart Labs Ltd <hello@stalw.art>
*
* SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-SEL
*/
use calcard::{common::timezone::Tz, icalendar::ICalendar};
use common::{DavName, Server};
use dav_proto::schema::request::DeadProperty;
use groupware::calendar::{
AlarmDelta, CalendarEvent, CalendarEventData, ComponentTimeRange, UserProperties,
};
use jmap_proto::types::{collection::Collection, property::Property};
use store::{
Serialize,
rand::{self, seq::SliceRandom},
write::{Archiver, BatchBuilder, serialize::rkyv_deserialize},
};
use trc::AddContext;
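// NOTE: the *V1 structs below are a frozen copy of the previous on-disk
// layout; the old rkyv shape must be kept verbatim so that records written
// before the schema bump can still be deserialized during migration.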
#[derive(
rkyv::Archive, rkyv::Deserialize, rkyv::Serialize, Debug, Default, Clone, PartialEq, Eq,
)]
pub struct CalendarEventV1 {
pub names: Vec<DavName>,
pub display_name: Option<String>,
pub data: CalendarEventDataV1,
pub user_properties: Vec<UserProperties>,
pub flags: u16,
pub dead_properties: DeadProperty,
pub size: u32,
pub created: i64,
pub modified: i64,
}
#[derive(
rkyv::Archive, rkyv::Deserialize, rkyv::Serialize, Debug, Default, Clone, PartialEq, Eq,
)]
pub struct CalendarEventDataV1 {
pub event: ICalendar,
pub time_ranges: Box<[ComponentTimeRange]>,
pub alarms: Box<[AlarmV1]>,
pub base_offset: i64,
pub base_time_utc: u32,
pub duration: u32,
}
#[derive(
rkyv::Archive, rkyv::Deserialize, rkyv::Serialize, Debug, Default, Clone, PartialEq, Eq,
)]
#[rkyv(compare(PartialEq), derive(Debug))]
pub struct AlarmV1 {
pub comp_id: u16,
pub alarms: Box<[AlarmDelta]>,
}
pub(crate) async fn migrate_calendar_events(server: &Server) -> trc::Result<()> {
// Obtain account ids
let account_ids = server
.get_document_ids(u32::MAX, Collection::Principal)
.await
.caused_by(trc::location!())?
.unwrap_or_default();
let num_accounts = account_ids.len();
if num_accounts == 0 {
return Ok(());
}
let mut account_ids = account_ids.into_iter().collect::<Vec<_>>();
account_ids.shuffle(&mut rand::rng());
for account_id in account_ids {
let document_ids = server
.get_document_ids(account_id, Collection::CalendarEvent)
.await
.caused_by(trc::location!())?
.unwrap_or_default();
if document_ids.is_empty() {
continue;
}
let mut num_migrated = 0;
for document_id in document_ids.iter() {
let Some(archive) = server
.get_archive(account_id, Collection::CalendarEvent, document_id)
.await
.caused_by(trc::location!())?
else {
continue;
};
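// Try the legacy v1 layout first; if that fails but the record already
// deserializes as the current CalendarEvent, it was written after the
// schema bump and can safely be skipped.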
match archive.unarchive_untrusted::<CalendarEventV1>() {
Ok(event) => {
let event = rkyv_deserialize::<_, CalendarEventV1>(event).unwrap();
let mut next_email_alarm = None;
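// Re-expand the event with the current code so alarm deltas are
// recomputed; an upcoming email alarm, if any, is captured in
// next_email_alarm and written to the task queue below.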
let new_event = CalendarEvent {
names: event.names,
display_name: event.display_name,
data: CalendarEventData::new(
event.data.event,
Tz::Floating,
server.core.groupware.max_ical_instances,
&mut next_email_alarm,
),
user_properties: event.user_properties,
flags: event.flags,
dead_properties: event.dead_properties,
size: event.size,
created: event.created,
modified: event.modified,
};
let mut batch = BatchBuilder::new();
batch
.with_account_id(account_id)
.with_collection(Collection::CalendarEvent)
.update_document(document_id)
.set(
Property::Value,
Archiver::new(new_event)
.serialize()
.caused_by(trc::location!())?,
);
if let Some(next_email_alarm) = next_email_alarm {
next_email_alarm.write_task(&mut batch);
}
server
.store()
.write(batch.build_all())
.await
.caused_by(trc::location!())?;
num_migrated += 1;
}
Err(err) => {
if archive.unarchive_untrusted::<CalendarEvent>().is_err() {
return Err(err.caused_by(trc::location!()));
}
}
}
}
if num_migrated > 0 {
trc::event!(
Server(trc::ServerEvent::Startup),
Details =
format!("Migrated {num_migrated} Calendar Events for account {account_id}")
);
}
}
Ok(())
}

View file

@@ -4,23 +4,27 @@
* SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-SEL
*/
-use std::time::Duration;
+use crate::calendar::migrate_calendar_events;
use changelog::reset_changelog;
use common::{DATABASE_SCHEMA_VERSION, KV_LOCK_HOUSEKEEPER, Server};
use jmap_proto::types::{collection::Collection, property::Property};
use principal::{migrate_principal, migrate_principals};
use queue::migrate_queue;
use report::migrate_reports;
+use std::time::Duration;
use store::{
Deserialize, IterateParams, SUBSPACE_PROPERTY, SUBSPACE_QUEUE_MESSAGE, SUBSPACE_REPORT_IN,
-SUBSPACE_REPORT_OUT, SerializeInfallible, U32_LEN, Value, ValueKey,
+SUBSPACE_REPORT_OUT, SUBSPACE_TASK_QUEUE, SerializeInfallible, U32_LEN, Value, ValueKey,
dispatch::{DocumentSet, lookup::KeyValue},
rand::{self, seq::SliceRandom},
-write::{AnyClass, AnyKey, BatchBuilder, ValueClass, key::DeserializeBigEndian},
+write::{
+AnyClass, AnyKey, BatchBuilder, TaskQueueClass, ValueClass, key::DeserializeBigEndian, now,
+},
};
use trc::AddContext;
use utils::BlobHash;
+pub mod calendar;
pub mod changelog;
pub mod email;
pub mod encryption;
@@ -42,17 +46,18 @@ const LOCK_RETRY_TIME: Duration = Duration::from_secs(30);
pub async fn try_migrate(server: &Server) -> trc::Result<()> {
if std::env::var("FORCE_MIGRATE_QUEUE").is_ok() {
migrate_queue(server).await.caused_by(trc::location!())?;
-}
-if let Some(account_id) = std::env::var("FORCE_MIGRATE_ACCOUNT")
+return Ok(());
+} else if let Some(account_id) = std::env::var("FORCE_MIGRATE_ACCOUNT")
.ok()
.and_then(|s| s.parse().ok())
{
migrate_principal(server, account_id)
.await
.caused_by(trc::location!())?;
+return Ok(());
}
-if server
+match server
.store()
.get_value::<u32>(AnyKey {
subspace: SUBSPACE_PROPERTY,
@@ -60,162 +65,22 @@
})
.await
.caused_by(trc::location!())?
-== Some(DATABASE_SCHEMA_VERSION)
-{
-return Ok(());
-}
-if !is_new_install(server).await.caused_by(trc::location!())? {
-let force_lock = std::env::var("FORCE_LOCK").is_ok();
-let in_memory = server.in_memory_store();
-let principal_ids;
-loop {
-if force_lock
-|| in_memory
-.try_lock(
-KV_LOCK_HOUSEKEEPER,
-b"migrate_core_lock",
-LOCK_WAIT_TIME_CORE,
-)
-.await
-.caused_by(trc::location!())?
-{
-if in_memory
-.key_get::<()>(KeyValue::<()>::build_key(
-KV_LOCK_HOUSEKEEPER,
-b"migrate_core_done",
-))
-.await
-.caused_by(trc::location!())?
-.is_none()
-{
-migrate_queue(server).await.caused_by(trc::location!())?;
-migrate_reports(server).await.caused_by(trc::location!())?;
-reset_changelog(server).await.caused_by(trc::location!())?;
-principal_ids = migrate_principals(server)
-.await
-.caused_by(trc::location!())?;
-in_memory
-.key_set(
-KeyValue::new(
-KeyValue::<()>::build_key(
-KV_LOCK_HOUSEKEEPER,
-b"migrate_core_done",
-),
-b"1".to_vec(),
-)
-.expires(86400),
-)
-.await
-.caused_by(trc::location!())?;
-} else {
-principal_ids = server
-.get_document_ids(u32::MAX, Collection::Principal)
-.await
-.caused_by(trc::location!())?
-.unwrap_or_default();
-trc::event!(
-Server(trc::ServerEvent::Startup),
-Details = format!("Migration completed by another node.",)
-);
-}
-in_memory
-.remove_lock(KV_LOCK_HOUSEKEEPER, b"migrate_core_lock")
-.await
-.caused_by(trc::location!())?;
-break;
-} else {
-trc::event!(
-Server(trc::ServerEvent::Startup),
-Details = format!("Migration lock busy, waiting 30 seconds.",)
-);
-tokio::time::sleep(LOCK_RETRY_TIME).await;
-}
+Some(DATABASE_SCHEMA_VERSION) => {
+return Ok(());
+}
-if !principal_ids.is_empty() {
-let mut principal_ids = principal_ids.into_iter().collect::<Vec<_>>();
-principal_ids.shuffle(&mut rand::rng());
-loop {
-let mut skipped_principal_ids = Vec::new();
-let mut num_migrated = 0;
-for principal_id in principal_ids {
-let lock_key = format!("migrate_{principal_id}_lock");
-let done_key = format!("migrate_{principal_id}_done");
-if force_lock
-|| in_memory
-.try_lock(
-KV_LOCK_HOUSEKEEPER,
-lock_key.as_bytes(),
-LOCK_WAIT_TIME_ACCOUNT,
-)
-.await
-.caused_by(trc::location!())?
-{
-if in_memory
-.key_get::<()>(KeyValue::<()>::build_key(
-KV_LOCK_HOUSEKEEPER,
-done_key.as_bytes(),
-))
-.await
-.caused_by(trc::location!())?
-.is_none()
-{
-migrate_principal(server, principal_id)
-.await
-.caused_by(trc::location!())?;
-num_migrated += 1;
-in_memory
-.key_set(
-KeyValue::new(
-KeyValue::<()>::build_key(
-KV_LOCK_HOUSEKEEPER,
-done_key.as_bytes(),
-),
-b"1".to_vec(),
-)
-.expires(86400),
-)
-.await
-.caused_by(trc::location!())?;
-}
-in_memory
-.remove_lock(KV_LOCK_HOUSEKEEPER, lock_key.as_bytes())
-.await
-.caused_by(trc::location!())?;
-} else {
-skipped_principal_ids.push(principal_id);
-}
-}
-if !skipped_principal_ids.is_empty() {
-trc::event!(
-Server(trc::ServerEvent::Startup),
-Details = format!(
-"Migrated {num_migrated} accounts and {} are locked by another node, waiting 30 seconds.",
-skipped_principal_ids.len()
-)
-);
-tokio::time::sleep(LOCK_RETRY_TIME).await;
-principal_ids = skipped_principal_ids;
-} else {
-trc::event!(
-Server(trc::ServerEvent::Startup),
-Details = format!("Account migration completed.",)
-);
-break;
-}
+Some(1) => {
+migrate_v0_12_0(server).await.caused_by(trc::location!())?;
+}
+Some(version) => {
+panic!(
+"Unknown database schema version, expected {} or below, found {}",
+DATABASE_SCHEMA_VERSION, version
+);
+}
+_ => {
+if !is_new_install(server).await.caused_by(trc::location!())? {
+migrate_v0_11(server).await.caused_by(trc::location!())?;
+}
+}
}
@@ -237,6 +102,267 @@ pub async fn try_migrate(server: &Server) -> trc::Result<()> {
Ok(())
}
async fn migrate_v0_12_0(server: &Server) -> trc::Result<()> {
let force_lock = std::env::var("FORCE_LOCK").is_ok();
let in_memory = server.in_memory_store();
loop {
if force_lock
|| in_memory
.try_lock(
KV_LOCK_HOUSEKEEPER,
b"migrate_core_lock",
LOCK_WAIT_TIME_CORE,
)
.await
.caused_by(trc::location!())?
{
let from_key = ValueKey::<ValueClass> {
account_id: 0,
collection: 0,
document_id: 0,
class: ValueClass::TaskQueue(TaskQueueClass::IndexEmail {
due: 0,
hash: BlobHash::default(),
}),
};
let to_key = ValueKey::<ValueClass> {
account_id: u32::MAX,
collection: u8::MAX,
document_id: u32::MAX,
class: ValueClass::TaskQueue(TaskQueueClass::IndexEmail {
due: u64::MAX,
hash: BlobHash::default(),
}),
};
let now = now();
let mut migrate_tasks = Vec::new();
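// Collect pending IndexEmail tasks whose due time lies in the future;
// the first 8 key bytes hold the big-endian due timestamp, and each
// key is rewritten below with `now` so the task becomes due immediately.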
server
.core
.storage
.data
.iterate(
IterateParams::new(from_key, to_key).ascending(),
|key, value| {
let due = key.deserialize_be_u64(0)?;
if due > now {
migrate_tasks.push((key.to_vec(), value.to_vec()));
}
Ok(true)
},
)
.await
.caused_by(trc::location!())?;
if !migrate_tasks.is_empty() {
let num_migrated = migrate_tasks.len();
let mut batch = BatchBuilder::new();
for (key, value) in migrate_tasks {
let mut new_key = key.clone();
new_key[0..8].copy_from_slice(&now.to_be_bytes());
batch
.clear(ValueClass::Any(AnyClass {
subspace: SUBSPACE_TASK_QUEUE,
key,
}))
.set(
ValueClass::Any(AnyClass {
subspace: SUBSPACE_TASK_QUEUE,
key: new_key,
}),
value,
);
}
server
.store()
.write(batch.build_all())
.await
.caused_by(trc::location!())?;
trc::event!(
Server(trc::ServerEvent::Startup),
Details = format!("Migrated {num_migrated} tasks")
);
}
in_memory
.remove_lock(KV_LOCK_HOUSEKEEPER, b"migrate_core_lock")
.await
.caused_by(trc::location!())?;
break;
} else {
trc::event!(
Server(trc::ServerEvent::Startup),
Details = format!("Migration lock busy, waiting 30 seconds.",)
);
tokio::time::sleep(LOCK_RETRY_TIME).await;
}
}
migrate_calendar_events(server)
.await
.caused_by(trc::location!())
}
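Both migrate_v0_12_0 above and migrate_v0_11 below serialize their one-time work through the same try-lock/retry dance against the in-memory store. Distilled to its shape (a schematic sketch only: the trait, the synchronous sleep, and the closure argument are simplified stand-ins, not Stalwart's actual async API):

use std::time::Duration;

// Simplified stand-in for the in-memory KV lock used by the migrations.
trait MigrationLock {
    fn try_lock(&self, key: &[u8], wait: Duration) -> bool;
    fn remove_lock(&self, key: &[u8]);
}

// Spin until the cluster-wide lock is acquired (or FORCE_LOCK is set),
// do the one-time work, then release the lock.
fn run_locked<L: MigrationLock>(lock: &L, work: impl FnOnce()) {
    let force_lock = std::env::var("FORCE_LOCK").is_ok();
    loop {
        if force_lock || lock.try_lock(b"migrate_core_lock", Duration::from_secs(30)) {
            work(); // the real code also checks a "done" marker key first
            lock.remove_lock(b"migrate_core_lock");
            break;
        }
        // Another node holds the lock: wait 30 seconds and retry.
        std::thread::sleep(Duration::from_secs(30));
    }
}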
async fn migrate_v0_11(server: &Server) -> trc::Result<()> {
let force_lock = std::env::var("FORCE_LOCK").is_ok();
let in_memory = server.in_memory_store();
let principal_ids;
loop {
if force_lock
|| in_memory
.try_lock(
KV_LOCK_HOUSEKEEPER,
b"migrate_core_lock",
LOCK_WAIT_TIME_CORE,
)
.await
.caused_by(trc::location!())?
{
if in_memory
.key_get::<()>(KeyValue::<()>::build_key(
KV_LOCK_HOUSEKEEPER,
b"migrate_core_done",
))
.await
.caused_by(trc::location!())?
.is_none()
{
migrate_queue(server).await.caused_by(trc::location!())?;
migrate_reports(server).await.caused_by(trc::location!())?;
reset_changelog(server).await.caused_by(trc::location!())?;
principal_ids = migrate_principals(server)
.await
.caused_by(trc::location!())?;
in_memory
.key_set(
KeyValue::new(
KeyValue::<()>::build_key(KV_LOCK_HOUSEKEEPER, b"migrate_core_done"),
b"1".to_vec(),
)
.expires(86400),
)
.await
.caused_by(trc::location!())?;
} else {
principal_ids = server
.get_document_ids(u32::MAX, Collection::Principal)
.await
.caused_by(trc::location!())?
.unwrap_or_default();
trc::event!(
Server(trc::ServerEvent::Startup),
Details = format!("Migration completed by another node.",)
);
}
in_memory
.remove_lock(KV_LOCK_HOUSEKEEPER, b"migrate_core_lock")
.await
.caused_by(trc::location!())?;
break;
} else {
trc::event!(
Server(trc::ServerEvent::Startup),
Details = format!("Migration lock busy, waiting 30 seconds.",)
);
tokio::time::sleep(LOCK_RETRY_TIME).await;
}
}
if !principal_ids.is_empty() {
let mut principal_ids = principal_ids.into_iter().collect::<Vec<_>>();
principal_ids.shuffle(&mut rand::rng());
loop {
let mut skipped_principal_ids = Vec::new();
let mut num_migrated = 0;
for principal_id in principal_ids {
let lock_key = format!("migrate_{principal_id}_lock");
let done_key = format!("migrate_{principal_id}_done");
if force_lock
|| in_memory
.try_lock(
KV_LOCK_HOUSEKEEPER,
lock_key.as_bytes(),
LOCK_WAIT_TIME_ACCOUNT,
)
.await
.caused_by(trc::location!())?
{
if in_memory
.key_get::<()>(KeyValue::<()>::build_key(
KV_LOCK_HOUSEKEEPER,
done_key.as_bytes(),
))
.await
.caused_by(trc::location!())?
.is_none()
{
migrate_principal(server, principal_id)
.await
.caused_by(trc::location!())?;
num_migrated += 1;
in_memory
.key_set(
KeyValue::new(
KeyValue::<()>::build_key(
KV_LOCK_HOUSEKEEPER,
done_key.as_bytes(),
),
b"1".to_vec(),
)
.expires(86400),
)
.await
.caused_by(trc::location!())?;
}
in_memory
.remove_lock(KV_LOCK_HOUSEKEEPER, lock_key.as_bytes())
.await
.caused_by(trc::location!())?;
} else {
skipped_principal_ids.push(principal_id);
}
}
if !skipped_principal_ids.is_empty() {
trc::event!(
Server(trc::ServerEvent::Startup),
Details = format!(
"Migrated {num_migrated} accounts and {} are locked by another node, waiting 30 seconds.",
skipped_principal_ids.len()
)
);
tokio::time::sleep(LOCK_RETRY_TIME).await;
principal_ids = skipped_principal_ids;
} else {
trc::event!(
Server(trc::ServerEvent::Startup),
Details = format!("Account migration completed.",)
);
break;
}
}
}
Ok(())
}
async fn is_new_install(server: &Server) -> trc::Result<bool> {
for subspace in [
SUBSPACE_QUEUE_MESSAGE,
@@ -369,29 +495,3 @@ impl<T: serde::de::DeserializeOwned + Sized + Sync + Send> Deserialize for Legac
.map(|inner| Self { inner })
}
}
-/*
-#[derive(
-rkyv::Archive, rkyv::Deserialize, rkyv::Serialize, Debug, Default, Clone, PartialEq, Eq,
-)]
-pub struct CalendarEventData {
-pub event: ICalendar,
-pub time_ranges: Box<[ComponentTimeRange]>,
-pub alarms: Box<[Alarm]>,
-pub base_offset: i64,
-pub base_time_utc: u32,
-pub duration: u32,
-}
-#[derive(
-rkyv::Archive, rkyv::Deserialize, rkyv::Serialize, Debug, Default, Clone, PartialEq, Eq,
-)]
-#[rkyv(compare(PartialEq), derive(Debug))]
-pub struct Alarm {
-pub comp_id: u16,
-pub alarms: Box<[AlarmDelta]>,
-}
-*/

View file

@@ -78,14 +78,31 @@
}
}
</style>
<style type="text/css">
.event-detail {
font-weight: bold;
color: #2c5aa0;
}
.guest-list {
background-color: #f8f9fa;
padding: 10px;
border-radius: 4px;
margin-top: 5px;
}
:root {
color-scheme: light only;
}
</style>
</head>
<body style="word-spacing:normal;background-color:#f4f4f4;">
<div style="background-color:#f4f4f4;">
<!--[if mso | IE]><table align="center" border="0" cellpadding="0" cellspacing="0" class="" style="width:600px;" width="600" bgcolor="#ffffff" ><tr><td style="line-height:0px;font-size:0px;mso-line-height-rule:exactly;"><![endif]-->
<div style="background:#ffffff;background-color:#ffffff;margin:0px auto;max-width:600px;">
<div style="background-color:#ffffff;margin:0px auto;max-width:600px;">
<table align="center" border="0" cellpadding="0" cellspacing="0" role="presentation"
style="background:#ffffff;background-color:#ffffff;width:100%;">
style="background-color:#ffffff;width:100%;">
<tbody>
<tr>
<td style="direction:ltr;font-size:0px;padding:20px 0;text-align:center;">
@@ -118,9 +135,9 @@
</table>
</div>
<!--[if mso | IE]></td></tr></table><![endif]--><!-- Main Content --><!--[if mso | IE]><table align="center" border="0" cellpadding="0" cellspacing="0" class="" style="width:600px;" width="600" bgcolor="#ffffff" ><tr><td style="line-height:0px;font-size:0px;mso-line-height-rule:exactly;"><![endif]-->
<div style="background:#ffffff;background-color:#ffffff;margin:0px auto;max-width:600px;">
<div style="background-color:#ffffff;margin:0px auto;max-width:600px;">
<table align="center" border="0" cellpadding="0" cellspacing="0" role="presentation"
style="background:#ffffff;background-color:#ffffff;width:100%;">
style="background-color:#ffffff;width:100%;">
<tbody>
<tr>
<td style="direction:ltr;font-size:0px;padding:0 20px;text-align:center;">
@@ -207,9 +224,9 @@
</table>
</div>
<!--[if mso | IE]></td></tr></table><![endif]--><!-- Footer --><!--[if mso | IE]><table align="center" border="0" cellpadding="0" cellspacing="0" class="" style="width:600px;" width="600" bgcolor="#f8f9fa" ><tr><td style="line-height:0px;font-size:0px;mso-line-height-rule:exactly;"><![endif]-->
<div style="background:#f8f9fa;background-color:#f8f9fa;margin:0px auto;max-width:600px;">
<div style="background-color:#f8f9fa;margin:0px auto;max-width:600px;">
<table align="center" border="0" cellpadding="0" cellspacing="0" role="presentation"
style="background:#f8f9fa;background-color:#f8f9fa;width:100%;">
style="background-color:#f8f9fa;width:100%;">
<tbody>
<tr>
<td style="direction:ltr;font-size:0px;padding:20px;text-align:center;">

File diff suppressed because one or more lines are too long

View file

@@ -6,7 +6,10 @@
<mj-text font-size="14px" color="#333333" line-height="1.5" />
<mj-section background-color="#ffffff" />
</mj-attributes>
-<mj-style inline="inline">
+<mj-style>
+:root {
+color-scheme: light only;
+}
.event-detail {
font-weight: bold;
color: #2c5aa0;

View file

@@ -8,7 +8,7 @@ use super::WebDavTest;
use email::{cache::MessageCacheFetch, message::metadata::MessageMetadata};
use hyper::StatusCode;
use jmap_proto::types::{collection::Collection, property::Property};
-use mail_parser::DateTime;
+use mail_parser::{DateTime, MessageParser};
use store::write::now;
pub async fn test(test: &WebDavTest) {
@@ -51,28 +51,32 @@ pub async fn test(test: &WebDavTest) {
.await
.unwrap()
.unwrap();
-let contents = String::from_utf8(
-test.server
-.blob_store()
-.get_blob(
-metadata_
-.unarchive::<MessageMetadata>()
-.unwrap()
-.blob_hash
-.0
-.as_slice(),
-0..usize::MAX,
-)
-.await
-.unwrap()
-.unwrap(),
-)
-.unwrap();
-/*std::fs::write(
-format!("message_{}.eml", message.document_id),
-contents.as_bytes(),
-)
-.unwrap();*/
+let contents = test
+.server
+.blob_store()
+.get_blob(
+metadata_
+.unarchive::<MessageMetadata>()
+.unwrap()
+.blob_hash
+.0
+.as_slice(),
+0..usize::MAX,
+)
+.await
+.unwrap()
+.unwrap();
+//let t = std::fs::write(format!("message_{}.eml", message.document_id), &contents).unwrap();
+let message = MessageParser::new().parse(&contents).unwrap();
+let contents = message
+.html_bodies()
+.next()
+.unwrap()
+.text_contents()
+.unwrap();
if idx == 0 {
// First alarm does not have a summary or description
assert!(
@@ -89,7 +93,7 @@
"failed for {contents}"
);
assert!(
contents.contains("It's alarming how charming I feel."),
contents.contains("It&#39;s alarming how charming I feel."),
"failed for {contents}"
);
}