diff --git a/services/backup/src/database/log_item.rs b/services/backup/src/database/log_item.rs
--- a/services/backup/src/database/log_item.rs
+++ b/services/backup/src/database/log_item.rs
@@ -84,6 +84,27 @@
       ),
     ])
   }
+
+  /// Assigns a new backup ID for this log item. This also refreshes holders
+  /// for all [`BlobInfo`]s of this log.
+  pub fn reassign_backup_id_and_holders(&mut self, new_backup_id: String) {
+    self.backup_id = new_backup_id;
+
+    if let BlobOrDBContent::Blob(ref mut blob_info) = self.content {
+      blob_info.holder = uuid::Uuid::new_v4().to_string();
+    }
+    for attachment in &mut self.attachments {
+      attachment.holder = uuid::Uuid::new_v4().to_string();
+    }
+  }
+
+  pub fn blob_infos(&self) -> Vec<BlobInfo> {
+    let mut blobs = self.attachments.clone();
+    if let BlobOrDBContent::Blob(content_blob) = &self.content {
+      blobs.push(content_blob.clone());
+    }
+    blobs
+  }
 }
 
 impl From<LogItem> for HashMap<String, AttributeValue> {
diff --git a/services/backup/src/database/mod.rs b/services/backup/src/database/mod.rs
--- a/services/backup/src/database/mod.rs
+++ b/services/backup/src/database/mod.rs
@@ -10,7 +10,9 @@
 };
 use aws_sdk_dynamodb::{
   operation::get_item::GetItemOutput,
-  types::{AttributeValue, DeleteRequest, ReturnValue, WriteRequest},
+  types::{
+    AttributeValue, DeleteRequest, PutRequest, ReturnValue, WriteRequest,
+  },
 };
 use comm_lib::{
   blob::{client::BlobServiceClient, types::BlobInfo},
@@ -18,6 +20,7 @@
     self, batch_operations::ExponentialBackoffConfig, parse_int_attribute,
     AttributeMap, Error,
   },
+  tools::Defer,
 };
 use tracing::{error, trace, warn};
 
@@ -392,6 +395,63 @@
 
     Ok(())
   }
+
+  /// Copies all log items from [`old_backup_id`] to [`new_backup_id`].
+  /// Assigns new holders to all logs' [`BlobInfo`]s, and returns
+  /// a [`Defer`] revoke object that removes these holders unless canceled.
+  #[must_use = "Holders will be discarded unless returned revoke is canceled"]
+  pub async fn copy_log_items_to_new_backup<'revoke, 'blob: 'revoke>(
+    &self,
+    user_id: &str,
+    old_backup_id: &str,
+    new_backup_id: &str,
+    blob_client: &'blob BlobServiceClient,
+  ) -> Result<Defer<'revoke>, crate::error::BackupError> {
+    // 0. Fetch logs for old backup
+    let mut items = self
+      .fetch_all_log_items_for_backup(user_id, old_backup_id)
+      .await?;
+
+    // 1. Update backup ID, create new random holders for blobs
+    for log_item in &mut items {
+      log_item.reassign_backup_id_and_holders(new_backup_id.to_string());
+    }
+
+    // 2. Assign new holders on Blob service
+    let blob_infos: Vec<BlobInfo> =
+      items.iter().flat_map(LogItem::blob_infos).collect();
+    let assigned_holder_infos = blob_client
+      .assign_multiple_holders_with_retries(blob_infos, Default::default())
+      .await?;
+
+    let revoke = Defer::new(|| {
+      for BlobInfo { blob_hash, holder } in assigned_holder_infos {
+        blob_client.schedule_revoke_holder(blob_hash, holder);
+      }
+    });
+
+    // 3. Store new logs in DDB
+    let write_requests = items
+      .into_iter()
+      .map(|log_item| {
+        let put_request = PutRequest::builder()
+          .set_item(Some(log_item.into()))
+          .build()
+          .expect("item not set in PutRequest builder");
+        WriteRequest::builder().put_request(put_request).build()
+      })
+      .collect::<Vec<_>>();
+
+    database::batch_operations::batch_write(
+      &self.client,
+      log_table::TABLE_NAME,
+      write_requests,
+      ExponentialBackoffConfig::default(),
+    )
+    .await?;
+
+    Ok(revoke)
+  }
 }
 
 // general functions
diff --git a/shared/comm-lib/src/tools.rs b/shared/comm-lib/src/tools.rs
--- a/shared/comm-lib/src/tools.rs
+++ b/shared/comm-lib/src/tools.rs
@@ -48,6 +48,7 @@
 ///   }
 /// }
 /// ```
+#[must_use = "Defer will immediately go out of scope if not used"]
 pub struct Defer<'s>(Option<Box<dyn FnOnce() + 's>>);
 
 impl<'s> Defer<'s> {
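
A minimal call-site sketch of the new API follows (illustration only, not part of this patch). The `DatabaseClient` receiver, the wrapper function, and its imports are assumptions based on the surrounding code, and the sketch relies on the `Defer::cancel()` call implied by the `#[must_use]` message: dropping the returned handle runs the deferred closure and revokes the newly assigned holders, while canceling it keeps them.

```rust
// Call-site sketch (illustration only). `DatabaseClient` as the receiver
// type and this wrapper function are assumptions, not part of the patch.
use comm_lib::blob::client::BlobServiceClient;

use crate::database::DatabaseClient;
use crate::error::BackupError;

async fn copy_logs_to_new_backup_example(
  db: &DatabaseClient,
  blob_client: &BlobServiceClient,
  user_id: &str,
  old_backup_id: &str,
  new_backup_id: &str,
) -> Result<(), BackupError> {
  // Copy the log items and assign fresh holders. The returned `Defer`
  // schedules revocation of those holders when it is dropped.
  let revoke_holders = db
    .copy_log_items_to_new_backup(
      user_id,
      old_backup_id,
      new_backup_id,
      blob_client,
    )
    .await?;

  // ... remaining backup-creation steps go here; any early return via `?`
  // drops `revoke_holders`, so the freshly assigned holders get revoked ...

  // All steps succeeded: keep the new holders.
  revoke_holders.cancel();
  Ok(())
}
```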