diff --git a/services/backup/src/database/mod.rs b/services/backup/src/database/mod.rs
index 1483d1c9b..0a6408cef 100644
--- a/services/backup/src/database/mod.rs
+++ b/services/backup/src/database/mod.rs
@@ -1,282 +1,366 @@
 pub mod backup_item;
 pub mod log_item;
 
 use std::collections::HashMap;
 
 use aws_sdk_dynamodb::{
-  operation::get_item::GetItemOutput, types::AttributeValue,
+  operation::get_item::GetItemOutput,
+  types::{AttributeValue, ReturnValue},
 };
 use comm_services_lib::database::Error;
-use tracing::error;
+use tracing::{error, trace, warn};
 
 use crate::constants::{
   BACKUP_TABLE_FIELD_BACKUP_ID, BACKUP_TABLE_FIELD_USER_ID,
   BACKUP_TABLE_INDEX_USERID_CREATED, BACKUP_TABLE_NAME,
   LOG_TABLE_FIELD_ATTACHMENT_HOLDERS, LOG_TABLE_FIELD_BACKUP_ID,
   LOG_TABLE_FIELD_DATA_HASH, LOG_TABLE_FIELD_LOG_ID,
   LOG_TABLE_FIELD_PERSISTED_IN_BLOB, LOG_TABLE_FIELD_VALUE, LOG_TABLE_NAME,
 };
 
 use self::{
   backup_item::{BackupItem, OrderedBackupItem},
   log_item::{parse_log_item, LogItem},
 };
 
 #[derive(Clone)]
 pub struct DatabaseClient {
   client: aws_sdk_dynamodb::Client,
 }
 
 impl DatabaseClient {
   pub fn new(aws_config: &aws_types::SdkConfig) -> Self {
     DatabaseClient {
       client: aws_sdk_dynamodb::Client::new(aws_config),
     }
   }
 
   // backup item
   pub async fn put_backup_item(
     &self,
     backup_item: BackupItem,
   ) -> Result<(), Error> {
     let item = backup_item.into();
 
     self
       .client
       .put_item()
       .table_name(BACKUP_TABLE_NAME)
       .set_item(Some(item))
       .send()
       .await
       .map_err(|e| {
         error!("DynamoDB client failed to put backup item");
         Error::AwsSdk(e.into())
       })?;
 
     Ok(())
   }
 
   pub async fn find_backup_item(
     &self,
     user_id: &str,
     backup_id: &str,
   ) -> Result<Option<BackupItem>, Error> {
-    let item_key = HashMap::from([
-      (
-        BACKUP_TABLE_FIELD_USER_ID.to_string(),
-        AttributeValue::S(user_id.to_string()),
-      ),
-      (
-        BACKUP_TABLE_FIELD_BACKUP_ID.to_string(),
-        AttributeValue::S(backup_id.to_string()),
-      ),
-    ]);
+    let item_key = Self::get_item_key(user_id, backup_id);
 
     let output = self
       .client
       .get_item()
       .table_name(BACKUP_TABLE_NAME)
       .set_key(Some(item_key))
       .send()
       .await
       .map_err(|e| {
         error!("DynamoDB client failed to find backup item");
         Error::AwsSdk(e.into())
       })?;
 
     let GetItemOutput {
       item: Some(item), ..
     } = output else {
       return Ok(None)
     };
 
     let backup_item = item.try_into()?;
     Ok(Some(backup_item))
   }
 
   pub async fn find_last_backup_item(
     &self,
     user_id: &str,
   ) -> Result<Option<OrderedBackupItem>, Error> {
     let response = self
       .client
       .query()
       .table_name(BACKUP_TABLE_NAME)
       .index_name(BACKUP_TABLE_INDEX_USERID_CREATED)
       .key_condition_expression("#userID = :valueToMatch")
       .expression_attribute_names("#userID", BACKUP_TABLE_FIELD_USER_ID)
       .expression_attribute_values(
         ":valueToMatch",
         AttributeValue::S(user_id.to_string()),
       )
       .limit(1)
       .scan_index_forward(false)
       .send()
       .await
       .map_err(|e| {
         error!("DynamoDB client failed to find last backup");
         Error::AwsSdk(e.into())
       })?;
 
     match response.items.unwrap_or_default().pop() {
       Some(item) => {
         let backup_item = item.try_into()?;
         Ok(Some(backup_item))
       }
       None => Ok(None),
     }
   }
 
-  pub async fn remove_backup_item(&self, backup_id: &str) -> Result<(), Error> {
-    self
+  pub async fn remove_backup_item(
+    &self,
+    user_id: &str,
+    backup_id: &str,
+  ) -> Result<Option<BackupItem>, Error> {
+    let item_key = Self::get_item_key(user_id, backup_id);
+
+    let response = self
       .client
       .delete_item()
       .table_name(BACKUP_TABLE_NAME)
-      .key(
-        BACKUP_TABLE_FIELD_BACKUP_ID,
-        AttributeValue::S(backup_id.to_string()),
-      )
+      .set_key(Some(item_key))
+      .return_values(ReturnValue::AllOld)
       .send()
       .await
       .map_err(|e| {
         error!("DynamoDB client failed to remove backup item");
         Error::AwsSdk(e.into())
       })?;
 
-    Ok(())
+    response
+      .attributes
+      .map(BackupItem::try_from)
+      .transpose()
+      .map_err(Error::from)
+  }
+
+  /// For the purposes of the initial backup version this function
+  /// removes all backups except for the latest one
+  pub async fn remove_old_backups(
+    &self,
+    user_id: &str,
+  ) -> Result<Vec<BackupItem>, Error> {
+    let response = self
+      .client
+      .query()
+      .table_name(BACKUP_TABLE_NAME)
+      .index_name(BACKUP_TABLE_INDEX_USERID_CREATED)
+      .key_condition_expression("#userID = :valueToMatch")
+      .expression_attribute_names("#userID", BACKUP_TABLE_FIELD_USER_ID)
+      .expression_attribute_values(
+        ":valueToMatch",
+        AttributeValue::S(user_id.to_string()),
+      )
+      .scan_index_forward(false)
+      .send()
+      .await
+      .map_err(|e| {
+        error!("DynamoDB client failed to fetch backups");
+        Error::AwsSdk(e.into())
+      })?;
+
+    if response.last_evaluated_key().is_some() {
+      // In the initial version of the backup service this function will be
+      // run for every new backup (each user only has one backup), so this
+      // shouldn't happen
+      warn!("Not all old backups have been cleaned up");
+    }
+
+    let items = response
+      .items
+      .unwrap_or_default()
+      .into_iter()
+      .map(OrderedBackupItem::try_from)
+      .collect::<Result<Vec<_>, _>>()?;
+
+    let mut removed_backups = vec![];
+
+    let Some(latest) = items.iter().map(|item| item.created).max() else {
+      return Ok(removed_backups);
+    };
+
+    for item in items {
+      if item.created == latest {
+        trace!(
+          "Skipping removal of the latest backup item: {}",
+          item.backup_id
+        );
+        continue;
+      }
+
+      trace!("Removing backup item: {item:?}");
+
+      if let Some(backup) =
+        self.remove_backup_item(user_id, &item.backup_id).await?
+      {
+        removed_backups.push(backup);
+      } else {
+        warn!("Backup was found during query, but wasn't found when deleting")
+      };
+    }
+
+    Ok(removed_backups)
+  }
+
+  fn get_item_key(
+    user_id: &str,
+    backup_id: &str,
+  ) -> HashMap<String, AttributeValue> {
+    HashMap::from([
+      (
+        BACKUP_TABLE_FIELD_USER_ID.to_string(),
+        AttributeValue::S(user_id.to_string()),
+      ),
+      (
+        BACKUP_TABLE_FIELD_BACKUP_ID.to_string(),
+        AttributeValue::S(backup_id.to_string()),
+      ),
+    ])
   }
 
   // log item
   pub async fn put_log_item(&self, log_item: LogItem) -> Result<(), Error> {
     let item = HashMap::from([
       (
         LOG_TABLE_FIELD_BACKUP_ID.to_string(),
         AttributeValue::S(log_item.backup_id),
       ),
       (
         LOG_TABLE_FIELD_LOG_ID.to_string(),
         AttributeValue::S(log_item.log_id),
       ),
       (
         LOG_TABLE_FIELD_PERSISTED_IN_BLOB.to_string(),
         AttributeValue::Bool(log_item.persisted_in_blob),
       ),
       (
         LOG_TABLE_FIELD_VALUE.to_string(),
         AttributeValue::S(log_item.value),
       ),
       (
         LOG_TABLE_FIELD_DATA_HASH.to_string(),
         AttributeValue::S(log_item.data_hash),
       ),
       (
         LOG_TABLE_FIELD_ATTACHMENT_HOLDERS.to_string(),
         AttributeValue::S(log_item.attachment_holders),
       ),
     ]);
 
     self
       .client
       .put_item()
       .table_name(LOG_TABLE_NAME)
       .set_item(Some(item))
       .send()
       .await
       .map_err(|e| {
         error!("DynamoDB client failed to put log item");
         Error::AwsSdk(e.into())
       })?;
 
     Ok(())
   }
 
   pub async fn find_log_item(
     &self,
     backup_id: &str,
     log_id: &str,
   ) -> Result<Option<LogItem>, Error> {
     let item_key = HashMap::from([
       (
         LOG_TABLE_FIELD_BACKUP_ID.to_string(),
         AttributeValue::S(backup_id.to_string()),
       ),
       (
         LOG_TABLE_FIELD_LOG_ID.to_string(),
         AttributeValue::S(log_id.to_string()),
       ),
     ]);
 
     match self
       .client
       .get_item()
       .table_name(LOG_TABLE_NAME)
       .set_key(Some(item_key))
       .send()
       .await
       .map_err(|e| {
         error!("DynamoDB client failed to find log item");
         Error::AwsSdk(e.into())
       })? {
       GetItemOutput {
         item: Some(item), ..
       } => {
         let log_item = parse_log_item(item)?;
         Ok(Some(log_item))
       }
       _ => Ok(None),
     }
   }
 
   pub async fn find_log_items_for_backup(
     &self,
     backup_id: &str,
   ) -> Result<Vec<LogItem>, Error> {
     let response = self
       .client
       .query()
       .table_name(LOG_TABLE_NAME)
       .key_condition_expression("#backupID = :valueToMatch")
       .expression_attribute_names("#backupID", LOG_TABLE_FIELD_BACKUP_ID)
       .expression_attribute_values(
         ":valueToMatch",
         AttributeValue::S(backup_id.to_string()),
       )
       .send()
       .await
       .map_err(|e| {
         error!("DynamoDB client failed to find log items for backup");
         Error::AwsSdk(e.into())
       })?;
 
     if response.count == 0 {
       return Ok(Vec::new());
     }
 
     let mut results: Vec<LogItem> =
       Vec::with_capacity(response.count() as usize);
     for item in response.items.unwrap_or_default() {
       let log_item = parse_log_item(item)?;
       results.push(log_item);
     }
     Ok(results)
   }
 
   pub async fn remove_log_item(&self, log_id: &str) -> Result<(), Error> {
     self
       .client
       .delete_item()
       .table_name(LOG_TABLE_NAME)
       .key(
         LOG_TABLE_FIELD_LOG_ID,
         AttributeValue::S(log_id.to_string()),
       )
       .send()
       .await
       .map_err(|e| {
         error!("DynamoDB client failed to remove log item");
         Error::AwsSdk(e.into())
       })?;
 
     Ok(())
   }
 }
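// A usage sketch for the reworked deletion API above, not part of the diff.
// `remove_backup_item` now takes the full composite key (userID + backupID),
// and because the delete uses `ReturnValue::AllOld`, it hands back the
// deleted `BackupItem` so its blob holders can still be revoked afterwards.
// `db_client` and the key values here are hypothetical stand-ins.
async fn cleanup_sketch(db_client: &DatabaseClient) -> Result<(), Error> {
  // `Ok(None)` means nothing matched the key; DynamoDB deletes are
  // idempotent, so a missing item is not treated as an error.
  if let Some(removed) = db_client
    .remove_backup_item("some-user-id", "some-backup-id")
    .await?
  {
    println!("removed backup {}", removed.backup_id);
  }

  // Removes everything except the newest backup for the user, returning the
  // removed items so the caller can release their blob holders.
  let old_backups = db_client.remove_old_backups("some-user-id").await?;
  println!("cleaned up {} old backup(s)", old_backups.len());

  Ok(())
}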
diff --git a/services/backup/src/http/handlers/backup.rs b/services/backup/src/http/handlers/backup.rs
index 3b551f6df..b331b243b 100644
--- a/services/backup/src/http/handlers/backup.rs
+++ b/services/backup/src/http/handlers/backup.rs
@@ -1,275 +1,291 @@
 use std::{collections::HashSet, convert::Infallible};
 
 use actix_web::{
   error::ErrorBadRequest,
   web::{self, Bytes},
   HttpResponse, Responder,
 };
 use comm_services_lib::{
   auth::UserIdentity,
   backup::LatestBackupIDResponse,
   blob::{client::BlobServiceClient, types::BlobInfo},
   http::multipart::{get_named_text_field, get_text_field},
   tools::Defer,
 };
 use tokio_stream::{wrappers::ReceiverStream, StreamExt};
 use tracing::{info, instrument, trace, warn};
 
 use crate::{
   database::{backup_item::BackupItem, DatabaseClient},
   error::BackupError,
 };
 
 #[instrument(name = "upload_backup", skip_all, fields(backup_id))]
 pub async fn upload(
   user: UserIdentity,
   blob_client: web::Data<BlobServiceClient>,
   db_client: web::Data<DatabaseClient>,
   mut multipart: actix_multipart::Multipart,
 ) -> actix_web::Result<HttpResponse> {
   info!("Upload backup request");
 
   let backup_id = get_named_text_field("backup_id", &mut multipart).await?;
 
   tracing::Span::current().record("backup_id", &backup_id);
 
   let (user_keys_blob_info, user_keys_revoke) = forward_field_to_blob(
     &mut multipart,
     &blob_client,
     "user_keys_hash",
     "user_keys",
   )
   .await?;
 
   let (user_data_blob_info, user_data_revoke) = forward_field_to_blob(
     &mut multipart,
     &blob_client,
     "user_data_hash",
     "user_data",
   )
   .await?;
 
   let attachments_holders: HashSet<String> =
     match get_text_field(&mut multipart).await? {
       Some((name, attachments)) => {
         if name != "attachments" {
           warn!(
             name,
             "Malformed request: 'attachments' text field expected."
           );
           return Err(ErrorBadRequest("Bad request"));
         }
 
         attachments.lines().map(ToString::to_string).collect()
       }
       None => HashSet::new(),
     };
 
   let item = BackupItem::new(
-    user.user_id,
+    user.user_id.clone(),
     backup_id,
     user_keys_blob_info,
     user_data_blob_info,
     attachments_holders,
   );
 
   db_client
     .put_backup_item(item)
     .await
     .map_err(BackupError::from)?;
 
   user_keys_revoke.cancel();
   user_data_revoke.cancel();
 
+  for backup in db_client
+    .remove_old_backups(&user.user_id)
+    .await
+    .map_err(BackupError::from)?
+  {
+    blob_client.schedule_revoke_holder(
+      backup.user_keys.blob_hash,
+      backup.user_keys.holder,
+    );
+
+    blob_client.schedule_revoke_holder(
+      backup.user_data.blob_hash,
+      backup.user_data.holder,
+    );
+  }
+
   Ok(HttpResponse::Ok().finish())
 }
 
 #[instrument(
   skip_all,
   name = "forward_to_blob",
   fields(hash_field_name, data_field_name)
 )]
 async fn forward_field_to_blob<'revoke, 'blob: 'revoke>(
   multipart: &mut actix_multipart::Multipart,
   blob_client: &'blob web::Data<BlobServiceClient>,
   hash_field_name: &str,
   data_field_name: &str,
 ) -> actix_web::Result<(BlobInfo, Defer<'revoke>)> {
   trace!("Reading blob fields: {hash_field_name:?}, {data_field_name:?}");
 
   let blob_hash = get_named_text_field(hash_field_name, multipart).await?;
 
   let Some(mut field) = multipart.try_next().await? else {
     warn!("Malformed request: expected a field.");
     return Err(ErrorBadRequest("Bad request"))?;
   };
   if field.name() != data_field_name {
     warn!(
       hash_field_name,
       "Malformed request: '{data_field_name}' data field expected."
     );
     return Err(ErrorBadRequest("Bad request"))?;
   }
 
   let blob_info = BlobInfo {
     blob_hash,
     holder: uuid::Uuid::new_v4().to_string(),
   };
 
   // [`actix_multipart::Multipart`] isn't [`std::marker::Send`], and so we
   // cannot pass it to the blob client directly. Instead we have to forward
   // it to a channel and create a stream from the receiver.
   let (tx, rx) = tokio::sync::mpsc::channel(1);
   let receive_promise = async move {
     trace!("Receiving blob data");
     // [`actix_multipart::MultipartError`] isn't [`std::marker::Send`] so we
     // return it here, and pass [`Infallible`] as the error to the channel
     while let Some(chunk) = field.try_next().await? {
       if let Err(err) =
         tx.send(Result::<Bytes, Infallible>::Ok(chunk)).await
       {
         warn!("Error when sending data through a channel: '{err}'");
         // Error here means that the channel has been closed from the blob
         // client side. We don't want to return an error here, because
         // `tokio::try_join!` only returns the first error it receives and
         // we want to prioritize the backup client error.
         break;
       }
     }
     trace!("Finished receiving blob data");
     Result::<(), actix_web::Error>::Ok(())
   };
 
   let data_stream = ReceiverStream::new(rx);
   let send_promise = async {
     blob_client
       .simple_put(&blob_info.blob_hash, &blob_info.holder, data_stream)
       .await
       .map_err(BackupError::from)?;
 
     Ok(())
   };
 
   tokio::try_join!(receive_promise, send_promise)?;
 
   let revoke_info = blob_info.clone();
   let revoke_holder = Defer::new(|| {
     blob_client
       .schedule_revoke_holder(revoke_info.blob_hash, revoke_info.holder)
   });
 
   Ok((blob_info, revoke_holder))
 }
 
 #[instrument(name = "download_user_keys", skip_all, fields(backup_id = %path.as_str()))]
 pub async fn download_user_keys(
   user: UserIdentity,
   path: web::Path<String>,
   blob_client: web::Data<BlobServiceClient>,
   db_client: web::Data<DatabaseClient>,
 ) -> actix_web::Result<HttpResponse> {
   info!("Download user keys request");
   let backup_id = path.into_inner();
   download_user_blob(
     |item| &item.user_keys,
     &user.user_id,
     &backup_id,
     blob_client,
     db_client,
   )
   .await
 }
 
 #[instrument(name = "download_user_data", skip_all, fields(backup_id = %path.as_str()))]
 pub async fn download_user_data(
   user: UserIdentity,
   path: web::Path<String>,
   blob_client: web::Data<BlobServiceClient>,
   db_client: web::Data<DatabaseClient>,
 ) -> actix_web::Result<HttpResponse> {
   info!("Download user data request");
   let backup_id = path.into_inner();
   download_user_blob(
     |item| &item.user_data,
     &user.user_id,
     &backup_id,
     blob_client,
     db_client,
   )
   .await
 }
 
 pub async fn download_user_blob(
   data_extractor: impl FnOnce(&BackupItem) -> &BlobInfo,
   user_id: &str,
   backup_id: &str,
   blob_client: web::Data<BlobServiceClient>,
   db_client: web::Data<DatabaseClient>,
 ) -> actix_web::Result<HttpResponse> {
   let backup_item = db_client
     .find_backup_item(user_id, backup_id)
     .await
     .map_err(BackupError::from)?
     .ok_or(BackupError::NoBackup)?;
 
   let stream = blob_client
     .get(&data_extractor(&backup_item).blob_hash)
     .await
     .map_err(BackupError::from)?;
 
   Ok(
     HttpResponse::Ok()
       .content_type("application/octet-stream")
       .streaming(stream),
   )
 }
 
 #[instrument(name = "get_latest_backup_id", skip_all, fields(username = %path.as_str()))]
 pub async fn get_latest_backup_id(
   path: web::Path<String>,
   db_client: web::Data<DatabaseClient>,
 ) -> actix_web::Result<impl Responder> {
   let username = path.into_inner();
   // Treat username as user_id in the initial version
   let user_id = username;
 
   let Some(backup_item) = db_client
     .find_last_backup_item(&user_id)
     .await
     .map_err(BackupError::from)?
   else {
     return Err(BackupError::NoBackup.into());
   };
 
   let response = LatestBackupIDResponse {
     backup_id: backup_item.backup_id,
   };
 
   Ok(web::Json(response))
 }
 
 #[instrument(name = "download_latest_backup_keys", skip_all, fields(username = %path.as_str()))]
 pub async fn download_latest_backup_keys(
   path: web::Path<String>,
   db_client: web::Data<DatabaseClient>,
   blob_client: web::Data<BlobServiceClient>,
 ) -> actix_web::Result<HttpResponse> {
   let username = path.into_inner();
   // Treat username as user_id in the initial version
   let user_id = username;
 
   let Some(backup_item) = db_client
     .find_last_backup_item(&user_id)
     .await
     .map_err(BackupError::from)?
   else {
     return Err(BackupError::NoBackup.into());
   };
 
   let stream = blob_client
     .get(&backup_item.user_keys.blob_hash)
     .await
     .map_err(BackupError::from)?;
 
   Ok(
     HttpResponse::Ok()
       .content_type("application/octet-stream")
       .streaming(stream),
   )
 }
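// A self-contained sketch (not part of the diff) of the channel trick used
// in `forward_field_to_blob` above: the multipart field isn't `Send`, so its
// chunks are pumped into a `tokio::sync::mpsc` channel and the blob client
// consumes the `Send`-able `ReceiverStream` end instead. The chunk data and
// the consumer below are stand-ins; only the channel + `join!` shape mirrors
// the handler.
use tokio_stream::{wrappers::ReceiverStream, StreamExt};

async fn forwarding_sketch() {
  let (tx, rx) =
    tokio::sync::mpsc::channel::<Result<Vec<u8>, std::convert::Infallible>>(1);

  let produce = async move {
    for chunk in [vec![1u8, 2], vec![3, 4]] {
      // A send error means the receiver hung up; stop producing and let the
      // consumer side surface its own, more informative error.
      if tx.send(Ok(chunk)).await.is_err() {
        break;
      }
    }
  };

  let consume = async {
    let mut stream = ReceiverStream::new(rx);
    while let Some(Ok(chunk)) = stream.next().await {
      println!("received {} bytes", chunk.len());
    }
  };

  // Both halves run concurrently on a single task, just like the
  // `tokio::try_join!` in the handler does with its fallible futures.
  tokio::join!(produce, consume);
}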
diff --git a/services/commtest/tests/backup_integration_test.rs b/services/commtest/tests/backup_integration_test.rs
index a627a3351..ced5ad3c7 100644
--- a/services/commtest/tests/backup_integration_test.rs
+++ b/services/commtest/tests/backup_integration_test.rs
@@ -1,110 +1,128 @@
 use bytesize::ByteSize;
 use comm_services_lib::{auth::UserIdentity, backup::LatestBackupIDResponse};
 use commtest::{
   backup::{
     backup_utils::BackupData,
     create_new_backup,
     pull_backup::{self, BackupDescriptor, RequestedData},
   },
   tools::{generate_stable_nbytes, Error},
 };
+use reqwest::StatusCode;
 use std::env;
 
 #[tokio::test]
 async fn backup_integration_test() -> Result<(), Error> {
   let port = env::var("COMM_SERVICES_PORT_BACKUP")
     .expect("port env var expected but not received")
     .parse()
     .expect("port env var should be a number");
   let mut url = reqwest::Url::parse("http://localhost")?;
   url.set_port(Some(port)).expect("failed to set port");
 
   let backup_datas = [
     BackupData {
       backup_id: "b1".to_string(),
       user_keys_hash: "kh1".to_string(),
       user_keys: generate_stable_nbytes(
         ByteSize::kib(4).as_u64() as usize,
         Some(b'a'),
       ),
       user_data_hash: "dh1".to_string(),
       user_data: generate_stable_nbytes(
         ByteSize::mib(4).as_u64() as usize,
         Some(b'A'),
       ),
       attachments: vec![],
     },
     BackupData {
       backup_id: "b2".to_string(),
       user_keys_hash: "kh2".to_string(),
       user_keys: generate_stable_nbytes(
         ByteSize::kib(4).as_u64() as usize,
         Some(b'b'),
       ),
       user_data_hash: "dh2".to_string(),
       user_data: generate_stable_nbytes(
         ByteSize::mib(4).as_u64() as usize,
         Some(b'B'),
       ),
       attachments: vec![],
     },
   ];
 
   let user_identity = UserIdentity {
     user_id: "1".to_string(),
     access_token: "dummy access token".to_string(),
     device_id: "dummy device_id".to_string(),
   };
 
   create_new_backup::run(url.clone(), &user_identity, &backup_datas[0]).await?;
   create_new_backup::run(url.clone(), &user_identity, &backup_datas[1]).await?;
 
   // Test direct lookup
   let second_backup_descriptor = BackupDescriptor::BackupID {
     backup_id: backup_datas[1].backup_id.clone(),
     user_identity: user_identity.clone(),
   };
 
   let user_keys = pull_backup::run(
     url.clone(),
     second_backup_descriptor.clone(),
     RequestedData::UserKeys,
   )
   .await?;
   assert_eq!(user_keys, backup_datas[1].user_keys);
 
   let user_data = pull_backup::run(
     url.clone(),
     second_backup_descriptor.clone(),
     RequestedData::UserData,
   )
   .await?;
   assert_eq!(user_data, backup_datas[1].user_data);
 
   // Test latest backup lookup
   let latest_backup_descriptor = BackupDescriptor::Latest {
     // Initial version of the backup service uses `user_id` in place of a username
     username: "1".to_string(),
   };
 
   let backup_id_response = pull_backup::run(
     url.clone(),
     latest_backup_descriptor.clone(),
     RequestedData::BackupID,
   )
   .await?;
   let response: LatestBackupIDResponse =
     serde_json::from_slice(&backup_id_response)?;
   assert_eq!(response.backup_id, backup_datas[1].backup_id);
 
   let user_keys = pull_backup::run(
     url.clone(),
     latest_backup_descriptor.clone(),
     RequestedData::UserKeys,
   )
   .await?;
   assert_eq!(user_keys, backup_datas[1].user_keys);
 
+  // Test cleanup
+  let first_backup_descriptor = BackupDescriptor::BackupID {
+    backup_id: backup_datas[0].backup_id.clone(),
+    user_identity: user_identity.clone(),
+  };
+
+  let response = pull_backup::run(
+    url.clone(),
+    first_backup_descriptor.clone(),
+    RequestedData::UserKeys,
+  )
+  .await;
+  assert!(
+    matches!(response, Err(Error::HttpStatus(StatusCode::NOT_FOUND))),
+    "First backup should have been removed, instead got response: {response:?}"
+  );
+
   Ok(())
 }
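// The upload path exercised by this test relies on the `Defer` guards seen
// in `forward_field_to_blob`: freshly uploaded blobs are revoked if a later
// step fails, and kept once `put_backup_item` succeeds and `.cancel()` is
// called. A minimal cancel-on-success guard built on `Drop` is sketched
// below as an illustration; the real `comm_services_lib::tools::Defer` may
// differ in its details.
struct RevokeGuard<F: FnOnce()> {
  cleanup: Option<F>,
}

impl<F: FnOnce()> RevokeGuard<F> {
  fn new(cleanup: F) -> Self {
    Self {
      cleanup: Some(cleanup),
    }
  }

  /// Disarms the guard; the happy path calls this so cleanup never runs.
  fn cancel(mut self) {
    self.cleanup.take();
  }
}

impl<F: FnOnce()> Drop for RevokeGuard<F> {
  fn drop(&mut self) {
    // Runs only if `cancel` was never called, i.e. on early return or panic.
    if let Some(cleanup) = self.cleanup.take() {
      cleanup();
    }
  }
}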