diff --git a/services/backup/src/constants.rs b/services/backup/src/constants.rs
index 0ac0e36c1..bb7778faf 100644
--- a/services/backup/src/constants.rs
+++ b/services/backup/src/constants.rs
@@ -1,35 +1,36 @@
 // Assorted constants

 pub const AWS_REGION: &str = "us-east-2";
 pub const MPSC_CHANNEL_BUFFER_CAPACITY: usize = 1;
+pub const ID_SEPARATOR: &str = ":";

 // Configuration defaults

 pub const DEFAULT_GRPC_SERVER_PORT: u64 = 50051;
 pub const DEFAULT_LOCALSTACK_URL: &str = "http://localhost:4566";
 pub const DEFAULT_BLOB_SERVICE_URL: &str = "http://localhost:50053";

 // Environment variable names

 pub const SANDBOX_ENV_VAR: &str = "COMM_SERVICES_SANDBOX";
 pub const LOG_LEVEL_ENV_VAR: &str =
   tracing_subscriber::filter::EnvFilter::DEFAULT_ENV;

 // DynamoDB constants

 pub const BACKUP_TABLE_NAME: &str = "backup-service-backup";
 pub const BACKUP_TABLE_FIELD_USER_ID: &str = "userID";
 pub const BACKUP_TABLE_FIELD_BACKUP_ID: &str = "backupID";
 pub const BACKUP_TABLE_FIELD_CREATED: &str = "created";
 pub const BACKUP_TABLE_FIELD_RECOVERY_DATA: &str = "recoveryData";
 pub const BACKUP_TABLE_FIELD_COMPACTION_HOLDER: &str = "compactionHolder";
 pub const BACKUP_TABLE_FIELD_ATTACHMENT_HOLDERS: &str = "attachmentHolders";
 pub const BACKUP_TABLE_INDEX_USERID_CREATED: &str = "userID-created-index";

 pub const LOG_TABLE_NAME: &str = "backup-service-log";
 pub const LOG_TABLE_FIELD_BACKUP_ID: &str = "backupID";
 pub const LOG_TABLE_FIELD_LOG_ID: &str = "logID";
 pub const LOG_TABLE_FIELD_PERSISTED_IN_BLOB: &str = "persistedInBlob";
 pub const LOG_TABLE_FIELD_VALUE: &str = "value";
 pub const LOG_TABLE_FIELD_ATTACHMENT_HOLDERS: &str = "attachmentHolders";
 pub const LOG_TABLE_FIELD_DATA_HASH: &str = "dataHash";
diff --git a/services/backup/src/database.rs b/services/backup/src/database.rs
index feb1cdcce..1ed54d500 100644
--- a/services/backup/src/database.rs
+++ b/services/backup/src/database.rs
@@ -1,504 +1,525 @@
 use aws_sdk_dynamodb::{
   model::AttributeValue, output::GetItemOutput, Error as DynamoDBError,
 };
 use chrono::{DateTime, Utc};
 use std::{
   collections::HashMap,
   fmt::{Display, Formatter},
   sync::Arc,
 };
 use tracing::error;

 use crate::constants::{
   BACKUP_TABLE_FIELD_ATTACHMENT_HOLDERS, BACKUP_TABLE_FIELD_BACKUP_ID,
   BACKUP_TABLE_FIELD_COMPACTION_HOLDER, BACKUP_TABLE_FIELD_CREATED,
   BACKUP_TABLE_FIELD_RECOVERY_DATA, BACKUP_TABLE_FIELD_USER_ID,
   BACKUP_TABLE_INDEX_USERID_CREATED, BACKUP_TABLE_NAME,
   LOG_TABLE_FIELD_ATTACHMENT_HOLDERS, LOG_TABLE_FIELD_BACKUP_ID,
   LOG_TABLE_FIELD_DATA_HASH, LOG_TABLE_FIELD_LOG_ID,
   LOG_TABLE_FIELD_PERSISTED_IN_BLOB, LOG_TABLE_FIELD_VALUE, LOG_TABLE_NAME,
 };

 #[derive(Clone, Debug)]
 pub struct BackupItem {
   pub user_id: String,
   pub backup_id: String,
   pub created: DateTime<Utc>,
   pub recovery_data: String,
   pub compaction_holder: String,
   pub attachment_holders: String,
 }

+impl BackupItem {
+  pub fn new(
+    user_id: String,
+    backup_id: String,
+    compaction_holder: String,
+  ) -> Self {
+    BackupItem {
+      user_id,
+      backup_id,
+      compaction_holder,
+      created: chrono::Utc::now(),
+      // TODO: Recovery data is mocked with random string
+      recovery_data: crate::utils::generate_random_string(
+        20,
+        &mut rand::thread_rng(),
+      ),
+      attachment_holders: String::new(),
+    }
+  }
+}
+
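For illustration, a minimal usage sketch of the new constructor; the IDs and holder below are hypothetical:

```rust
// `created` is stamped at construction time and `recovery_data` is
// mocked with a random string (see the TODO above), so only the three
// identifiers are supplied by the caller.
let backup = BackupItem::new(
  "hypothetical-user-id".to_string(),
  "hypothetical-backup-id".to_string(),
  "hypothetical-compaction-holder".to_string(),
);
assert!(backup.attachment_holders.is_empty());
```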
 #[derive(Clone, Debug)]
 pub struct LogItem {
   pub backup_id: String,
   pub log_id: String,
   pub persisted_in_blob: bool,
   pub value: String,
   pub attachment_holders: String,
   pub data_hash: String,
 }

 #[derive(Clone)]
 pub struct DatabaseClient {
   client: Arc<aws_sdk_dynamodb::Client>,
 }

 impl DatabaseClient {
   pub fn new(aws_config: &aws_types::SdkConfig) -> Self {
     DatabaseClient {
       client: Arc::new(aws_sdk_dynamodb::Client::new(aws_config)),
     }
   }

   // backup item

   pub async fn put_backup_item(
     &self,
     backup_item: BackupItem,
   ) -> Result<(), Error> {
     let item = HashMap::from([
       (
         BACKUP_TABLE_FIELD_USER_ID.to_string(),
         AttributeValue::S(backup_item.user_id),
       ),
       (
         BACKUP_TABLE_FIELD_CREATED.to_string(),
         AttributeValue::S(backup_item.created.to_rfc3339()),
       ),
       (
         BACKUP_TABLE_FIELD_BACKUP_ID.to_string(),
         AttributeValue::S(backup_item.backup_id),
       ),
       (
         BACKUP_TABLE_FIELD_RECOVERY_DATA.to_string(),
         AttributeValue::S(backup_item.recovery_data),
       ),
       (
         BACKUP_TABLE_FIELD_COMPACTION_HOLDER.to_string(),
         AttributeValue::S(backup_item.compaction_holder),
       ),
       (
         BACKUP_TABLE_FIELD_ATTACHMENT_HOLDERS.to_string(),
         AttributeValue::S(backup_item.attachment_holders),
       ),
     ]);

     self
       .client
       .put_item()
       .table_name(BACKUP_TABLE_NAME)
       .set_item(Some(item))
       .send()
       .await
       .map_err(|e| {
         error!("DynamoDB client failed to put backup item");
         Error::AwsSdk(e.into())
       })?;

     Ok(())
   }

   pub async fn find_backup_item(
     &self,
     user_id: &str,
     backup_id: &str,
   ) -> Result<Option<BackupItem>, Error> {
     let item_key = HashMap::from([
       (
         BACKUP_TABLE_FIELD_USER_ID.to_string(),
         AttributeValue::S(user_id.to_string()),
       ),
       (
         BACKUP_TABLE_FIELD_BACKUP_ID.to_string(),
         AttributeValue::S(backup_id.to_string()),
       ),
     ]);

     match self
       .client
       .get_item()
       .table_name(BACKUP_TABLE_NAME)
       .set_key(Some(item_key))
       .send()
       .await
       .map_err(|e| {
         error!("DynamoDB client failed to find backup item");
         Error::AwsSdk(e.into())
       })? {
       GetItemOutput {
         item: Some(item), ..
       } => {
         let backup_item = parse_backup_item(item)?;
         Ok(Some(backup_item))
       }
       _ => Ok(None),
     }
   }

   pub async fn find_last_backup_item(
     &self,
     user_id: &str,
   ) -> Result<Option<BackupItem>, Error> {
     let response = self
       .client
       .query()
       .table_name(BACKUP_TABLE_NAME)
       .index_name(BACKUP_TABLE_INDEX_USERID_CREATED)
       .key_condition_expression("#userID = :valueToMatch")
       .expression_attribute_names("#userID", BACKUP_TABLE_FIELD_USER_ID)
       .expression_attribute_values(
         ":valueToMatch",
         AttributeValue::S(user_id.to_string()),
       )
       .limit(1)
       .scan_index_forward(false)
       .send()
       .await
       .map_err(|e| {
         error!("DynamoDB client failed to find last backup");
         Error::AwsSdk(e.into())
       })?;

     match response.items.unwrap_or_default().pop() {
       Some(item) => {
         let backup_item = parse_backup_item(item)?;
         Ok(Some(backup_item))
       }
       None => Ok(None),
     }
   }
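A sketch of the intended lookup flow, assuming a reachable DynamoDB and an enclosing async fn returning `Result<_, Error>`: since the `userID-created-index` GSI is keyed on `userID` and sorted by `created`, `limit(1)` plus `scan_index_forward(false)` yields the newest backup.

```rust
// Hypothetical usage: store two backups for one user, then fetch the
// newest one. BackupItem::new stamps `created` with the current time,
// so the second item is the more recent.
let user = "hypothetical-user-id".to_string();
db.put_backup_item(BackupItem::new(
  user.clone(),
  "backup-1".to_string(),
  "holder-1".to_string(),
))
.await?;
db.put_backup_item(BackupItem::new(
  user.clone(),
  "backup-2".to_string(),
  "holder-2".to_string(),
))
.await?;

let latest = db.find_last_backup_item(&user).await?;
assert_eq!(latest.unwrap().backup_id, "backup-2");
```

Note that GSI queries are eventually consistent, so reading immediately after the writes is illustrative rather than guaranteed.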

   pub async fn remove_backup_item(&self, backup_id: &str) -> Result<(), Error> {
     self
       .client
       .delete_item()
       .table_name(BACKUP_TABLE_NAME)
       .key(
         BACKUP_TABLE_FIELD_BACKUP_ID,
         AttributeValue::S(backup_id.to_string()),
       )
       .send()
       .await
       .map_err(|e| {
         error!("DynamoDB client failed to remove backup item");
         Error::AwsSdk(e.into())
       })?;

     Ok(())
   }

   // log item

   pub async fn put_log_item(&self, log_item: LogItem) -> Result<(), Error> {
     let item = HashMap::from([
       (
         LOG_TABLE_FIELD_BACKUP_ID.to_string(),
         AttributeValue::S(log_item.backup_id),
       ),
       (
         LOG_TABLE_FIELD_LOG_ID.to_string(),
         AttributeValue::S(log_item.log_id),
       ),
       (
         LOG_TABLE_FIELD_PERSISTED_IN_BLOB.to_string(),
         AttributeValue::Bool(log_item.persisted_in_blob),
       ),
       (
         LOG_TABLE_FIELD_VALUE.to_string(),
         AttributeValue::S(log_item.value),
       ),
       (
         LOG_TABLE_FIELD_DATA_HASH.to_string(),
         AttributeValue::S(log_item.data_hash),
       ),
       (
         LOG_TABLE_FIELD_ATTACHMENT_HOLDERS.to_string(),
         AttributeValue::S(log_item.attachment_holders),
       ),
     ]);

     self
       .client
       .put_item()
       .table_name(LOG_TABLE_NAME)
       .set_item(Some(item))
       .send()
       .await
       .map_err(|e| {
         error!("DynamoDB client failed to put log item");
         Error::AwsSdk(e.into())
       })?;

     Ok(())
   }

   pub async fn find_log_item(
     &self,
     backup_id: &str,
     log_id: &str,
   ) -> Result<Option<LogItem>, Error> {
     let item_key = HashMap::from([
       (
         LOG_TABLE_FIELD_BACKUP_ID.to_string(),
         AttributeValue::S(backup_id.to_string()),
       ),
       (
         LOG_TABLE_FIELD_LOG_ID.to_string(),
         AttributeValue::S(log_id.to_string()),
       ),
     ]);

     match self
       .client
       .get_item()
       .table_name(LOG_TABLE_NAME)
       .set_key(Some(item_key))
       .send()
       .await
       .map_err(|e| {
         error!("DynamoDB client failed to find log item");
         Error::AwsSdk(e.into())
       })? {
       GetItemOutput {
         item: Some(item), ..
       } => {
         let log_item = parse_log_item(item)?;
         Ok(Some(log_item))
       }
       _ => Ok(None),
     }
   }

   pub async fn find_log_items_for_backup(
     &self,
     backup_id: &str,
   ) -> Result<Vec<LogItem>, Error> {
     let response = self
       .client
       .query()
       .table_name(LOG_TABLE_NAME)
       .key_condition_expression("#backupID = :valueToMatch")
       .expression_attribute_names("#backupID", LOG_TABLE_FIELD_BACKUP_ID)
       .expression_attribute_values(
         ":valueToMatch",
         AttributeValue::S(backup_id.to_string()),
       )
       .send()
       .await
       .map_err(|e| {
         error!("DynamoDB client failed to find log items for backup");
         Error::AwsSdk(e.into())
       })?;

     if response.count == 0 {
       return Ok(Vec::new());
     }

     let mut results: Vec<LogItem> =
       Vec::with_capacity(response.count() as usize);
     for item in response.items.unwrap_or_default() {
       let log_item = parse_log_item(item)?;
       results.push(log_item);
     }
     Ok(results)
   }

   pub async fn remove_log_item(&self, log_id: &str) -> Result<(), Error> {
     self
       .client
       .delete_item()
       .table_name(LOG_TABLE_NAME)
       .key(
         LOG_TABLE_FIELD_LOG_ID,
         AttributeValue::S(log_id.to_string()),
       )
       .send()
       .await
       .map_err(|e| {
         error!("DynamoDB client failed to remove log item");
         Error::AwsSdk(e.into())
       })?;

     Ok(())
   }
 }

 #[derive(
   Debug, derive_more::Display, derive_more::From, derive_more::Error,
 )]
 pub enum Error {
   #[display(...)]
   AwsSdk(DynamoDBError),
   #[display(...)]
   Attribute(DBItemError),
 }

 #[derive(Debug, derive_more::Error, derive_more::Constructor)]
 pub struct DBItemError {
   attribute_name: &'static str,
   attribute_value: Option<AttributeValue>,
   attribute_error: DBItemAttributeError,
 }

 impl Display for DBItemError {
   fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
     match &self.attribute_error {
       DBItemAttributeError::Missing => {
         write!(f, "Attribute {} is missing", self.attribute_name)
       }
       DBItemAttributeError::IncorrectType => write!(
         f,
         "Value for attribute {} has incorrect type: {:?}",
         self.attribute_name, self.attribute_value
       ),
       error => write!(
         f,
         "Error regarding attribute {} with value {:?}: {}",
         self.attribute_name, self.attribute_value, error
       ),
     }
   }
 }

 #[derive(Debug, derive_more::Display, derive_more::Error)]
 pub enum DBItemAttributeError {
   #[display(...)]
   Missing,
   #[display(...)]
   IncorrectType,
   #[display(...)]
   InvalidTimestamp(chrono::ParseError),
 }

 fn parse_string_attribute(
   attribute_name: &'static str,
   attribute_value: Option<AttributeValue>,
 ) -> Result<String, DBItemError> {
   match attribute_value {
     Some(AttributeValue::S(value)) => Ok(value),
     Some(_) => Err(DBItemError::new(
       attribute_name,
       attribute_value,
       DBItemAttributeError::IncorrectType,
     )),
     None => Err(DBItemError::new(
       attribute_name,
       attribute_value,
       DBItemAttributeError::Missing,
     )),
   }
 }

 fn parse_bool_attribute(
   attribute_name: &'static str,
   attribute_value: Option<AttributeValue>,
 ) -> Result<bool, DBItemError> {
   match attribute_value {
     Some(AttributeValue::Bool(value)) => Ok(value),
     Some(_) => Err(DBItemError::new(
       attribute_name,
       attribute_value,
       DBItemAttributeError::IncorrectType,
     )),
     None => Err(DBItemError::new(
       attribute_name,
       attribute_value,
       DBItemAttributeError::Missing,
     )),
   }
 }
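The parsing helpers all share one contract; a test-style sketch (these functions are private to this module) of the three outcomes for `parse_string_attribute`:

```rust
use aws_sdk_dynamodb::model::AttributeValue;

// A present S attribute yields its inner string.
let ok = parse_string_attribute(
  "userID",
  Some(AttributeValue::S("alice".to_string())),
);
assert_eq!(ok.unwrap(), "alice");

// A present attribute of another type is an IncorrectType error.
let wrong_type =
  parse_string_attribute("userID", Some(AttributeValue::Bool(true)));
assert!(wrong_type.is_err());

// An absent attribute is a Missing error.
assert!(parse_string_attribute("userID", None).is_err());
```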
 fn parse_datetime_attribute(
   attribute_name: &'static str,
   attribute_value: Option<AttributeValue>,
 ) -> Result<DateTime<Utc>, DBItemError> {
   if let Some(AttributeValue::S(datetime)) = &attribute_value {
     // parse() accepts a relaxed RFC3339 string
     datetime.parse().map_err(|e| {
       DBItemError::new(
         attribute_name,
         attribute_value,
         DBItemAttributeError::InvalidTimestamp(e),
       )
     })
   } else {
     Err(DBItemError::new(
       attribute_name,
       attribute_value,
       DBItemAttributeError::Missing,
     ))
   }
 }

 fn parse_backup_item(
   mut item: HashMap<String, AttributeValue>,
 ) -> Result<BackupItem, DBItemError> {
   let user_id = parse_string_attribute(
     BACKUP_TABLE_FIELD_USER_ID,
     item.remove(BACKUP_TABLE_FIELD_USER_ID),
   )?;
   let backup_id = parse_string_attribute(
     BACKUP_TABLE_FIELD_BACKUP_ID,
     item.remove(BACKUP_TABLE_FIELD_BACKUP_ID),
   )?;
   let created = parse_datetime_attribute(
     BACKUP_TABLE_FIELD_CREATED,
     item.remove(BACKUP_TABLE_FIELD_CREATED),
   )?;
   let recovery_data = parse_string_attribute(
     BACKUP_TABLE_FIELD_RECOVERY_DATA,
     item.remove(BACKUP_TABLE_FIELD_RECOVERY_DATA),
   )?;
   let compaction_holder = parse_string_attribute(
     BACKUP_TABLE_FIELD_COMPACTION_HOLDER,
     item.remove(BACKUP_TABLE_FIELD_COMPACTION_HOLDER),
   )?;
   let attachment_holders = parse_string_attribute(
     BACKUP_TABLE_FIELD_ATTACHMENT_HOLDERS,
     item.remove(BACKUP_TABLE_FIELD_ATTACHMENT_HOLDERS),
   )?;
   Ok(BackupItem {
     user_id,
     backup_id,
     created,
     recovery_data,
     compaction_holder,
     attachment_holders,
   })
 }

 fn parse_log_item(
   mut item: HashMap<String, AttributeValue>,
 ) -> Result<LogItem, DBItemError> {
   let backup_id = parse_string_attribute(
     LOG_TABLE_FIELD_BACKUP_ID,
     item.remove(LOG_TABLE_FIELD_BACKUP_ID),
   )?;
   let log_id = parse_string_attribute(
     LOG_TABLE_FIELD_LOG_ID,
     item.remove(LOG_TABLE_FIELD_LOG_ID),
   )?;
   let persisted_in_blob = parse_bool_attribute(
     LOG_TABLE_FIELD_PERSISTED_IN_BLOB,
     item.remove(LOG_TABLE_FIELD_PERSISTED_IN_BLOB),
   )?;
   let value = parse_string_attribute(
     LOG_TABLE_FIELD_VALUE,
     item.remove(LOG_TABLE_FIELD_VALUE),
   )?;
   let data_hash = parse_string_attribute(
     LOG_TABLE_FIELD_DATA_HASH,
     item.remove(LOG_TABLE_FIELD_DATA_HASH),
   )?;
   let attachment_holders = parse_string_attribute(
     LOG_TABLE_FIELD_ATTACHMENT_HOLDERS,
     item.remove(LOG_TABLE_FIELD_ATTACHMENT_HOLDERS),
   )?;
   Ok(LogItem {
     log_id,
     backup_id,
     persisted_in_blob,
     value,
     data_hash,
     attachment_holders,
   })
 }
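Because `put_backup_item` writes with the same field constants that `parse_backup_item` reads, the two functions round-trip; a sketch over a hand-built item map (values hypothetical):

```rust
use std::collections::HashMap;
use aws_sdk_dynamodb::model::AttributeValue;

// A raw DynamoDB item shaped the way put_backup_item serializes it.
let raw = HashMap::from([
  ("userID".to_string(), AttributeValue::S("alice".to_string())),
  ("backupID".to_string(), AttributeValue::S("backup-1".to_string())),
  (
    "created".to_string(),
    AttributeValue::S(chrono::Utc::now().to_rfc3339()),
  ),
  ("recoveryData".to_string(), AttributeValue::S("mock".to_string())),
  (
    "compactionHolder".to_string(),
    AttributeValue::S("holder-1".to_string()),
  ),
  ("attachmentHolders".to_string(), AttributeValue::S(String::new())),
]);

let parsed = parse_backup_item(raw).expect("all attributes present");
assert_eq!(parsed.user_id, "alice");
assert_eq!(parsed.backup_id, "backup-1");
```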
diff --git a/services/backup/src/main.rs b/services/backup/src/main.rs
index 19073ec26..1740ef2b5 100644
--- a/services/backup/src/main.rs
+++ b/services/backup/src/main.rs
@@ -1,51 +1,52 @@
 use anyhow::Result;
 use std::net::SocketAddr;
 use tonic::transport::Server;
 use tracing::{info, Level};
 use tracing_subscriber::EnvFilter;

 use crate::service::{BackupServiceServer, MyBackupService};

 pub mod blob;
 pub mod config;
 pub mod constants;
 pub mod database;
 pub mod service;
+pub mod utils;

 // re-export this to be available as crate::CONFIG
 pub use config::CONFIG;

 fn configure_logging() -> Result<()> {
   let filter = EnvFilter::builder()
     .with_default_directive(Level::INFO.into())
     .with_env_var(constants::LOG_LEVEL_ENV_VAR)
     .from_env_lossy();

   let subscriber = tracing_subscriber::fmt().with_env_filter(filter).finish();
   tracing::subscriber::set_global_default(subscriber)?;
   Ok(())
 }

 async fn run_grpc_server(db: database::DatabaseClient) -> Result<()> {
   let addr: SocketAddr = format!("[::]:{}", CONFIG.listening_port).parse()?;
   let backup_service = MyBackupService::new(db);

   info!("Starting gRPC server listening at {}", addr.to_string());
   Server::builder()
     .add_service(BackupServiceServer::new(backup_service))
     .serve(addr)
     .await?;

   Ok(())
 }

 #[tokio::main]
 async fn main() -> Result<()> {
   config::parse_cmdline_args();
   configure_logging()?;

   let aws_config = config::load_aws_config().await;
   let db = database::DatabaseClient::new(&aws_config);

   run_grpc_server(db).await
 }
diff --git a/services/backup/src/service/handlers/create_backup.rs b/services/backup/src/service/handlers/create_backup.rs
index 2ea384441..3084ca01b 100644
--- a/services/backup/src/service/handlers/create_backup.rs
+++ b/services/backup/src/service/handlers/create_backup.rs
@@ -1,87 +1,96 @@
 use tonic::Status;

 use crate::{blob::PutClient, database::DatabaseClient, service::proto};

 type CreateBackupResult = Result<proto::CreateNewBackupResponse, Status>;

 enum HandlerState {
   /// Initial state. Handler is receiving non-data inputs
   ReceivingParams,
   /// Handler is receiving data chunks
   ReceivingData { blob_client: PutClient },
   /// A special case when Blob service claims that a blob with given
   /// [`CreateBackupHandler::data_hash`] already exists
   DataAlreadyExists,
 }

 pub struct CreateBackupHandler {
   // flow control
   pub should_close_stream: bool,

   // inputs
   user_id: Option<String>,
   device_id: Option<String>,
   key_entropy: Option<Vec<u8>>,
   data_hash: Option<String>,

   // client instances
   db: DatabaseClient,

   // internal state
   state: HandlerState,
   backup_id: String,
   holder: Option<String>,
 }

 impl CreateBackupHandler {
   pub fn new(db: &DatabaseClient) -> Self {
     CreateBackupHandler {
       should_close_stream: false,
       user_id: None,
       device_id: None,
       key_entropy: None,
       data_hash: None,
       db: db.clone(),
       state: HandlerState::ReceivingParams,
       backup_id: String::new(),
       holder: None,
     }
   }

   pub async fn handle_user_id(
     &mut self,
     user_id: String,
   ) -> CreateBackupResult {
     unimplemented!()
   }

   pub async fn handle_device_id(
     &mut self,
     device_id: String,
   ) -> CreateBackupResult {
     unimplemented!()
   }

   pub async fn handle_key_entropy(
     &mut self,
     key_entropy: Vec<u8>,
   ) -> CreateBackupResult {
     unimplemented!()
   }

   pub async fn handle_data_hash(
     &mut self,
     data_hash: Vec<u8>,
   ) -> CreateBackupResult {
     unimplemented!()
   }

   pub async fn handle_data_chunk(
     &mut self,
     data_chunk: Vec<u8>,
   ) -> CreateBackupResult {
     unimplemented!()
   }

   /// This function should be called after the input stream is finished.
   pub async fn finish(self) -> Result<(), Status> {
     unimplemented!()
   }
 }
+
+/// Generates ID for a new backup
+fn generate_backup_id(device_id: &str) -> String {
+  format!(
+    "{device_id}_{timestamp}",
+    device_id = device_id,
+    timestamp = chrono::Utc::now().timestamp_millis()
+  )
+}
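An illustration of the ID shape this helper produces; the device ID is hypothetical:

```rust
let backup_id = generate_backup_id("device-123");
// e.g. "device-123_1671540000000": the device ID plus a millisecond
// timestamp, so successive backups from one device sort chronologically.
assert!(backup_id.starts_with("device-123_"));
```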
diff --git a/services/backup/src/service/mod.rs b/services/backup/src/service/mod.rs
index dabb3b19b..f8744f98a 100644
--- a/services/backup/src/service/mod.rs
+++ b/services/backup/src/service/mod.rs
@@ -1,146 +1,167 @@
+use aws_sdk_dynamodb::Error as DynamoDBError;
 use proto::backup_service_server::BackupService;
 use std::pin::Pin;
 use tokio::sync::mpsc;
 use tokio_stream::{wrappers::ReceiverStream, Stream, StreamExt};
 use tonic::{Request, Response, Status};
-use tracing::{debug, error, info, instrument, trace, Instrument};
+use tracing::{debug, error, info, instrument, trace, warn, Instrument};

 use crate::{
-  constants::MPSC_CHANNEL_BUFFER_CAPACITY, database::DatabaseClient,
+  constants::MPSC_CHANNEL_BUFFER_CAPACITY,
+  database::{DatabaseClient, Error as DBError},
 };

 mod proto {
   tonic::include_proto!("backup");
 }
 pub use proto::backup_service_server::BackupServiceServer;

 /// submodule containing gRPC endpoint handler implementations
 mod handlers {
   pub(super) mod create_backup;

   // re-exports for convenient usage in handlers
+  pub(self) use super::handle_db_error;
   pub(self) use super::proto;
 }
 use self::handlers::create_backup::CreateBackupHandler;

 pub struct MyBackupService {
   db: DatabaseClient,
 }

 impl MyBackupService {
   pub fn new(db_client: DatabaseClient) -> Self {
     MyBackupService { db: db_client }
   }
 }

 // gRPC implementation
 #[tonic::async_trait]
 impl BackupService for MyBackupService {
   type CreateNewBackupStream = Pin<
     Box<
       dyn Stream<Item = Result<proto::CreateNewBackupResponse, Status>>
         + Send,
     >,
   >;

   #[instrument(skip_all, fields(device_id, data_hash, backup_id, blob_holder))]
   async fn create_new_backup(
     &self,
     request: Request<tonic::Streaming<proto::CreateNewBackupRequest>>,
   ) -> Result<Response<Self::CreateNewBackupStream>, Status> {
     use proto::create_new_backup_request::Data::*;

     info!("CreateNewBackup request: {:?}", request);
     let mut in_stream = request.into_inner();
     let (tx, rx) = mpsc::channel(MPSC_CHANNEL_BUFFER_CAPACITY);
     let db = self.db.clone();
     let worker = async move {
       let mut handler = CreateBackupHandler::new(&db);
       while let Some(message) = in_stream.next().await {
         let response = match message {
           Ok(proto::CreateNewBackupRequest {
             data: Some(UserId(user_id)),
           }) => handler.handle_user_id(user_id).await,
           Ok(proto::CreateNewBackupRequest {
             data: Some(DeviceId(device_id)),
           }) => handler.handle_device_id(device_id).await,
           Ok(proto::CreateNewBackupRequest {
             data: Some(KeyEntropy(key_entropy)),
           }) => handler.handle_key_entropy(key_entropy).await,
           Ok(proto::CreateNewBackupRequest {
             data: Some(NewCompactionHash(hash)),
           }) => handler.handle_data_hash(hash).await,
           Ok(proto::CreateNewBackupRequest {
             data: Some(NewCompactionChunk(chunk)),
           }) => handler.handle_data_chunk(chunk).await,
           unexpected => {
             error!("Received an unexpected request: {:?}", unexpected);
             Err(Status::unknown("unknown error"))
           }
         };

         trace!("Sending response: {:?}", response);
         if let Err(e) = tx.send(response).await {
           error!("Response was dropped: {}", e);
           break;
         }
         if handler.should_close_stream {
           trace!("Handler requested to close stream");
           break;
         }
       }
       if let Err(status) = handler.finish().await {
         trace!("Sending error response: {:?}", status);
         let _ = tx.send(Err(status)).await;
       }
       debug!("Request finished processing");
     };
     tokio::spawn(worker.in_current_span());

     let out_stream = ReceiverStream::new(rx);
     Ok(Response::new(
       Box::pin(out_stream) as Self::CreateNewBackupStream
     ))
   }

   #[instrument(skip(self))]
   async fn send_log(
     &self,
     _request: Request<tonic::Streaming<proto::SendLogRequest>>,
   ) -> Result<Response<proto::SendLogResponse>, Status> {
     Err(Status::unimplemented("unimplemented"))
   }

   type RecoverBackupKeyStream = Pin<
     Box<
       dyn Stream<Item = Result<proto::RecoverBackupKeyResponse, Status>>
         + Send,
     >,
   >;

   #[instrument(skip(self))]
   async fn recover_backup_key(
     &self,
     _request: Request<tonic::Streaming<proto::RecoverBackupKeyRequest>>,
   ) -> Result<Response<Self::RecoverBackupKeyStream>, Status> {
     Err(Status::unimplemented("unimplemented"))
   }

   type PullBackupStream = Pin<
     Box<dyn Stream<Item = Result<proto::PullBackupResponse, Status>> + Send>,
   >;

   #[instrument(skip(self))]
   async fn pull_backup(
     &self,
     _request: Request<proto::PullBackupRequest>,
   ) -> Result<Response<Self::PullBackupStream>, Status> {
     Err(Status::unimplemented("unimplemented"))
   }

   #[instrument(skip(self))]
   async fn add_attachments(
     &self,
     _request: Request<proto::AddAttachmentsRequest>,
   ) -> Result<Response<proto::AddAttachmentsResponse>, Status> {
     Err(Status::unimplemented("unimplemented"))
   }
 }
+
+/// A helper converting our Database errors into gRPC responses
+fn handle_db_error(db_error: DBError) -> Status {
+  match db_error {
+    DBError::AwsSdk(DynamoDBError::InternalServerError(_))
+    | DBError::AwsSdk(DynamoDBError::ProvisionedThroughputExceededException(
+      _,
+    ))
+    | DBError::AwsSdk(DynamoDBError::RequestLimitExceeded(_)) => {
+      warn!("AWS transient error occurred");
+      Status::unavailable("please retry")
+    }
+    e => {
+      error!("Encountered an unexpected error: {}", e);
+      Status::failed_precondition("unexpected error")
+    }
+  }
+}
diff --git a/services/backup/src/utils.rs b/services/backup/src/utils.rs
new file mode 100644
index 000000000..0921961d1
--- /dev/null
+++ b/services/backup/src/utils.rs
@@ -0,0 +1,28 @@
+use rand::{distributions::DistString, CryptoRng, Rng};
+use uuid::Uuid;
+
+use crate::constants::ID_SEPARATOR;
+
+/// Generates a blob `holder` string used to store backup/log data
+/// in Blob service
+pub fn generate_blob_holder(
+  blob_hash: &str,
+  backup_id: &str,
+  resource_id: Option<&str>,
+) -> String {
+  format!(
+    "{backup_id}{sep}{resource_id}{sep}{blob_hash}{sep}{uuid}",
+    backup_id = backup_id,
+    resource_id = resource_id.unwrap_or_default(),
+    blob_hash = blob_hash,
+    sep = ID_SEPARATOR,
+    uuid = Uuid::new_v4()
+  )
+}
+
+pub fn generate_random_string(
+  length: usize,
+  rng: &mut (impl Rng + CryptoRng),
+) -> String {
+  rand::distributions::Alphanumeric.sample_string(rng, length)
+}
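An illustration of the holder shape `generate_blob_holder` produces, assuming the blob hash and backup ID (hypothetical here) contain no `:` themselves:

```rust
let holder = generate_blob_holder("some-blob-hash", "backup-1", None);
// e.g. "backup-1::some-blob-hash:550e8400-e29b-41d4-a716-446655440000":
// backup ID, empty resource ID, blob hash, and a random UUID, joined by
// ID_SEPARATOR. The UUID suffix keeps holders unique even when the same
// blob is referenced more than once.
assert_eq!(holder.matches(ID_SEPARATOR).count(), 3);
```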