diff --git a/native/native_rust_library/src/backup/upload_handler.rs b/native/native_rust_library/src/backup/upload_handler.rs
index 1bfa4ebfd..fa4dc69cf 100644
--- a/native/native_rust_library/src/backup/upload_handler.rs
+++ b/native/native_rust_library/src/backup/upload_handler.rs
@@ -1,337 +1,344 @@
 use super::file_info::BackupFileInfo;
 use super::get_user_identity_from_secure_store;
 use crate::backup::compaction_upload_promises;
 use crate::constants::BACKUP_SERVICE_CONNECTION_RETRY_DELAY;
 use crate::ffi::{
   get_backup_directory_path, get_backup_file_path, get_backup_log_file_path,
   get_backup_user_keys_file_path, get_siwe_backup_message_path,
 };
 use crate::BACKUP_SOCKET_ADDR;
 use crate::RUNTIME;
 use backup_client::UserIdentity;
 use backup_client::{
   BackupClient, Error as BackupError, LogUploadConfirmation, Stream, StreamExt,
 };
 use backup_client::{BackupData, Sink, UploadLogRequest};
 use lazy_static::lazy_static;
 use std::collections::HashSet;
 use std::convert::Infallible;
 use std::error::Error;
 use std::future::Future;
 use std::io::BufRead;
 use std::io::ErrorKind;
 use std::path::PathBuf;
 use std::pin::Pin;
 use std::sync::{Arc, Mutex};
 use tokio::sync::Notify;
 use tokio::task::JoinHandle;

 lazy_static! {
   pub static ref UPLOAD_HANDLER: Arc<Mutex<Option<JoinHandle<Infallible>>>> =
     Arc::new(Mutex::new(None));
   static ref TRIGGER_BACKUP_FILE_UPLOAD: Arc<Notify> = Arc::new(Notify::new());
   static ref BACKUP_FOLDER_PATH: PathBuf = PathBuf::from(
     get_backup_directory_path().expect("Getting backup directory path failed")
   );
 }

 pub mod ffi {
   use super::*;

   pub fn start_backup_handler() -> Result<(), Box<dyn Error>> {
     let mut handle = UPLOAD_HANDLER.lock()?;
     match handle.take() {
       // Don't start backup handler if it's already running
       Some(handle) if !handle.is_finished() => (),
       _ => {
         *handle = Some(RUNTIME.spawn(super::start()?));
       }
     }

     Ok(())
   }

   pub fn stop_backup_handler() -> Result<(), Box<dyn Error>> {
     let Some(handler) = UPLOAD_HANDLER.lock()?.take() else {
       return Ok(());
     };
     if handler.is_finished() {
       return Ok(());
     }

     handler.abort();
     Ok(())
   }

   pub fn trigger_backup_file_upload() {
     TRIGGER_BACKUP_FILE_UPLOAD.notify_one();
   }
 }

 pub fn start() -> Result<impl Future<Output = Infallible>, Box<dyn Error>> {
   let backup_client = BackupClient::new(BACKUP_SOCKET_ADDR)?;
   let user_identity = get_user_identity_from_secure_store()?;

   Ok(async move {
     loop {
       let (tx, rx) = match backup_client.upload_logs(&user_identity).await {
         Ok(ws) => ws,
         Err(err) => {
           println!(
             "Backup handler error when establishing connection: '{err:?}'"
           );
           tokio::time::sleep(BACKUP_SERVICE_CONNECTION_RETRY_DELAY).await;
           continue;
         }
       };

       let mut tx = Box::pin(tx);
       let mut rx = Box::pin(rx);

       let logs_waiting_for_confirmation =
         Mutex::new(HashSet::<PathBuf>::new());

       loop {
         let err = tokio::select! {
           Err(err) = watch_and_upload_files(&backup_client, &user_identity, &mut tx, &logs_waiting_for_confirmation) => err,
           Err(err) = delete_confirmed_logs(&mut rx, &logs_waiting_for_confirmation) => err,
         };

         println!("Backup handler error: '{err:?}'");
         match err {
           BackupHandlerError::BackupError(_)
           | BackupHandlerError::WSClosed
           | BackupHandlerError::LockError => break,
           BackupHandlerError::IoError(_)
           | BackupHandlerError::CxxException(_) => continue,
           BackupHandlerError::FromUtf8Error(_) => break,
         }
       }

       tokio::time::sleep(BACKUP_SERVICE_CONNECTION_RETRY_DELAY).await;
       println!("Retrying backup log upload");
     }
   })
 }

 async fn watch_and_upload_files(
   backup_client: &BackupClient,
   user_identity: &UserIdentity,
   tx: &mut Pin<Box<impl Sink<UploadLogRequest, Error = BackupError>>>,
   logs_waiting_for_confirmation: &Mutex<HashSet<PathBuf>>,
 ) -> Result<Infallible, BackupHandlerError> {
   loop {
     let mut file_stream =
       match tokio::fs::read_dir(&*BACKUP_FOLDER_PATH).await {
         Ok(file_stream) => file_stream,
         Err(err) if err.kind() == ErrorKind::NotFound => {
           TRIGGER_BACKUP_FILE_UPLOAD.notified().await;
           continue;
         }
         Err(err) => return Err(err.into()),
       };

     while let Some(file) = file_stream.next_entry().await? {
       let path = file.path();

       if logs_waiting_for_confirmation.lock()?.contains(&path) {
         continue;
       }

       let Ok(BackupFileInfo {
         backup_id,
         log_id,
         additional_data,
       }) = path.clone().try_into()
       else {
         continue;
       };

       // Skip additional data files (attachments, user keys). They will be
       // handled when we iterate over the corresponding files with the
       // main content
       if additional_data.is_some() {
         continue;
       }

       if let Some(log_id) = log_id {
         log::upload_files(tx, backup_id, log_id).await?;
         logs_waiting_for_confirmation.lock()?.insert(path);
       } else {
         compaction::upload_files(backup_client, user_identity, backup_id)
           .await?;
       }
     }

     TRIGGER_BACKUP_FILE_UPLOAD.notified().await;
   }
 }

 async fn delete_confirmed_logs(
   rx: &mut Pin<
     Box<impl Stream<Item = Result<LogUploadConfirmation, BackupError>>>,
   >,
   logs_waiting_for_confirmation: &Mutex<HashSet<PathBuf>>,
 ) -> Result<Infallible, BackupHandlerError> {
   while let Some(LogUploadConfirmation { backup_id, log_id }) =
     rx.next().await.transpose()?
   {
     let path =
       get_backup_log_file_path(&backup_id, &log_id.to_string(), false)?;
     logs_waiting_for_confirmation
       .lock()?
       .remove(&PathBuf::from(path));

     tokio::spawn(log::cleanup_files(backup_id, log_id));
   }

   Err(BackupHandlerError::WSClosed)
 }

 pub mod compaction {
   use super::*;

   pub async fn upload_files(
     backup_client: &BackupClient,
     user_identity: &UserIdentity,
     backup_id: String,
   ) -> Result<(), BackupHandlerError> {
     let user_data_path = get_backup_file_path(&backup_id, false)?;
-    let user_data = tokio::fs::read(&user_data_path).await?;
-
+    let user_data = match tokio::fs::read(&user_data_path).await {
+      Ok(data) => Some(data),
+      Err(err) if err.kind() == ErrorKind::NotFound => None,
+      Err(err) => return Err(err.into()),
+    };
     let user_keys_path = get_backup_user_keys_file_path(&backup_id)?;
-    let user_keys = tokio::fs::read(&user_keys_path).await?;
+    let user_keys = match tokio::fs::read(&user_keys_path).await {
+      Ok(data) => Some(data),
+      Err(err) if err.kind() == ErrorKind::NotFound => None,
+      Err(err) => return Err(err.into()),
+    };

     let attachments_path = get_backup_file_path(&backup_id, true)?;
     let attachments = match tokio::fs::read(&attachments_path).await {
       Ok(data) => data.lines().collect::<Result<_, _>>()?,
       Err(err) if err.kind() == ErrorKind::NotFound => Vec::new(),
       Err(err) => return Err(err.into()),
     };

     let siwe_backup_msg_path = get_siwe_backup_message_path(&backup_id)?;
     let siwe_backup_msg = match tokio::fs::read(&siwe_backup_msg_path).await {
       Ok(data) => match String::from_utf8(data) {
         Ok(valid_string) => Some(valid_string),
         Err(err) => return Err(err.into()),
       },
       Err(err) if err.kind() == ErrorKind::NotFound => None,
       Err(err) => return Err(err.into()),
     };

     let backup_data = BackupData {
       backup_id: backup_id.clone(),
       user_data,
       user_keys,
       attachments,
       siwe_backup_msg,
     };

     backup_client
       .upload_backup(user_identity, backup_data)
       .await?;

     compaction_upload_promises::resolve(&backup_id, Ok(()));
     tokio::spawn(cleanup_files(backup_id));

     Ok(())
   }

   pub async fn cleanup_files(backup_id: String) {
     let backup_files_cleanup = async {
       let user_data_path = get_backup_file_path(&backup_id, false)?;
       tokio::fs::remove_file(&user_data_path).await?;

       let user_keys_path = get_backup_user_keys_file_path(&backup_id)?;
       tokio::fs::remove_file(&user_keys_path).await?;

       let attachments_path = get_backup_file_path(&backup_id, true)?;
       match tokio::fs::remove_file(&attachments_path).await {
         Ok(()) => Result::<_, Box<dyn Error>>::Ok(()),
         Err(err) if err.kind() == ErrorKind::NotFound => Ok(()),
         Err(err) => Err(err.into()),
       }?;

       let siwe_backup_msg_path = get_siwe_backup_message_path(&backup_id)?;
       match tokio::fs::remove_file(&siwe_backup_msg_path).await {
         Ok(()) => Result::<_, Box<dyn Error>>::Ok(()),
         Err(err) if err.kind() == ErrorKind::NotFound => Ok(()),
         Err(err) => Err(err.into()),
       }
     };

     if let Err(err) = backup_files_cleanup.await {
       println!("Error when cleaning up the backup files: {err:?}");
     }
   }
 }

 mod log {
   use backup_client::SinkExt;

   use super::*;

   pub async fn upload_files(
     tx: &mut Pin<Box<impl Sink<UploadLogRequest, Error = BackupError>>>,
     backup_id: String,
     log_id: usize,
   ) -> Result<(), BackupHandlerError> {
     let log_id_string = log_id.to_string();
     let content_path =
       get_backup_log_file_path(&backup_id, &log_id_string, false)?;
     let content = tokio::fs::read(&content_path).await?;

     let attachments_path =
       get_backup_log_file_path(&backup_id, &log_id_string, true)?;
     let attachments = match tokio::fs::read(&attachments_path).await {
       Ok(data) => Some(data.lines().collect::<Result<_, _>>()?),
       Err(err) if err.kind() == ErrorKind::NotFound => None,
       Err(err) => return Err(err.into()),
     };

     let log_data = UploadLogRequest {
       backup_id,
       log_id,
       content,
       attachments,
     };
     tx.send(log_data.clone()).await?;

     Ok(())
   }

   pub async fn cleanup_files(backup_id: String, log_id: usize) {
     let backup_files_cleanup = async {
       let log_id = log_id.to_string();

       let path = get_backup_log_file_path(&backup_id, &log_id, false)?;
       tokio::fs::remove_file(&path).await?;

       let attachments_path =
         get_backup_log_file_path(&backup_id, &log_id, true)?;
       match tokio::fs::remove_file(&attachments_path).await {
         Ok(()) => Result::<_, Box<dyn Error>>::Ok(()),
         Err(err) if err.kind() == ErrorKind::NotFound => Ok(()),
         Err(err) => Err(err.into()),
       }
     };

     if let Err(err) = backup_files_cleanup.await {
       println!("{err:?}");
     }
   }
 }

 #[derive(
   Debug, derive_more::Display, derive_more::From, derive_more::Error,
 )]
 pub enum BackupHandlerError {
   BackupError(BackupError),
   WSClosed,
   IoError(std::io::Error),
   CxxException(cxx::Exception),
   LockError,
   FromUtf8Error(std::string::FromUtf8Error),
 }

 impl<T> From<std::sync::PoisonError<T>> for BackupHandlerError {
   fn from(_: std::sync::PoisonError<T>) -> Self {
     Self::LockError
   }
 }
diff --git a/services/commtest/src/backup/backup_utils.rs b/services/commtest/src/backup/backup_utils.rs
index 964661ac2..4dcef9e5a 100644
--- a/services/commtest/src/backup/backup_utils.rs
+++ b/services/commtest/src/backup/backup_utils.rs
@@ -1,97 +1,97 @@
 use crate::identity::device::DeviceInfo;
 use crate::tools::generate_stable_nbytes;
 use backup_client::{BackupData, Error as BackupClientError};
 use bytesize::ByteSize;
 use comm_lib::auth::UserIdentity;
 use comm_lib::backup::UploadLogRequest;
 use reqwest::StatusCode;
 use uuid::Uuid;

 pub fn generate_backup_data(predefined_byte_value: u8) -> BackupData {
   BackupData {
     backup_id: Uuid::new_v4().to_string(),
-    user_keys: generate_stable_nbytes(
+    user_keys: Some(generate_stable_nbytes(
       ByteSize::kib(4).as_u64() as usize,
       Some(predefined_byte_value),
-    ),
-    user_data: generate_stable_nbytes(
+    )),
+    user_data: Some(generate_stable_nbytes(
       ByteSize::mib(4).as_u64() as usize,
       Some(predefined_byte_value),
-    ),
+    )),
     attachments: vec![],
     siwe_backup_msg: None,
   }
 }

 fn generate_log_data(backup_id: &str, value: u8) -> Vec<UploadLogRequest> {
   const IN_DB_SIZE: usize = ByteSize::kib(4).as_u64() as usize;
   const IN_BLOB_SIZE: usize = ByteSize::kib(400).as_u64() as usize;

   (1..30)
     .map(|log_id| {
       let size = if log_id % 2 == 0 {
         IN_DB_SIZE
       } else {
         IN_BLOB_SIZE
       };
       let attachments = if log_id % 10 == 0 {
         Some(vec![Uuid::new_v4().to_string()])
       } else {
         None
       };
       let mut content = generate_stable_nbytes(size, Some(value));
       let unique_suffix = log_id.to_string();
       content.extend(unique_suffix.as_bytes());

       UploadLogRequest {
         backup_id: backup_id.to_string(),
         log_id,
         content,
         attachments,
       }
     })
     .collect()
 }

 pub fn generate_backup_data_with_logs(
   predefined_byte_values: Vec<u8>,
 ) -> Vec<(BackupData, Vec<UploadLogRequest>)> {
   predefined_byte_values
     .into_iter()
     .map(|byte_value| {
       let backup_data = generate_backup_data(byte_value);
       let log_data = generate_log_data(&backup_data.backup_id, byte_value);
       (backup_data, log_data)
     })
     .collect()
 }

 pub fn assert_reqwest_error<T: std::fmt::Debug>(
   response: Result<T, BackupClientError>,
   expected_status: StatusCode,
 ) {
   match response {
     Err(BackupClientError::ReqwestError(error)) => {
       assert_eq!(
         error.status(),
         Some(expected_status),
         "Expected status {}",
         expected_status
       );
     }
     Err(err) => panic!(
       "Backup should return ReqwestError, instead got response: {:?}",
       err
     ),
     Ok(_) => panic!("Backup should return BackupClientError"),
   }
 }

 pub fn create_user_identity(device_info: DeviceInfo) -> UserIdentity {
   UserIdentity {
     user_id: device_info.user_id.clone(),
     access_token: device_info.access_token.clone(),
     device_id: device_info.device_id.clone(),
   }
 }
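Note: the reads added in compaction::upload_files all share one shape: a missing file becomes None (and the upload proceeds with whichever parts exist) instead of failing the whole compaction. A hypothetical helper capturing that pattern, shown only for illustration and not part of this diff:

    async fn read_optional(
      path: &std::path::Path,
    ) -> std::io::Result<Option<Vec<u8>>> {
      match tokio::fs::read(path).await {
        // File present: return its contents
        Ok(data) => Ok(Some(data)),
        // A missing file is an expected case, mapped to None
        Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(None),
        // Any other IO error still propagates
        Err(err) => Err(err),
      }
    }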
diff --git a/services/commtest/tests/backup_integration_test.rs b/services/commtest/tests/backup_integration_test.rs
index c77533f69..e5f2d3de6 100644
--- a/services/commtest/tests/backup_integration_test.rs
+++ b/services/commtest/tests/backup_integration_test.rs
@@ -1,140 +1,140 @@
 use backup_client::{
   BackupClient, BackupDescriptor, DownloadedLog, LogUploadConfirmation,
   RequestedData, SinkExt, StreamExt, TryStreamExt,
 };
 use comm_lib::backup::LatestBackupInfoResponse;
 use commtest::backup::backup_utils::{
   assert_reqwest_error, create_user_identity, generate_backup_data_with_logs,
 };
 use commtest::identity::device::register_user_device;
 use commtest::{service_addr, tools::Error};
 use grpc_clients::identity::DeviceType;
 use reqwest::StatusCode;
 use std::collections::HashSet;

 #[tokio::test]
 async fn backup_integration_test() -> Result<(), Error> {
   let backup_client = BackupClient::new(service_addr::BACKUP_SERVICE_HTTP)?;

   let device_info = register_user_device(None, Some(DeviceType::Ios)).await;
   let user_identity = create_user_identity(device_info.clone());

   let backup_datas = generate_backup_data_with_logs(vec![b'a', b'b']);

   // Upload backups
   for (backup_data, log_datas) in &backup_datas {
     backup_client
       .upload_backup(&user_identity, backup_data.clone())
       .await?;

     let (mut tx, rx) = backup_client.upload_logs(&user_identity).await?;

     for log_data in log_datas {
       tx.send(log_data.clone()).await?;
     }

     let result: HashSet<LogUploadConfirmation> =
       rx.take(log_datas.len()).try_collect().await?;
     let expected = log_datas
       .iter()
       .map(|data| LogUploadConfirmation {
         backup_id: data.backup_id.clone(),
         log_id: data.log_id,
       })
       .collect();

     assert_eq!(result, expected);
   }

   // Test direct lookup
   let (backup_data, log_datas) = &backup_datas[1];

   let second_backup_descriptor = BackupDescriptor::BackupID {
     backup_id: backup_data.backup_id.clone(),
     user_identity: user_identity.clone(),
   };

   let user_keys = backup_client
     .download_backup_data(&second_backup_descriptor, RequestedData::UserKeys)
     .await?;
-  assert_eq!(user_keys, backup_data.user_keys);
+  assert_eq!(Some(user_keys), backup_data.user_keys);

   let user_data = backup_client
     .download_backup_data(&second_backup_descriptor, RequestedData::UserData)
     .await?;
-  assert_eq!(user_data, backup_data.user_data);
+  assert_eq!(Some(user_data), backup_data.user_data);

   // Test latest backup lookup for nonexistent user
   let latest_backup_descriptor = BackupDescriptor::Latest {
     user_identifier: "nonexistent_user".to_string(),
   };

   let nonexistent_user_response = backup_client
     .download_backup_data(&latest_backup_descriptor, RequestedData::BackupInfo)
     .await;

   assert_reqwest_error(nonexistent_user_response, StatusCode::BAD_REQUEST);

   // Test latest backup lookup
   let latest_backup_descriptor = BackupDescriptor::Latest {
     user_identifier: device_info.username,
   };

   let backup_info_response = backup_client
     .download_backup_data(&latest_backup_descriptor, RequestedData::BackupInfo)
     .await?;
   let response: LatestBackupInfoResponse =
     serde_json::from_slice(&backup_info_response)?;
   assert_eq!(response.backup_id, backup_data.backup_id);
   assert_eq!(response.user_id, device_info.user_id);

   let user_keys = backup_client
     .download_backup_data(&latest_backup_descriptor, RequestedData::UserKeys)
     .await?;
-  assert_eq!(user_keys, backup_data.user_keys);
+  assert_eq!(Some(user_keys), backup_data.user_keys);

   // Test log download
   let log_stream = backup_client
     .download_logs(&user_identity, &backup_data.backup_id)
     .await;

   let downloaded_logs: Vec<DownloadedLog> = log_stream.try_collect().await?;

   let expected_logs: Vec<DownloadedLog> = log_datas
     .iter()
     .map(|data| DownloadedLog {
       content: data.content.clone(),
       attachments: data.attachments.clone(),
     })
     .collect();
   assert_eq!(downloaded_logs, expected_logs);

   // Test backup cleanup
   let (removed_backup, _) = &backup_datas[0];
   let removed_backup_descriptor = BackupDescriptor::BackupID {
     backup_id: removed_backup.backup_id.clone(),
     user_identity: user_identity.clone(),
   };
   let response = backup_client
     .download_backup_data(&removed_backup_descriptor, RequestedData::UserKeys)
     .await;
   assert_reqwest_error(response, StatusCode::NOT_FOUND);

   // Test log cleanup
   let log_stream = backup_client
     .download_logs(&user_identity, &removed_backup.backup_id)
     .await;

   let downloaded_logs: Vec<DownloadedLog> = log_stream.try_collect().await?;

   if !downloaded_logs.is_empty() {
     panic!(
       "Logs for first backup should have been removed, \
       instead got: {downloaded_logs:?}"
     )
   }

   Ok(())
 }
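Note: download_backup_data (see shared/backup_client/src/lib.rs below) still returns plain bytes; only the upload side becomes optional. The assertions above therefore wrap the downloaded value before comparing against the now-optional fields, e.g. assert_eq!(Some(user_keys), backup_data.user_keys).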
diff --git a/services/commtest/tests/backup_performance_test.rs b/services/commtest/tests/backup_performance_test.rs
index 98f84d109..bdd5daa54 100644
--- a/services/commtest/tests/backup_performance_test.rs
+++ b/services/commtest/tests/backup_performance_test.rs
@@ -1,169 +1,169 @@
 use backup_client::{BackupClient, BackupDescriptor, RequestedData};
 use comm_lib::backup::LatestBackupInfoResponse;
 use commtest::backup::backup_utils::{
   create_user_identity, generate_backup_data,
 };
 use commtest::identity::device::register_user_device;
 use commtest::{
   service_addr,
   tools::{obtain_number_of_threads, Error},
 };
 use grpc_clients::identity::DeviceType;
 use tokio::{runtime::Runtime, task::JoinSet};

 #[tokio::test]
 async fn backup_performance_test() -> Result<(), Error> {
   let backup_client = BackupClient::new(service_addr::BACKUP_SERVICE_HTTP)?;

   let number_of_threads = obtain_number_of_threads();
   let rt = Runtime::new().unwrap();

   println!(
     "Running performance tests for backup, number of threads: {}",
     number_of_threads
   );

   let backup_data: Vec<_> = (0..number_of_threads)
     .map(|i| generate_backup_data(i as u8))
     .collect();

   let device_info_1 = register_user_device(None, Some(DeviceType::Ios)).await;
   let device_info_2 = register_user_device(None, Some(DeviceType::Ios)).await;

   let user_identities = [
     create_user_identity(device_info_1.clone()),
     create_user_identity(device_info_2.clone()),
   ];

   tokio::task::spawn_blocking(move || {
     println!("Creating new backups");
     rt.block_on(async {
       let mut set = JoinSet::new();
       for (i, item) in backup_data.iter().cloned().enumerate() {
         let backup_client = backup_client.clone();
         let user = user_identities[i % user_identities.len()].clone();

         set.spawn(async move {
           backup_client.upload_backup(&user, item).await.unwrap();
         });
       }

       while let Some(result) = set.join_next().await {
         result.unwrap();
       }
     });

     let mut latest_ids_for_user = vec![];
     println!("Reading latest ids");
     rt.block_on(async {
       let mut handlers = vec![];
       for user in &user_identities {
         let backup_client = backup_client.clone();
         let user_identifier = if user.user_id == device_info_1.user_id {
           device_info_1.username.clone()
         } else {
           device_info_2.username.clone()
         };
         let descriptor = BackupDescriptor::Latest { user_identifier };
         handlers.push(tokio::spawn(async move {
           let response = backup_client
             .download_backup_data(&descriptor, RequestedData::BackupInfo)
             .await
             .unwrap();
           serde_json::from_slice::<LatestBackupInfoResponse>(&response)
             .unwrap()
         }));
       }

       for handler in handlers {
         latest_ids_for_user.push(handler.await.unwrap().backup_id);
       }
     });

     assert_eq!(latest_ids_for_user.len(), user_identities.len());

     let mut latest_user_keys_for_user = vec![];
     println!("Reading latest user keys");
     rt.block_on(async {
       let mut handlers = vec![];
       for user in &user_identities {
         let backup_client = backup_client.clone();
         let user_identifier = if user.user_id == device_info_1.user_id {
           device_info_1.username.clone()
         } else {
           device_info_2.username.clone()
         };
         let descriptor = BackupDescriptor::Latest { user_identifier };
         handlers.push(tokio::spawn(async move {
           backup_client
             .download_backup_data(&descriptor, RequestedData::UserKeys)
             .await
             .unwrap()
         }));
       }

       for handler in handlers {
         latest_user_keys_for_user.push(handler.await.unwrap());
       }
     });

     assert_eq!(latest_user_keys_for_user.len(), user_identities.len());

     for (backup_id, user_keys) in
       latest_ids_for_user.iter().zip(latest_user_keys_for_user)
     {
       let backup = backup_data
         .iter()
         .find(|data| data.backup_id == *backup_id)
         .expect("Request should return existing backup data");
-      assert_eq!(backup.user_keys, user_keys);
+      assert_eq!(backup.user_keys, Some(user_keys));
     }

     let mut latest_user_data_for_user = vec![];
     println!("Reading latest user data");
     rt.block_on(async {
       let mut handlers = vec![];
       for (i, backup_id) in latest_ids_for_user.iter().enumerate() {
         let backup_client = backup_client.clone();
         let descriptor = BackupDescriptor::BackupID {
           backup_id: backup_id.clone(),
           user_identity: user_identities[i % user_identities.len()].clone(),
         };
         handlers.push(tokio::spawn(async move {
           backup_client
             .download_backup_data(&descriptor, RequestedData::UserData)
             .await
             .unwrap()
         }));
       }

       for handler in handlers {
         latest_user_data_for_user.push(handler.await.unwrap());
       }
     });

     assert_eq!(latest_user_data_for_user.len(), user_identities.len());

     for (backup_id, user_data) in
       latest_ids_for_user.iter().zip(latest_user_data_for_user)
     {
       let backup = backup_data
         .iter()
         .find(|data| data.backup_id == *backup_id)
         .expect("Request should return existing backup data");
-      assert_eq!(backup.user_data, user_data);
+      assert_eq!(backup.user_data, Some(user_data));
     }
   })
   .await
   .expect("Task panicked");

   Ok(())
 }
diff --git a/shared/backup_client/src/lib.rs b/shared/backup_client/src/lib.rs
index a07f94728..2d971e9d8 100644
--- a/shared/backup_client/src/lib.rs
+++ b/shared/backup_client/src/lib.rs
@@ -1,385 +1,401 @@
 #[cfg(target_arch = "wasm32")]
 mod web;

 use async_stream::{stream, try_stream};
 pub use comm_lib::auth::UserIdentity;
 pub use comm_lib::backup::{
   DownloadLogsRequest, LatestBackupInfoResponse, LogWSRequest, LogWSResponse,
   UploadLogRequest,
 };
 pub use futures_util::{Sink, SinkExt, Stream, StreamExt, TryStreamExt};
 use hex::ToHex;
 use reqwest::{
   header::InvalidHeaderValue,
   multipart::{Form, Part},
   Body,
 };
 use serde::{Deserialize, Serialize};
 use sha2::{Digest, Sha256};
 use std::time::Duration;
 use tokio_tungstenite_wasm::{
   connect, Error as TungsteniteError, Message::Binary,
 };

 const LOG_DOWNLOAD_RETRY_DELAY: Duration = Duration::from_secs(5);
 const LOG_DOWNLOAD_MAX_RETRY: usize = 3;

 #[cfg(target_arch = "wasm32")]
 use wasm_bindgen::prelude::wasm_bindgen;

 #[cfg_attr(target_arch = "wasm32", wasm_bindgen)]
 #[derive(Debug, Clone)]
 pub struct BackupClient {
   url: reqwest::Url,
 }

 impl BackupClient {
   pub fn new<T: TryInto<reqwest::Url, Error = url::ParseError>>(
     url: T,
   ) -> Result<Self, Error> {
     Ok(BackupClient {
       url: url.try_into()?,
     })
   }
 }

 /// Backup functions
 impl BackupClient {
   pub async fn upload_backup(
     &self,
     user_identity: &UserIdentity,
     backup_data: BackupData,
   ) -> Result<(), Error> {
     let BackupData {
       backup_id,
       user_keys,
       user_data,
       attachments,
       siwe_backup_msg,
     } = backup_data;

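+    // Route the upload based on which optional parts are present: both
+    // parts go to the combined `backups` endpoint, a single part goes to
+    // the dedicated `backups/user_data` or `backups/user_keys` endpoint,
+    // and a request carrying neither part is rejected client-side.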
"backups/user_keys", + }; + let client = reqwest::Client::new(); - let mut form = Form::new() - .text("backup_id", backup_id) - .text( - "user_keys_hash", - Sha256::digest(&user_keys).encode_hex::(), - ) - .part("user_keys", Part::stream(Body::from(user_keys))) - .text( - "user_data_hash", - Sha256::digest(&user_data).encode_hex::(), - ) - .part("user_data", Part::stream(Body::from(user_data))) - .text("attachments", attachments.join("\n")); + let mut form = Form::new().text("backup_id", backup_id); + + if let Some(user_keys_value) = user_keys.clone() { + form = form + .text( + "user_keys_hash", + Sha256::digest(&user_keys_value).encode_hex::(), + ) + .part("user_keys", Part::stream(Body::from(user_keys_value))); + } + + if let Some(user_data_value) = user_data.clone() { + form = form + .text( + "user_data_hash", + Sha256::digest(&user_data_value).encode_hex::(), + ) + .part("user_data", Part::stream(Body::from(user_data_value))); + } + + form = form.text("attachments", attachments.join("\n")); if let Some(siwe_backup_msg_value) = siwe_backup_msg { form = form.text("siwe_backup_msg", siwe_backup_msg_value); } let response = client - .post(self.url.join("backups")?) + .post(self.url.join(endpoint)?) .bearer_auth(user_identity.as_authorization_token()?) .multipart(form) .send() .await?; response.error_for_status()?; Ok(()) } pub async fn download_backup_data( &self, backup_descriptor: &BackupDescriptor, requested_data: RequestedData, ) -> Result, Error> { let client = reqwest::Client::new(); let url = self.url.join("backups/")?; let url = match backup_descriptor { BackupDescriptor::BackupID { backup_id, .. } => { url.join(&format!("{backup_id}/"))? } BackupDescriptor::Latest { user_identifier } => { url.join(&format!("latest/{user_identifier}/"))? } }; let url = match &requested_data { RequestedData::BackupInfo => url.join("backup_info")?, RequestedData::UserKeys => url.join("user_keys")?, RequestedData::UserData => url.join("user_data")?, }; let mut request = client.get(url); if let BackupDescriptor::BackupID { user_identity, .. } = backup_descriptor { request = request.bearer_auth(user_identity.as_authorization_token()?) } let response = request.send().await?; let result = response.error_for_status()?.bytes().await?.to_vec(); Ok(result) } } /// Log functions impl BackupClient { pub async fn upload_logs( &self, user_identity: &UserIdentity, ) -> Result< ( impl Sink, impl Stream>, ), Error, > { let (tx, rx) = self.create_log_ws_connection(user_identity).await?; let rx = rx.map(|response| match response? { LogWSResponse::LogUploaded { backup_id, log_id } => { Ok(LogUploadConfirmation { backup_id, log_id }) } LogWSResponse::ServerError => Err(Error::ServerError), msg => Err(Error::InvalidBackupMessage(msg)), }); Ok((tx, rx)) } /// Handles complete log download. /// It will try and retry download a few times, but if the issues persist /// the next item returned will be the last received error and the stream /// will be closed. pub async fn download_logs<'this>( &'this self, user_identity: &'this UserIdentity, backup_id: &'this str, ) -> impl Stream> + 'this { stream! 
{ let mut last_downloaded_log = None; let mut fail_count = 0; 'retry: loop { let stream = self.log_download_stream(user_identity, backup_id, &mut last_downloaded_log).await; let mut stream = Box::pin(stream); while let Some(item) = stream.next().await { match item { Ok(log) => yield Ok(log), Err(err) => { println!("Error when downloading logs: {err:?}"); fail_count += 1; if fail_count >= LOG_DOWNLOAD_MAX_RETRY { yield Err(err); break 'retry; } #[cfg(target_arch = "wasm32")] let _ = web::sleep(LOG_DOWNLOAD_RETRY_DELAY).await; #[cfg(not(target_arch = "wasm32"))] tokio::time::sleep(LOG_DOWNLOAD_RETRY_DELAY).await; continue 'retry; } } } // Everything downloaded return; } println!("Log download failed!"); } } /// Handles singular connection websocket connection. Returns error in case /// anything goes wrong e.g. missing log or connection error. async fn log_download_stream<'stream>( &'stream self, user_identity: &'stream UserIdentity, backup_id: &'stream str, last_downloaded_log: &'stream mut Option, ) -> impl Stream> + 'stream { try_stream! { let (mut tx, mut rx) = self.create_log_ws_connection(user_identity).await?; tx.send(DownloadLogsRequest { backup_id: backup_id.to_string(), from_id: *last_downloaded_log, }) .await?; while let Some(response) = rx.try_next().await? { let expected_log_id = last_downloaded_log.unwrap_or(0); match response { LogWSResponse::LogDownload { content, attachments, log_id, } if log_id == expected_log_id + 1 => { *last_downloaded_log = Some(log_id); yield DownloadedLog { content, attachments, }; } LogWSResponse::LogDownload { .. } => { Err(Error::LogMissing)?; } LogWSResponse::LogDownloadFinished { last_log_id: Some(log_id), } if log_id == expected_log_id => { tx.send(DownloadLogsRequest { backup_id: backup_id.to_string(), from_id: *last_downloaded_log, }) .await? } LogWSResponse::LogDownloadFinished { last_log_id: None } => return, LogWSResponse::LogDownloadFinished { .. } => { Err(Error::LogMissing)?; } msg => Err(Error::InvalidBackupMessage(msg))?, } } Err(Error::WSClosed)?; } } async fn create_log_ws_connection>( &self, user_identity: &UserIdentity, ) -> Result< ( impl Sink, impl Stream>, ), Error, > { let url = self.create_ws_url()?; let stream = connect(url).await?; let (mut tx, rx) = stream.split(); tx.send(Binary(bincode::serialize(&LogWSRequest::Authenticate( user_identity.clone(), ))?)) .await?; let tx = tx.with(|request: Request| async { let request: LogWSRequest = request.into(); let request = bincode::serialize(&request)?; Ok(Binary(request)) }); let rx = rx.filter_map(|msg| async { let bytes = match msg { Ok(Binary(bytes)) => bytes, Ok(_) => return Some(Err(Error::InvalidWSMessage)), Err(err) => return Some(Err(err.into())), }; match bincode::deserialize(&bytes) { Ok(response) => Some(Ok(response)), Err(err) => Some(Err(err.into())), } }); let tx = Box::pin(tx); let mut rx = Box::pin(rx); if let Some(response) = rx.try_next().await? 
{ match response { LogWSResponse::AuthSuccess => {} LogWSResponse::Unauthenticated => Err(Error::Unauthenticated)?, msg => Err(Error::InvalidBackupMessage(msg))?, } } Ok((tx, rx)) } fn create_ws_url(&self) -> Result { let mut url = self.url.clone(); match url.scheme() { "http" => url.set_scheme("ws").map_err(|_| Error::UrlSchemaError)?, "https" => url.set_scheme("wss").map_err(|_| Error::UrlSchemaError)?, _ => (), }; let url = url.join("logs")?; Ok(url) } } #[derive(Debug, Clone)] pub struct BackupData { pub backup_id: String, - pub user_keys: Vec, - pub user_data: Vec, + pub user_keys: Option>, + pub user_data: Option>, pub attachments: Vec, pub siwe_backup_msg: Option, } #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(tag = "type")] pub enum BackupDescriptor { BackupID { #[serde(rename = "backupID")] backup_id: String, #[serde(rename = "userIdentity")] user_identity: UserIdentity, }, Latest { user_identifier: String, }, } #[cfg_attr(target_arch = "wasm32", wasm_bindgen)] #[derive(Debug, Clone)] pub enum RequestedData { BackupInfo, UserKeys, UserData, } #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct LogUploadConfirmation { pub backup_id: String, pub log_id: usize, } #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct DownloadedLog { pub content: Vec, pub attachments: Option>, } #[derive(Debug, derive_more::Display, derive_more::From)] pub enum Error { InvalidAuthorizationHeader, UrlSchemaError, UrlError(url::ParseError), ReqwestError(reqwest::Error), TungsteniteError(TungsteniteError), JsonError(serde_json::Error), BincodeError(bincode::Error), InvalidWSMessage, #[display(fmt = "Error::InvalidBackupMessage({:?})", _0)] InvalidBackupMessage(LogWSResponse), ServerError, LogMissing, WSClosed, Unauthenticated, + InvalidRequest, } impl std::error::Error for Error {} impl From for Error { fn from(_: InvalidHeaderValue) -> Self { Self::InvalidAuthorizationHeader } }
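With this change a caller can upload a single part of a backup, and upload_backup picks the endpoint from whichever fields are set. A minimal usage sketch, assuming a hypothetical local service address and placeholder bytes (BackupClient, BackupData, and UserIdentity as defined above):

    use backup_client::{BackupClient, BackupData, UserIdentity};

    async fn upload_keys_only(
      user_identity: &UserIdentity,
    ) -> Result<(), backup_client::Error> {
      // Hypothetical service address, for illustration only
      let client = BackupClient::new("http://localhost:50052")?;
      let backup_data = BackupData {
        backup_id: "some-backup-id".to_string(),
        // Only User Keys are present, so upload_backup will POST to
        // the `backups/user_keys` endpoint
        user_keys: Some(vec![0; 16]),
        user_data: None,
        attachments: vec![],
        siwe_backup_msg: None,
      };
      client.upload_backup(user_identity, backup_data).await
    }

Setting both fields to None is rejected client-side with Error::InvalidRequest before any request is sent.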