diff --git a/services/commtest/tests/backup/add_attachments.rs b/services/commtest/tests/backup/add_attachments.rs
index 5570e9312..37ad8965f 100644
--- a/services/commtest/tests/backup/add_attachments.rs
+++ b/services/commtest/tests/backup/add_attachments.rs
@@ -1,53 +1,53 @@
 #[path = "./backup_utils.rs"]
 mod backup_utils;
 #[path = "../lib/tools.rs"]
 mod tools;
 
 use crate::backup_utils::{proto::AddAttachmentsRequest, BackupServiceClient};
 use tonic::Request;
 
 use crate::backup_utils::BackupData;
 use crate::tools::{Error, ATTACHMENT_DELIMITER};
 
 // log_index = None means that we add attachments to the backup
 // log_index = Some(x) means that we add attachments to a specific log
 pub async fn run(
   client: &mut BackupServiceClient<tonic::transport::Channel>,
   backup_data: &BackupData,
   log_index: Option<usize>,
 ) -> Result<(), Error> {
   let cloned_user_id = backup_data.user_id.clone();
   let cloned_backup_id = backup_data.backup_item.id.clone();
   let log_id: String = match log_index {
     Some(index) => {
       let log_id = backup_data.log_items[index].id.clone();
       println!("add attachments for log {}/{}", index, log_id);
       log_id
-    },
+    }
     None => {
       println!("add attachments for backup");
       String::new()
-    },
+    }
   };
-  
+
   let holders: String = match log_index {
     Some(log_index) => backup_data.log_items[log_index]
       .attachments_holders
       .join(ATTACHMENT_DELIMITER),
     None => backup_data
       .backup_item
       .attachments_holders
       .join(ATTACHMENT_DELIMITER),
   };
 
   client
     .add_attachments(Request::new(AddAttachmentsRequest {
       user_id: cloned_user_id,
       backup_id: cloned_backup_id,
       log_id,
       holders,
     }))
     .await?;
   Ok(())
 }
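Reviewer sketch (not part of this diff): a minimal caller making the log_index contract above concrete. add_all_attachments is a hypothetical helper; it assumes the commtest harness types (BackupServiceClient, BackupData, Error) imported as in the module itself.

// Hypothetical driver exercising both code paths of add_attachments::run.
async fn add_all_attachments(
  client: &mut BackupServiceClient<tonic::transport::Channel>,
  backup_data: &BackupData,
) -> Result<(), Error> {
  // None: attach holders to the backup (compaction) item itself.
  add_attachments::run(client, backup_data, None).await?;
  // Some(i): attach holders to the i-th log in backup_data.log_items.
  for log_index in 0..backup_data.log_items.len() {
    add_attachments::run(client, backup_data, Some(log_index)).await?;
  }
  Ok(())
}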
diff --git a/services/commtest/tests/backup/pull_backup.rs b/services/commtest/tests/backup/pull_backup.rs
index 2d2494c2f..52fca6107 100644
--- a/services/commtest/tests/backup/pull_backup.rs
+++ b/services/commtest/tests/backup/pull_backup.rs
@@ -1,127 +1,126 @@
 #[path = "./backup_utils.rs"]
 mod backup_utils;
 #[path = "../lib/tools.rs"]
 mod tools;
 
 use tonic::Request;
 
 use std::io::{Error as IOError, ErrorKind};
 
 use crate::backup_utils::{
   proto::pull_backup_response::Data, proto::pull_backup_response::Data::*,
   proto::pull_backup_response::Id, proto::pull_backup_response::Id::*,
   proto::PullBackupRequest, BackupServiceClient,
 };
 
 use crate::backup_utils::{BackupData, Item};
 use crate::tools::{Error, ATTACHMENT_DELIMITER};
 
 #[derive(PartialEq, Debug)]
 enum State {
   Compaction,
   Log,
 }
 
 pub async fn run(
   client: &mut BackupServiceClient<tonic::transport::Channel>,
   backup_data: &BackupData,
 ) -> Result<BackupData, Error> {
   println!("pull backup");
   let cloned_user_id = backup_data.user_id.clone();
   let cloned_backup_id = backup_data.backup_item.id.clone();
 
   let mut result = BackupData {
     user_id: String::new(),
     device_id: String::new(),
     backup_item: Item::new(String::new(), Vec::new(), Vec::new()),
     log_items: Vec::new(),
   };
 
   let response = client
     .pull_backup(Request::new(PullBackupRequest {
       user_id: cloned_user_id,
       backup_id: cloned_backup_id,
     }))
     .await?;
   let mut inbound = response.into_inner();
   let mut state: State = State::Compaction;
   let mut current_id: String = String::new();
   while let Some(response) = inbound.message().await? {
     let response_data: Option<Data> = response.data;
     let id: Option<Id> = response.id;
     let mut backup_id: Option<String> = None;
     let mut log_id: Option<String> = None;
     match id {
-      Some(BackupId(id)) => {
-        backup_id = Some(id)
-      },
-      Some(LogId(id)) => {
-        log_id = Some(id)
-      },
-      None => {},
+      Some(BackupId(id)) => backup_id = Some(id),
+      Some(LogId(id)) => log_id = Some(id),
+      None => {}
     };
     match response_data {
       Some(CompactionChunk(chunk)) => {
         assert_eq!(
-          state, State::Compaction,
+          state,
+          State::Compaction,
           "invalid state, expected compaction, got {:?}",
           state
         );
         current_id = backup_id.ok_or(IOError::new(
           ErrorKind::Other,
           "backup id expected but not received",
         ))?;
         println!(
           "compaction (id {}), pushing chunk (size: {})",
           current_id,
           chunk.len()
         );
         result.backup_item.chunks_sizes.push(chunk.len())
       }
       Some(LogChunk(chunk)) => {
         if state == State::Compaction {
           state = State::Log;
         }
         assert_eq!(state, State::Log, "invalid state, expected compaction");
         let log_id = log_id.ok_or(IOError::new(
           ErrorKind::Other,
           "log id expected but not received",
         ))?;
         if log_id != current_id {
-          result
-            .log_items
-            .push(Item::new(log_id.clone(), Vec::new(), Vec::new()));
+          result.log_items.push(Item::new(
+            log_id.clone(),
+            Vec::new(),
+            Vec::new(),
+          ));
           current_id = log_id;
         }
         let log_items_size = result.log_items.len() - 1;
         result.log_items[log_items_size]
           .chunks_sizes
           .push(chunk.len());
         println!("log (id {}) chunk size {}", current_id, chunk.len());
       }
       Some(AttachmentHolders(holders)) => {
         let holders_split: Vec<&str> =
           holders.split(ATTACHMENT_DELIMITER).collect();
         if state == State::Compaction {
           println!("attachments for the backup: {}", holders);
           for holder in holders_split {
             if holder.len() == 0 {
               continue;
             }
             result
               .backup_item
               .attachments_holders
               .push(holder.to_string());
           }
         } else if state == State::Log {
           println!("attachments for the log: {}", holders);
           for holder in holders_split {
             if holder.len() == 0 {
               continue;
             }
             let log_items_size = result.log_items.len() - 1;
             result.log_items[log_items_size]
               .attachments_holders
               .push(holder.to_string())
           }
         }
       }
-      None => {},
+      None => {}
     }
   }
   Ok(result)
 }
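Reviewer sketch (not part of this diff): the pull stream above is a two-phase protocol, compaction chunks first, then log chunks grouped by log id, with attachment holders sent as one delimiter-joined string that the receiver splits, dropping empty entries. Below is a standalone illustration of that holder encoding. The delimiter value ";" is an assumption; the real ATTACHMENT_DELIMITER lives in tests/lib/tools.rs and is not shown in this diff.

// Assumed delimiter value; see tests/lib/tools.rs for the real one.
const ATTACHMENT_DELIMITER: &str = ";";

// Sender side: one joined string per message.
fn join_holders(holders: &[String]) -> String {
  holders.join(ATTACHMENT_DELIMITER)
}

// Receiver side: split and skip empty entries, mirroring pull_backup.
fn split_holders(serialized: &str) -> Vec<String> {
  serialized
    .split(ATTACHMENT_DELIMITER)
    .filter(|holder| !holder.is_empty())
    .map(|holder| holder.to_string())
    .collect()
}

#[test]
fn holders_round_trip() {
  let holders = vec!["holder1".to_string(), "holder2".to_string()];
  assert_eq!(split_holders(&join_holders(&holders)), holders);
}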
"test_holder003".to_string(), hash: "test_hash003".to_string(), chunks_sizes: vec![ tools::get_grpc_chunk_size_limit(), ByteSize::b(100).as_u64() as usize, tools::get_grpc_chunk_size_limit(), ], }, ]; for item in &blob_data { let data_exists: bool = put::run(&mut client, &item).await?; assert!(!data_exists, "test data should not exist"); } for (i, blob_item) in blob_data.iter().enumerate() { let received_sizes = get::run(&mut client, &blob_item).await?; let expected_data_size = blob_item.chunks_sizes.iter().sum::(); let received_data_size = received_sizes.iter().sum::(); assert_eq!( expected_data_size, received_data_size, "invalid size of data for index {}, expected {}, got {}", i, expected_data_size, received_data_size ); } for item in &blob_data { remove::run(&mut client, &item).await?; assert!( get::run(&mut client, &item).await.is_err(), "item should no longer be available" ); } Ok(()) }