diff --git a/services/commtest/tests/backup/add_attachments.rs b/services/commtest/tests/backup/add_attachments.rs
--- a/services/commtest/tests/backup/add_attachments.rs
+++ b/services/commtest/tests/backup/add_attachments.rs
@@ -24,13 +24,13 @@
       let log_id = backup_data.log_items[index].id.clone();
       println!("add attachments for log {}/{}", index, log_id);
       log_id
-    },
+    }
     None => {
       println!("add attachments for backup");
      String::new()
-    },
+    }
   };
-  
+
   let holders: String = match log_index {
     Some(log_index) => backup_data.log_items[log_index]
       .attachments_holders
diff --git a/services/commtest/tests/backup/pull_backup.rs b/services/commtest/tests/backup/pull_backup.rs
--- a/services/commtest/tests/backup/pull_backup.rs
+++ b/services/commtest/tests/backup/pull_backup.rs
@@ -50,18 +50,15 @@
     let mut backup_id: Option<String> = None;
     let mut log_id: Option<String> = None;
     match id {
-      Some(BackupId(id)) => {
-        backup_id = Some(id)
-      },
-      Some(LogId(id)) => {
-        log_id = Some(id)
-      },
-      None => {},
+      Some(BackupId(id)) => backup_id = Some(id),
+      Some(LogId(id)) => log_id = Some(id),
+      None => {}
     };
     match response_data {
       Some(CompactionChunk(chunk)) => {
         assert_eq!(
-          state, State::Compaction,
+          state,
+          State::Compaction,
           "invalid state, expected compaction, got {:?}",
           state
         );
@@ -80,9 +77,11 @@
         assert_eq!(state, State::Log, "invalid state, expected compaction");
         let log_id = log_id.expect("log id expected but not received");
         if log_id != current_id {
-          result
-            .log_items
-            .push(Item::new(log_id.clone(), Vec::new(), Vec::new()));
+          result.log_items.push(Item::new(
+            log_id.clone(),
+            Vec::new(),
+            Vec::new(),
+          ));
           current_id = log_id;
         }
         let log_items_size = result.log_items.len() - 1;
@@ -119,7 +118,7 @@
           }
         }
       }
-      None => {},
+      None => {}
     }
   }
   Ok(result)
diff --git a/services/commtest/tests/backup_test.rs b/services/commtest/tests/backup_test.rs
--- a/services/commtest/tests/backup_test.rs
+++ b/services/commtest/tests/backup_test.rs
@@ -21,7 +21,8 @@
 
 #[tokio::test]
 async fn backup_test() -> Result<(), Error> {
-  let port = env::var("COMM_SERVICES_PORT_BACKUP").expect("port env var expected but not received");
+  let port = env::var("COMM_SERVICES_PORT_BACKUP")
+    .expect("port env var expected but not received");
   let mut client =
     BackupServiceClient::connect(format!("http://localhost:{}", port)).await?;
 
@@ -45,15 +46,25 @@
     // In this case its data should be moved to the S3
     Item::new(
       String::new(),
-      vec![tools::get_dynamo_db_item_size_limit() - ByteSize::b(attachments_fill_size/2).as_u64() as usize],
+      vec![
+        tools::get_dynamo_db_item_size_limit()
+          - ByteSize::b(attachments_fill_size / 2).as_u64() as usize,
+      ],
       vec!["holder0".to_string(), "holder1".to_string()],
     ),
     // just a small item
-    Item::new(String::new(), vec![ByteSize::b(100).as_u64() as usize], vec!["holder0".to_string()]),
+    Item::new(
+      String::new(),
+      vec![ByteSize::b(100).as_u64() as usize],
+      vec!["holder0".to_string()],
+    ),
     // a big item that should be placed in the S3 right away
     Item::new(
       String::new(),
-      vec![tools::get_grpc_chunk_size_limit(), tools::get_grpc_chunk_size_limit()],
+      vec![
+        tools::get_grpc_chunk_size_limit(),
+        tools::get_grpc_chunk_size_limit(),
+      ],
       vec![
         "holder0".to_string(),
         "holder1".to_string(),
@@ -100,8 +111,7 @@
   assert_eq!(
     expected, from_result,
     "number of logs do not match, expected {}, got {}",
-    expected,
-    from_result
+    expected, from_result
   );
 
   // check log sizes
@@ -111,9 +121,7 @@
     assert_eq!(
       from_result, expected,
       "log number {} sizes do not match, expected {}, got {}",
-      i,
-      expected,
-      from_result
+      i, expected, from_result
     );
   }
 
diff --git a/services/commtest/tests/blob_test.rs b/services/commtest/tests/blob_test.rs
--- a/services/commtest/tests/blob_test.rs
+++ b/services/commtest/tests/blob_test.rs
@@ -17,24 +17,38 @@
 
 #[tokio::test]
 async fn blob_test() -> Result<(), Error> {
-  let port = env::var("COMM_SERVICES_PORT_BLOB").expect("port env var expected but not received");
-  let mut client = BlobServiceClient::connect(format!("http://localhost:{}", port)).await?;
+  let port = env::var("COMM_SERVICES_PORT_BLOB")
+    .expect("port env var expected but not received");
+  let mut client =
+    BlobServiceClient::connect(format!("http://localhost:{}", port)).await?;
 
   let blob_data = vec![
     BlobData {
       holder: "test_holder001".to_string(),
       hash: "test_hash001".to_string(),
-      chunks_sizes: vec![ByteSize::b(100).as_u64() as usize, ByteSize::b(100).as_u64() as usize, ByteSize::b(100).as_u64() as usize],
+      chunks_sizes: vec![
+        ByteSize::b(100).as_u64() as usize,
+        ByteSize::b(100).as_u64() as usize,
+        ByteSize::b(100).as_u64() as usize,
+      ],
     },
     BlobData {
       holder: "test_holder002".to_string(),
       hash: "test_hash002".to_string(),
-      chunks_sizes: vec![tools::get_grpc_chunk_size_limit(), tools::get_grpc_chunk_size_limit(), ByteSize::b(10).as_u64() as usize],
+      chunks_sizes: vec![
+        tools::get_grpc_chunk_size_limit(),
+        tools::get_grpc_chunk_size_limit(),
+        ByteSize::b(10).as_u64() as usize,
+      ],
     },
     BlobData {
       holder: "test_holder003".to_string(),
       hash: "test_hash003".to_string(),
-      chunks_sizes: vec![tools::get_grpc_chunk_size_limit(), ByteSize::b(100).as_u64() as usize, tools::get_grpc_chunk_size_limit()],
+      chunks_sizes: vec![
+        tools::get_grpc_chunk_size_limit(),
+        ByteSize::b(100).as_u64() as usize,
+        tools::get_grpc_chunk_size_limit(),
+      ],
     },
   ];
 
@@ -50,15 +64,16 @@
     assert_eq!(
       expected_data_size, received_data_size,
       "invalid size of data for index {}, expected {}, got {}",
-      i,
-      expected_data_size,
-      received_data_size
+      i, expected_data_size, received_data_size
     );
   }
 
   for item in &blob_data {
     remove::run(&mut client, &item).await?;
-    assert!(get::run(&mut client, &item).await.is_err(), "item should no longer be available");
+    assert!(
+      get::run(&mut client, &item).await.is_err(),
+      "item should no longer be available"
+    );
   }
 
   Ok(())