diff --git a/services/commtest/tests/backup/backup_utils.rs b/services/commtest/tests/backup/backup_utils.rs
index 2c4bcdf34..9fe4eae33 100644
--- a/services/commtest/tests/backup/backup_utils.rs
+++ b/services/commtest/tests/backup/backup_utils.rs
@@ -1,38 +1,81 @@
 pub mod proto {
   tonic::include_proto!("backup");
 }
 
 pub use proto::backup_service_client::BackupServiceClient;
 
 // stands for both, backup and log items
 #[allow(dead_code)]
 #[derive(Clone)]
 pub struct Item {
   pub id: String,
   pub chunks_sizes: Vec<usize>,
   pub attachments_holders: Vec<String>,
 }
 
 #[allow(dead_code)]
 impl Item {
   pub fn new(
     id: String,
     chunks_sizes: Vec<usize>,
     attachments_holders: Vec<String>,
   ) -> Item {
     Item {
       id,
       chunks_sizes,
       attachments_holders,
     }
   }
 }
 
 #[allow(dead_code)]
 #[derive(Clone)]
 pub struct BackupData {
   pub user_id: String,
   pub device_id: String,
   pub backup_item: Item,
   pub log_items: Vec<Item>,
 }
+
+#[allow(dead_code)]
+pub fn compare_backups(backup_data: &BackupData, result: &BackupData) {
+  // check backup size
+  let expected: usize = backup_data.backup_item.chunks_sizes.iter().sum();
+  let from_result: usize = result.backup_item.chunks_sizes.iter().sum();
+  assert_eq!(
+    from_result, expected,
+    "backup sizes do not match, expected {}, got {}",
+    expected, from_result
+  );
+
+  // check backup attachments
+  let expected: usize = backup_data.backup_item.attachments_holders.len();
+  let from_result: usize = result.backup_item.attachments_holders.len();
+  assert_eq!(
+    from_result, expected,
+    "backup: number of attachments holders do not match, expected {}, got {}",
+    expected, from_result
+  );
+
+  // check number of logs
+  let expected: usize = backup_data.log_items.len();
+  let from_result: usize = result.log_items.len();
+  assert_eq!(
+    expected, from_result,
+    "number of logs do not match, expected {}, got {}",
+    expected, from_result
+  );
+
+  // check log sizes
+  for i in 0..backup_data.log_items.len() {
+    let expected: usize = backup_data.log_items[i].chunks_sizes.iter().sum();
+    let from_result: usize = result.log_items[i].chunks_sizes.iter().sum();
+    assert_eq!(
+      from_result, expected,
+      "log number {} sizes do not match, expected {}, got {}",
+      i, expected, from_result
+    );
+  }
+
+  // todo: check logs attachment holders
+}
diff --git a/services/commtest/tests/backup_integration_test.rs b/services/commtest/tests/backup_integration_test.rs
index 81d2cc121..231ef70cc 100644
--- a/services/commtest/tests/backup_integration_test.rs
+++ b/services/commtest/tests/backup_integration_test.rs
@@ -1,161 +1,125 @@
 #[path = "./backup/add_attachments.rs"]
 mod add_attachments;
 #[path = "./backup/backup_utils.rs"]
 mod backup_utils;
 #[path = "./backup/create_new_backup.rs"]
 mod create_new_backup;
 #[path = "./backup/pull_backup.rs"]
 mod pull_backup;
 #[path = "./backup/send_log.rs"]
 mod send_log;
 #[path = "./lib/tools.rs"]
 mod tools;
 
 use backup_utils::{BackupData, Item};
 use bytesize::ByteSize;
 use tools::Error;
 
 use std::env;
 
 use backup_utils::BackupServiceClient;
 
 #[tokio::test]
 async fn backup_integration_test() -> Result<(), Error> {
   let port = env::var("COMM_SERVICES_PORT_BACKUP")
     .expect("port env var expected but not received");
   let mut client =
     BackupServiceClient::connect(format!("http://localhost:{}", port)).await?;
 
   let attachments_fill_size: u64 = 500;
 
   let mut backup_data = BackupData {
     user_id: "user0000".to_string(),
     device_id: "device0000".to_string(),
     backup_item: Item::new(
       String::new(),
       vec![ByteSize::mib(1).as_u64() as usize; 6],
       vec![
         "holder0".to_string(),
         "holder1".to_string(),
         "holder2".to_string(),
       ],
     ),
     log_items: vec![
       // the item that almost hits the DB limit, we're going to later add a long
       // list of attachments, so that causes it to exceed the limit.
       // In this case its data should be moved to the S3
       Item::new(
         String::new(),
         vec![
           *tools::DYNAMO_DB_ITEM_SIZE_LIMIT
             - ByteSize::b(attachments_fill_size / 2).as_u64() as usize,
         ],
         vec!["holder0".to_string(), "holder1".to_string()],
       ),
       // just a small item
       Item::new(
         String::new(),
         vec![ByteSize::b(100).as_u64() as usize],
         vec!["holder0".to_string()],
       ),
       // a big item that should be placed in the S3 right away
       Item::new(
         String::new(),
         vec![*tools::GRPC_CHUNK_SIZE_LIMIT, *tools::GRPC_CHUNK_SIZE_LIMIT],
         vec![
           "holder0".to_string(),
           "holder1".to_string(),
           "holder2".to_string(),
         ],
       ),
     ],
   };
 
   backup_data.backup_item.id =
     create_new_backup::run(&mut client, &backup_data).await?;
   println!("backup id in main: {}", backup_data.backup_item.id);
 
   add_attachments::run(&mut client, &backup_data, None).await?;
 
   for log_index in 0..backup_data.log_items.len() {
     backup_data.log_items[log_index].id =
       send_log::run(&mut client, &backup_data, log_index).await?;
     add_attachments::run(&mut client, &backup_data, Some(log_index)).await?;
   }
 
-  let result = pull_backup::run(&mut client, &backup_data).await?;
-
-  // check backup size
-  let expected: usize = backup_data.backup_item.chunks_sizes.iter().sum();
-  let from_result: usize = result.backup_item.chunks_sizes.iter().sum();
-  assert_eq!(
-    from_result, expected,
-    "backup sizes do not match, expected {}, got {}",
-    expected, from_result
-  );
-
-  // check backup attachments
-  let expected: usize = backup_data.backup_item.attachments_holders.len();
-  let from_result: usize = result.backup_item.attachments_holders.len();
-  assert_eq!(
-    from_result, expected,
-    "backup: number of attachments holders do not match, expected {}, got {}",
-    expected, from_result
-  );
+  let result: BackupData = pull_backup::run(&mut client, &backup_data).await?;
 
-  // check number of logs
-  let expected: usize = backup_data.log_items.len();
-  let from_result: usize = result.log_items.len();
-  assert_eq!(
-    expected, from_result,
-    "number of logs do not match, expected {}, got {}",
-    expected, from_result
-  );
-
-  // check log sizes
-  for i in 0..backup_data.log_items.len() {
-    let expected: usize = backup_data.log_items[i].chunks_sizes.iter().sum();
-    let from_result: usize = result.log_items[i].chunks_sizes.iter().sum();
-    assert_eq!(
-      from_result, expected,
-      "log number {} sizes do not match, expected {}, got {}",
-      i, expected, from_result
-    );
-  }
+  backup_utils::compare_backups(&backup_data, &result);
 
   // push so many attachments that the log item's data will have to be moved
   // from the db to the s3
   let mut attachments_size = 0;
   let mut i = backup_data.log_items[0].attachments_holders.len();
   let mut new_attachments: Vec<String> = Vec::new();
   while attachments_size < (attachments_fill_size as usize) {
     let att = format!("holder{}", i);
     attachments_size += att.len();
     new_attachments.push(att);
     i += 1;
   }
 
   let mut old_attachments =
     backup_data.log_items[0].attachments_holders.clone();
   backup_data.log_items[0].attachments_holders = new_attachments;
   add_attachments::run(&mut client, &backup_data, Some(0)).await?;
   backup_data.log_items[0]
     .attachments_holders
     .append(&mut old_attachments);
 
   let result = pull_backup::run(&mut client, &backup_data).await?;
   // check logs attachments
   for i in 0..backup_data.log_items.len() {
     let expected: usize = backup_data.log_items[i].attachments_holders.len();
     let from_result: usize = result.log_items[i].attachments_holders.len();
     assert_eq!(
       from_result, expected,
       "after attachment add: log {}: number of attachments holders do not match, expected {}, got {}",
       i, expected, from_result
     );
   }
 
   Ok(())
 }
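A possible follow-up for the "todo: check logs attachment holders" note left in compare_backups, modeled on the per-log attachments check that stays in backup_integration_test.rs. This is only a sketch, not part of the patch: it assumes the helper would live next to compare_backups in backup_utils.rs and reuse the existing BackupData and Item definitions, and the name compare_log_attachments is hypothetical.

#[allow(dead_code)]
pub fn compare_log_attachments(backup_data: &BackupData, result: &BackupData) {
  // hypothetical helper: assumes compare_backups has already verified that
  // both sides contain the same number of log items
  for i in 0..backup_data.log_items.len() {
    let expected: usize = backup_data.log_items[i].attachments_holders.len();
    let from_result: usize = result.log_items[i].attachments_holders.len();
    assert_eq!(
      from_result, expected,
      "log {}: number of attachments holders do not match, expected {}, got {}",
      i, expected, from_result
    );
  }
}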