diff --git a/services/commtest/tests/backup/backup_utils.rs b/services/commtest/tests/backup/backup_utils.rs --- a/services/commtest/tests/backup/backup_utils.rs +++ b/services/commtest/tests/backup/backup_utils.rs @@ -36,3 +36,46 @@ pub backup_item: Item, pub log_items: Vec<Item>, } + +#[allow(dead_code)] +pub fn compare_backups(backup_data: &BackupData, result: &BackupData) { + // check backup size + let expected: usize = backup_data.backup_item.chunks_sizes.iter().sum(); + let from_result: usize = result.backup_item.chunks_sizes.iter().sum(); + assert_eq!( + from_result, expected, + "backup sizes do not match, expected {}, got {}", + expected, from_result + ); + + // check backup attachments + let expected: usize = backup_data.backup_item.attachments_holders.len(); + let from_result: usize = result.backup_item.attachments_holders.len(); + assert_eq!( + from_result, expected, + "backup: number of attachments holders do not match, expected {}, got {}", + expected, from_result + ); + + // check number of logs + let expected: usize = backup_data.log_items.len(); + let from_result: usize = result.log_items.len(); + assert_eq!( + expected, from_result, + "number of logs do not match, expected {}, got {}", + expected, from_result + ); + + // check log sizes + for i in 0..backup_data.log_items.len() { + let expected: usize = backup_data.log_items[i].chunks_sizes.iter().sum(); + let from_result: usize = result.log_items[i].chunks_sizes.iter().sum(); + assert_eq!( + from_result, expected, + "log number {} sizes do not match, expected {}, got {}", + i, expected, from_result + ); + } + + // todo: check logs attachment holders +} diff --git a/services/commtest/tests/backup_integration_test.rs b/services/commtest/tests/backup_integration_test.rs --- a/services/commtest/tests/backup_integration_test.rs +++ b/services/commtest/tests/backup_integration_test.rs @@ -83,45 +83,9 @@ add_attachments::run(&mut client, &backup_data, Some(log_index)).await?; } - let result = 
pull_backup::run(&mut client, &backup_data).await?; - - // check backup size - let expected: usize = backup_data.backup_item.chunks_sizes.iter().sum(); - let from_result: usize = result.backup_item.chunks_sizes.iter().sum(); - assert_eq!( - from_result, expected, - "backup sizes do not match, expected {}, got {}", - expected, from_result - ); - - // check backup attachments - let expected: usize = backup_data.backup_item.attachments_holders.len(); - let from_result: usize = result.backup_item.attachments_holders.len(); - assert_eq!( - from_result, expected, - "backup: number of attachments holders do not match, expected {}, got {}", - expected, from_result - ); + let result: BackupData = pull_backup::run(&mut client, &backup_data).await?; - // check number of logs - let expected: usize = backup_data.log_items.len(); - let from_result: usize = result.log_items.len(); - assert_eq!( - expected, from_result, - "number of logs do not match, expected {}, got {}", - expected, from_result - ); - - // check log sizes - for i in 0..backup_data.log_items.len() { - let expected: usize = backup_data.log_items[i].chunks_sizes.iter().sum(); - let from_result: usize = result.log_items[i].chunks_sizes.iter().sum(); - assert_eq!( - from_result, expected, - "log number {} sizes do not match, expected {}, got {}", - i, expected, from_result - ); - } + backup_utils::compare_backups(&backup_data, &result); // push so many attachments that the log item's data will have to be moved // from the db to the s3 diff --git a/services/commtest/tests/backup_performance_test.rs b/services/commtest/tests/backup_performance_test.rs --- a/services/commtest/tests/backup_performance_test.rs +++ b/services/commtest/tests/backup_performance_test.rs @@ -179,7 +179,7 @@ rt.block_on(async { println!("performing ADD ATTACHMENTS - LOGS operations"); let mut handlers = vec![]; - for (_, backup_item) in backup_data.iter().enumerate() { + for backup_item in &backup_data { let backup_item_cloned = 
backup_item.clone(); for (log_index, _) in backup_item_cloned.log_items.iter().enumerate() { let backup_item_recloned = backup_item_cloned.clone();