diff --git a/services/commtest/tests/backup_test.rs b/services/commtest/tests/backup_integration_test.rs
similarity index 98%
rename from services/commtest/tests/backup_test.rs
rename to services/commtest/tests/backup_integration_test.rs
index dc4ded4bb..81d2cc121 100644
--- a/services/commtest/tests/backup_test.rs
+++ b/services/commtest/tests/backup_integration_test.rs
@@ -1,160 +1,161 @@
 #[path = "./backup/add_attachments.rs"]
 mod add_attachments;
 #[path = "./backup/backup_utils.rs"]
 mod backup_utils;
 #[path = "./backup/create_new_backup.rs"]
 mod create_new_backup;
 #[path = "./backup/pull_backup.rs"]
 mod pull_backup;
 #[path = "./backup/send_log.rs"]
 mod send_log;
 #[path = "./lib/tools.rs"]
 mod tools;

 use backup_utils::{BackupData, Item};
 use bytesize::ByteSize;
 use tools::Error;

 use std::env;

 use backup_utils::BackupServiceClient;

 #[tokio::test]
-async fn backup_test() -> Result<(), Error> {
+async fn backup_integration_test() -> Result<(), Error> {
   let port = env::var("COMM_SERVICES_PORT_BACKUP")
     .expect("port env var expected but not received");
   let mut client =
     BackupServiceClient::connect(format!("http://localhost:{}", port)).await?;

   let attachments_fill_size: u64 = 500;

   let mut backup_data = BackupData {
     user_id: "user0000".to_string(),
     device_id: "device0000".to_string(),
     backup_item: Item::new(
       String::new(),
       vec![ByteSize::mib(1).as_u64() as usize; 6],
       vec![
         "holder0".to_string(),
         "holder1".to_string(),
         "holder2".to_string(),
       ],
     ),
     log_items: vec![
       // an item that almost hits the DB limit; the long list of
       // attachments we add later pushes it over that limit,
       // so its data should be moved to S3
       Item::new(
         String::new(),
         vec![
           *tools::DYNAMO_DB_ITEM_SIZE_LIMIT
             - ByteSize::b(attachments_fill_size / 2).as_u64() as usize,
         ],
         vec!["holder0".to_string(), "holder1".to_string()],
       ),
       // just a small item
       Item::new(
         String::new(),
         vec![ByteSize::b(100).as_u64() as usize],
         vec!["holder0".to_string()],
       ),
       // a big item that should be placed in S3 right away
       Item::new(
         String::new(),
         vec![*tools::GRPC_CHUNK_SIZE_LIMIT, *tools::GRPC_CHUNK_SIZE_LIMIT],
         vec![
           "holder0".to_string(),
           "holder1".to_string(),
           "holder2".to_string(),
         ],
       ),
     ],
   };
+
   backup_data.backup_item.id =
     create_new_backup::run(&mut client, &backup_data).await?;
   println!("backup id in main: {}", backup_data.backup_item.id);

   add_attachments::run(&mut client, &backup_data, None).await?;

   for log_index in 0..backup_data.log_items.len() {
     backup_data.log_items[log_index].id =
       send_log::run(&mut client, &backup_data, log_index).await?;
     add_attachments::run(&mut client, &backup_data, Some(log_index)).await?;
   }

   let result = pull_backup::run(&mut client, &backup_data).await?;

   // check backup size
   let expected: usize = backup_data.backup_item.chunks_sizes.iter().sum();
   let from_result: usize = result.backup_item.chunks_sizes.iter().sum();
   assert_eq!(
     from_result, expected,
     "backup sizes do not match, expected {}, got {}",
     expected, from_result
   );

   // check backup attachments
   let expected: usize = backup_data.backup_item.attachments_holders.len();
   let from_result: usize = result.backup_item.attachments_holders.len();
   assert_eq!(
     from_result, expected,
     "backup: number of attachments holders does not match, expected {}, got {}",
     expected, from_result
   );

   // check number of logs
   let expected: usize = backup_data.log_items.len();
   let from_result: usize = result.log_items.len();
   assert_eq!(
     expected, from_result,
     "number of logs does not match, expected {}, got {}",
     expected, from_result
   );
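A note on the two limits the backup test leans on: it dereferences
tools::DYNAMO_DB_ITEM_SIZE_LIMIT and tools::GRPC_CHUNK_SIZE_LIMIT with *,
which points at lazily initialized statics in lib/tools.rs. Below is a
minimal sketch of how such constants could be defined, assuming the
lazy_static crate, DynamoDB's standard 400 KiB item cap, and gRPC's default
4 MiB message limit; the real definitions may differ, e.g. by subtracting
per-message metadata overhead:

    use bytesize::ByteSize;
    use lazy_static::lazy_static;

    lazy_static! {
      // DynamoDB rejects items larger than 400 KiB, so log data that
      // approaches this bound has to be offloaded to S3.
      pub static ref DYNAMO_DB_ITEM_SIZE_LIMIT: usize =
        ByteSize::kib(400).as_u64() as usize;
      // gRPC caps messages at 4 MiB by default; larger payloads must be
      // streamed in chunks no bigger than this.
      pub static ref GRPC_CHUNK_SIZE_LIMIT: usize =
        ByteSize::mib(4).as_u64() as usize;
    }

Because the fixtures above are sized relative to these constants, the test
exercises the DB-to-S3 offloading path regardless of the concrete values.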
   // check log sizes
   for i in 0..backup_data.log_items.len() {
     let expected: usize = backup_data.log_items[i].chunks_sizes.iter().sum();
     let from_result: usize = result.log_items[i].chunks_sizes.iter().sum();
     assert_eq!(
       from_result, expected,
       "log number {} sizes do not match, expected {}, got {}",
       i, expected, from_result
     );
   }

   // push enough attachments that the log item's data has to be moved
   // from the DB to S3
   let mut attachments_size = 0;
   let mut i = backup_data.log_items[0].attachments_holders.len();
   let mut new_attachments: Vec<String> = Vec::new();
   while attachments_size < (attachments_fill_size as usize) {
     let att = format!("holder{}", i);
     attachments_size += att.len();
     new_attachments.push(att);
     i += 1;
   }

   let mut old_attachments =
     backup_data.log_items[0].attachments_holders.clone();
   backup_data.log_items[0].attachments_holders = new_attachments;
   add_attachments::run(&mut client, &backup_data, Some(0)).await?;
   backup_data.log_items[0]
     .attachments_holders
     .append(&mut old_attachments);

   let result = pull_backup::run(&mut client, &backup_data).await?;

   // check log attachments
   for i in 0..backup_data.log_items.len() {
     let expected: usize = backup_data.log_items[i].attachments_holders.len();
     let from_result: usize = result.log_items[i].attachments_holders.len();
     assert_eq!(
       from_result, expected,
       "after attachment add: log {}: number of attachments holders does not match, expected {}, got {}",
       i, expected, from_result
     );
   }

   Ok(())
 }
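For reference, this is the shape of the fixture the blob test below
constructs; a sketch inferred from the test body, since the actual
definition lives in blob/blob_utils.rs and may carry extra fields:

    // Test fixture describing one blob and how it is uploaded.
    pub struct BlobData {
      pub holder: String,           // client-side reference to the blob
      pub hash: String,             // content hash, used to deduplicate uploads
      pub chunks_sizes: Vec<usize>, // size of each chunk streamed over gRPC
    }

The same chunks_sizes drive both directions: put::run uploads chunks of
exactly these sizes, and the test then checks that get::run returns the same
total number of bytes.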
diff --git a/services/commtest/tests/blob_test.rs b/services/commtest/tests/blob_integration_test.rs
similarity index 97%
rename from services/commtest/tests/blob_test.rs
rename to services/commtest/tests/blob_integration_test.rs
index 7405b5a78..bff9056d7 100644
--- a/services/commtest/tests/blob_test.rs
+++ b/services/commtest/tests/blob_integration_test.rs
@@ -1,80 +1,80 @@
 #[path = "./blob/blob_utils.rs"]
 mod blob_utils;
 #[path = "./blob/get.rs"]
 mod get;
 #[path = "./blob/put.rs"]
 mod put;
 #[path = "./blob/remove.rs"]
 mod remove;
 #[path = "./lib/tools.rs"]
 mod tools;

 use bytesize::ByteSize;
 use std::env;

 use blob_utils::{BlobData, BlobServiceClient};
 use tools::Error;

 #[tokio::test]
-async fn blob_test() -> Result<(), Error> {
+async fn blob_integration_test() -> Result<(), Error> {
   let port = env::var("COMM_SERVICES_PORT_BLOB")
     .expect("port env var expected but not received");
   let mut client =
     BlobServiceClient::connect(format!("http://localhost:{}", port)).await?;

   let blob_data = vec![
     BlobData {
       holder: "test_holder001".to_string(),
       hash: "test_hash001".to_string(),
       chunks_sizes: vec![
         ByteSize::b(100).as_u64() as usize,
         ByteSize::b(100).as_u64() as usize,
         ByteSize::b(100).as_u64() as usize,
       ],
     },
     BlobData {
       holder: "test_holder002".to_string(),
       hash: "test_hash002".to_string(),
       chunks_sizes: vec![
         *tools::GRPC_CHUNK_SIZE_LIMIT,
         *tools::GRPC_CHUNK_SIZE_LIMIT,
         ByteSize::b(10).as_u64() as usize,
       ],
     },
     BlobData {
       holder: "test_holder003".to_string(),
       hash: "test_hash003".to_string(),
       chunks_sizes: vec![
         *tools::GRPC_CHUNK_SIZE_LIMIT,
         ByteSize::b(100).as_u64() as usize,
         *tools::GRPC_CHUNK_SIZE_LIMIT,
       ],
     },
   ];

   for item in &blob_data {
     let data_exists: bool = put::run(&mut client, &item).await?;
     assert!(!data_exists, "test data should not exist");
   }

   for (i, blob_item) in blob_data.iter().enumerate() {
     let received_sizes = get::run(&mut client, &blob_item).await?;
     let expected_data_size = blob_item.chunks_sizes.iter().sum::<usize>();
     let received_data_size = received_sizes.iter().sum::<usize>();
     assert_eq!(
       expected_data_size, received_data_size,
       "invalid size of data for index {}, expected {}, got {}",
       i, expected_data_size, received_data_size
     );
   }

   for item in &blob_data {
     remove::run(&mut client, &item).await?;
     assert!(
       get::run(&mut client, &item).await.is_err(),
       "item should no longer be available"
     );
   }

   Ok(())
 }
diff --git a/services/commtest/tests/tunnelbroker_test.rs b/services/commtest/tests/tunnelbroker_integration_test.rs
similarity index 68%
rename from services/commtest/tests/tunnelbroker_test.rs
rename to services/commtest/tests/tunnelbroker_integration_test.rs
index 443eb9739..d4bd4e5d0 100644
--- a/services/commtest/tests/tunnelbroker_test.rs
+++ b/services/commtest/tests/tunnelbroker_integration_test.rs
@@ -1,7 +1,7 @@
 #[path = "./lib/tools.rs"]
 mod tools;

 #[tokio::test]
-async fn tunnelbroker_test() {
+async fn tunnelbroker_integration_test() {
   assert!(false, "not implemented");
 }
diff --git a/services/scripts/run_integration_tests.sh b/services/scripts/run_integration_tests.sh
index 6d32af50a..e24e9a442 100755
--- a/services/scripts/run_integration_tests.sh
+++ b/services/scripts/run_integration_tests.sh
@@ -1,47 +1,47 @@
 #!/usr/bin/env bash

 set -e

 export COMM_TEST_SERVICES=1
 export COMM_SERVICES_SANDBOX=1

 SERVICES=$(./scripts/list_services.sh)

 run_integration_test () {
   echo "integration tests will be run for the $1 service"
   # append -- --nocapture at the end to enable logs
-  cargo test "$1"_test --test '*' --manifest-path=commtest/Cargo.toml #-- --nocapture
+  cargo test "$1"_integration_test --test '*' --manifest-path=commtest/Cargo.toml #-- --nocapture
 }

 list_expected () {
   echo "Expected one of these:";
   echo "$SERVICES";
   echo "all";
 }

 if [[ -z "$1" ]]; then
   echo "No service specified";
   list_expected;
   exit 1;
 fi

 if [[ "$1" == "all" ]]; then
   for SERVICE in $SERVICES; do
     run_integration_test "$SERVICE"
   done
   exit 0;
 fi;

 SERVICE=$(grep "$1" <<< "$SERVICES")

 if [[ "$SERVICE" != "$1" ]]; then
   echo "No such service: $1";
   list_expected;
   exit 1;
 fi;

 set -o allexport
 source .env
 set +o allexport

 run_integration_test "$SERVICE"
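With the renames in place, the "$1"_integration_test filter passed to cargo
test matches exactly one service's test function. For example, to run the
backup tests directly:

    cargo test backup_integration_test --test '*' --manifest-path=commtest/Cargo.toml

or through the script:

    ./scripts/run_integration_tests.sh backup   # or: all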