D4324.id13932.diff
D4324: [services] Backup - Fix DynamoDB item size limit
diff --git a/services/backup/src/Constants.h b/services/backup/src/Constants.h
--- a/services/backup/src/Constants.h
+++ b/services/backup/src/Constants.h
@@ -18,9 +18,9 @@
// than the chunk limit, once we get the amount of data of size equal to the
// limit, we wouldn't know if we should put this in the database right away or
// wait for more data.
-// 400KB limit (KB, not KiB, that's why it's 1000 not 1024) -
+// 400KiB limit (the docs say KB, but they mean KiB) -
// https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ServiceQuotas.html
-const size_t LOG_DATA_SIZE_DATABASE_LIMIT = 400 * 1000;
+const size_t LOG_DATA_SIZE_DATABASE_LIMIT = 1024 * 400;
} // namespace network
} // namespace comm
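
The KB/KiB distinction above is easy to verify with the bytesize crate already used in the tests below. A minimal sketch (the assertions are illustrative, not part of the patch):

use bytesize::ByteSize;

fn main() {
    // Decimal kilobytes: 400 * 1000 = 400,000 bytes (the old, too-small limit).
    assert_eq!(ByteSize::kb(400).as_u64(), 400_000);
    // Binary kibibytes: 400 * 1024 = 409,600 bytes, matching the DynamoDB
    // item size limit that LOG_DATA_SIZE_DATABASE_LIMIT now encodes.
    assert_eq!(ByteSize::kib(400).as_u64(), 409_600);
}
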
diff --git a/services/commtest/tests/backup_test.rs b/services/commtest/tests/backup_test.rs
--- a/services/commtest/tests/backup_test.rs
+++ b/services/commtest/tests/backup_test.rs
@@ -13,8 +13,8 @@
use backup_utils::{BackupData, Item};
use bytesize::ByteSize;
-use tools::get_grpc_chunk_size_limit;
use tools::Error;
+use tools::{get_dynamo_db_item_size_limit, get_grpc_chunk_size_limit};
use backup_utils::BackupServiceClient;
@@ -30,25 +30,30 @@
String::new(),
vec![ByteSize::mib(1).as_u64() as usize; 6],
vec![
+ "holder0".to_string(),
"holder1".to_string(),
"holder2".to_string(),
- "holder3".to_string(),
],
),
log_items: vec![
- Item::new(String::new(), vec![ByteSize::b(100).as_u64() as usize], vec!["holder1".to_string()]),
+ // an item that almost hits the DB limit; we later add a long list of
+ // attachments, which pushes it over the limit. In that case its data
+ // should be moved to S3
Item::new(
String::new(),
- vec![ByteSize::kb(400).as_u64() as usize],
- vec!["holder2".to_string(), "holder3".to_string()],
+ vec![get_dynamo_db_item_size_limit() - ByteSize::b(100).as_u64() as usize],
+ vec!["holder0".to_string(), "holder1".to_string()],
),
+ // just a small item
+ Item::new(String::new(), vec![ByteSize::b(100).as_u64() as usize], vec!["holder0".to_string()]),
+ // a big item that should be placed in S3 right away
Item::new(
String::new(),
vec![get_grpc_chunk_size_limit(), get_grpc_chunk_size_limit()],
vec![
+ "holder0".to_string(),
"holder1".to_string(),
"holder2".to_string(),
- "holder3".to_string(),
],
),
],
@@ -122,5 +127,39 @@
);
}
+ // push so many attachments that the log item's data will have to be moved
+ // from the DB to S3
+ let mut attachments_size = 0;
+ let mut i = backup_data.log_items[0].attachments_holders.len();
+ let mut new_attachments: Vec<String> = vec![];
+ while attachments_size < 500 {
+ let att = format!("holder{}", i);
+ attachments_size += att.len();
+ new_attachments.push(att);
+ i += 1;
+ }
+
+ let mut old_attachments =
+ backup_data.log_items[0].attachments_holders.clone();
+ backup_data.log_items[0].attachments_holders = new_attachments;
+ add_attachments::run(&mut client, &backup_data, Some(0)).await?;
+ backup_data.log_items[0]
+ .attachments_holders
+ .append(&mut old_attachments);
+ let result = pull_backup::run(&mut client, &backup_data).await?;
+ // check logs attachments
+ for i in 0..backup_data.log_items.len() {
+ let expected: usize = backup_data.log_items[i].attachments_holders.len();
+ let from_result: usize = result.log_items[i].attachments_holders.len();
+ assert!(
+ from_result == expected,
+ "after attachment add: log {}: number of attachments holders do not match,
+ expected {}, got {}",
+ i,
+ expected,
+ from_result
+ );
+ }
+
Ok(())
}
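
To make the arithmetic behind this test explicit: the first log item is created 100 bytes under the item limit, and the loop then adds roughly 500 bytes of holder names, so the stored item must cross the threshold. A back-of-the-envelope sketch (real DynamoDB item accounting also counts attribute names, so this mirrors only the test's approximation):

use bytesize::ByteSize;

fn main() {
    let limit = ByteSize::kib(400).as_u64() as usize; // 409,600 bytes
    // The first log item leaves only 100 bytes of headroom under the limit.
    let initial_data = limit - ByteSize::b(100).as_u64() as usize;
    // The while loop above stops once ~500 bytes of holder names are added.
    let added_attachments = 500;
    // Exceeding the limit is what forces the service to move the data to S3.
    assert!(initial_data + added_attachments > limit);
}
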
diff --git a/services/commtest/tests/lib/tools.rs b/services/commtest/tests/lib/tools.rs
--- a/services/commtest/tests/lib/tools.rs
+++ b/services/commtest/tests/lib/tools.rs
@@ -1,7 +1,10 @@
use bytesize::ByteSize;
#[allow(dead_code)]
-pub fn generate_nbytes(number_of_bytes: usize, predefined_byte_value: Option<u8>) -> Vec<u8> {
+pub fn generate_nbytes(
+ number_of_bytes: usize,
+ predefined_byte_value: Option<u8>,
+) -> Vec<u8> {
let byte_value = predefined_byte_value.unwrap_or(b'A');
return vec![byte_value; number_of_bytes];
}
@@ -18,6 +21,11 @@
TonicStatus(tonic::Status),
}
+#[allow(dead_code)]
+pub fn get_dynamo_db_item_size_limit() -> usize {
+ ByteSize::kib(400).as_u64() as usize
+}
+
pub const GRPC_METADATA_SIZE_BYTES: usize = 5;
#[allow(dead_code)]
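
For reference, a hypothetical caller showing how the new helper composes with the existing generate_nbytes (a sketch only, with both tools.rs definitions inlined so it compiles standalone):

use bytesize::ByteSize;

pub fn get_dynamo_db_item_size_limit() -> usize {
    ByteSize::kib(400).as_u64() as usize
}

pub fn generate_nbytes(
    number_of_bytes: usize,
    predefined_byte_value: Option<u8>,
) -> Vec<u8> {
    let byte_value = predefined_byte_value.unwrap_or(b'A');
    vec![byte_value; number_of_bytes]
}

fn main() {
    // Build a payload 100 bytes under the DynamoDB item limit, as the test above does.
    let payload = generate_nbytes(get_dynamo_db_item_size_limit() - 100, Some(b'X'));
    assert_eq!(payload.len(), 409_500);
}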