diff --git a/native/native_rust_library/src/lib.rs b/native/native_rust_library/src/lib.rs index e233612dc..3b0f5f137 100644 --- a/native/native_rust_library/src/lib.rs +++ b/native/native_rust_library/src/lib.rs @@ -1,1175 +1,1175 @@ use backup::ffi::*; use comm_opaque2::client::{Login, Registration}; use comm_opaque2::grpc::opaque_error_to_grpc_status as handle_error; use ffi::{bool_callback, string_callback, void_callback}; use grpc_clients::identity::protos::authenticated::{ InboundKeyInfo, InboundKeysForUserRequest, KeyserverKeysResponse, OutboundKeyInfo, OutboundKeysForUserRequest, UpdateUserPasswordFinishRequest, UpdateUserPasswordStartRequest, UploadOneTimeKeysRequest, }; use grpc_clients::identity::protos::unauth::{ DeviceKeyUpload, DeviceType, Empty, IdentityKeyInfo, OpaqueLoginFinishRequest, OpaqueLoginStartRequest, Prekey, RegistrationFinishRequest, RegistrationStartRequest, WalletLoginRequest, }; use grpc_clients::identity::{ get_auth_client, get_unauthenticated_client, REQUEST_METADATA_COOKIE_KEY, RESPONSE_METADATA_COOKIE_KEY, }; use lazy_static::lazy_static; use serde::Serialize; use std::sync::Arc; use tokio::runtime::{Builder, Runtime}; use tonic::{Request, Status}; use tracing::instrument; mod argon2_tools; mod backup; mod constants; use argon2_tools::compute_backup_key_str; mod generated { // We get the CODE_VERSION from this generated file include!(concat!(env!("OUT_DIR"), "/version.rs")); // We get the IDENTITY_SOCKET_ADDR from this generated file include!(concat!(env!("OUT_DIR"), "/socket_config.rs")); } pub use generated::CODE_VERSION; pub use generated::{BACKUP_SOCKET_ADDR, IDENTITY_SOCKET_ADDR}; #[cfg(not(target_os = "android"))] pub const DEVICE_TYPE: DeviceType = DeviceType::Ios; #[cfg(target_os = "android")] pub const DEVICE_TYPE: DeviceType = DeviceType::Android; lazy_static! 
{ static ref RUNTIME: Arc<Runtime> = Arc::new(Builder::new_multi_thread().enable_all().build().unwrap()); } #[cxx::bridge] mod ffi { extern "Rust" { #[cxx_name = "identityRegisterUser"] fn register_user( username: String, password: String, key_payload: String, key_payload_signature: String, content_prekey: String, content_prekey_signature: String, notif_prekey: String, notif_prekey_signature: String, content_one_time_keys: Vec<String>, notif_one_time_keys: Vec<String>, promise_id: u32, ); #[cxx_name = "identityLoginPasswordUser"] fn login_password_user( username: String, password: String, key_payload: String, key_payload_signature: String, content_prekey: String, content_prekey_signature: String, notif_prekey: String, notif_prekey_signature: String, content_one_time_keys: Vec<String>, notif_one_time_keys: Vec<String>, promise_id: u32, ); #[cxx_name = "identityLoginWalletUser"] fn login_wallet_user( siwe_message: String, siwe_signature: String, key_payload: String, key_payload_signature: String, content_prekey: String, content_prekey_signature: String, notif_prekey: String, notif_prekey_signature: String, content_one_time_keys: Vec<String>, notif_one_time_keys: Vec<String>, social_proof: String, promise_id: u32, ); #[cxx_name = "identityUpdateUserPassword"] fn update_user_password( user_id: String, device_id: String, access_token: String, password: String, promise_id: u32, ); #[cxx_name = "identityDeleteUser"] fn delete_user( user_id: String, device_id: String, access_token: String, promise_id: u32, ); #[cxx_name = "identityGetOutboundKeysForUser"] fn get_outbound_keys_for_user( auth_user_id: String, auth_device_id: String, auth_access_token: String, user_id: String, promise_id: u32, ); #[cxx_name = "identityGetInboundKeysForUser"] fn get_inbound_keys_for_user( auth_user_id: String, auth_device_id: String, auth_access_token: String, user_id: String, promise_id: u32, ); #[cxx_name = "identityGenerateNonce"] fn generate_nonce(promise_id: u32); #[cxx_name = "identityVersionSupported"] fn version_supported(promise_id: u32); #[cxx_name = "identityUploadOneTimeKeys"] fn upload_one_time_keys( auth_user_id: String, auth_device_id: String, auth_access_token: String, content_one_time_keys: Vec<String>, notif_one_time_keys: Vec<String>, promise_id: u32, ); #[cxx_name = "identityGetKeyserverKeys"] fn get_keyserver_keys( user_id: String, device_id: String, access_token: String, keyserver_id: String, promise_id: u32, ); // Argon2 #[cxx_name = "compute_backup_key"] fn compute_backup_key_str( password: &str, backup_id: &str, ) -> Result<[u8; 32]>; } unsafe extern "C++" { include!("RustCallback.h"); #[namespace = "comm"] #[cxx_name = "stringCallback"] fn string_callback(error: String, promise_id: u32, ret: String); #[namespace = "comm"] #[cxx_name = "voidCallback"] fn void_callback(error: String, promise_id: u32); #[namespace = "comm"] #[cxx_name = "boolCallback"] fn bool_callback(error: String, promise_id: u32, ret: bool); } // AES cryptography #[namespace = "comm"] unsafe extern "C++" { include!("RustAESCrypto.h"); #[allow(unused)] #[cxx_name = "aesGenerateKey"] fn generate_key(buffer: &mut [u8]) -> Result<()>; /// The first two arguments aren't mutated but creation of Java ByteBuffer /// requires the underlying bytes to be mutable. #[allow(unused)] #[cxx_name = "aesEncrypt"] fn encrypt( key: &mut [u8], plaintext: &mut [u8], sealed_data: &mut [u8], ) -> Result<()>; /// The first two arguments aren't mutated but creation of Java ByteBuffer /// requires the underlying bytes to be mutable.
#[allow(unused)] #[cxx_name = "aesDecrypt"] fn decrypt( key: &mut [u8], sealed_data: &mut [u8], plaintext: &mut [u8], ) -> Result<()>; } // Comm Services Auth Metadata Emission #[namespace = "comm"] unsafe extern "C++" { include!("RustCSAMetadataEmitter.h"); #[allow(unused)] #[cxx_name = "sendAuthMetadataToJS"] fn send_auth_metadata_to_js( access_token: String, user_id: String, ) -> Result<()>; } // Backup extern "Rust" { #[cxx_name = "createBackup"] fn create_backup_sync( backup_id: String, backup_secret: String, pickle_key: String, pickled_account: String, user_data: String, promise_id: u32, ); #[cxx_name = "restoreBackup"] fn restore_backup_sync(backup_secret: String, promise_id: u32); } // Secure store #[namespace = "comm"] unsafe extern "C++" { include!("RustSecureStore.h"); #[allow(unused)] #[cxx_name = "secureStoreSet"] fn secure_store_set(key: &str, value: String) -> Result<()>; #[cxx_name = "secureStoreGet"] fn secure_store_get(key: &str) -> Result<String>; } // C++ Backup creation #[namespace = "comm"] unsafe extern "C++" { include!("RustBackupExecutor.h"); #[allow(unused)] #[cxx_name = "getBackupDirectoryPath"] fn get_backup_directory_path() -> Result<String>; #[allow(unused)] #[cxx_name = "getBackupFilePath"] fn get_backup_file_path( backup_id: String, is_attachments: bool, ) -> Result<String>; } } fn handle_string_result_as_callback<E>( result: Result<String, E>, promise_id: u32, ) where E: std::fmt::Display, { match result { Err(e) => string_callback(e.to_string(), promise_id, "".to_string()), Ok(r) => string_callback("".to_string(), promise_id, r), } } fn handle_void_result_as_callback<E>(result: Result<(), E>, promise_id: u32) where E: std::fmt::Display, { match result { Err(e) => void_callback(e.to_string(), promise_id), Ok(_) => void_callback("".to_string(), promise_id), } } fn handle_bool_result_as_callback<E>(result: Result<bool, E>, promise_id: u32) where E: std::fmt::Display, { match result { Err(e) => bool_callback(e.to_string(), promise_id, false), Ok(r) => bool_callback("".to_string(), promise_id, r), } } fn generate_nonce(promise_id: u32) { RUNTIME.spawn(async move { let result = fetch_nonce().await; handle_string_result_as_callback(result, promise_id); }); } async fn fetch_nonce() -> Result<String, Error> { let mut identity_client = get_unauthenticated_client( IDENTITY_SOCKET_ADDR, CODE_VERSION, DEVICE_TYPE.as_str_name().to_lowercase(), ) .await?; let nonce = identity_client .generate_nonce(Empty {}) .await?
.into_inner() .nonce; Ok(nonce) } fn version_supported(promise_id: u32) { RUNTIME.spawn(async move { let result = version_supported_helper().await; handle_bool_result_as_callback(result, promise_id); }); } async fn version_supported_helper() -> Result<bool, Error> { let mut identity_client = get_unauthenticated_client( IDENTITY_SOCKET_ADDR, CODE_VERSION, DEVICE_TYPE.as_str_name().to_lowercase(), ) .await?; let response = identity_client.ping(Empty {}).await; match response { Ok(_) => Ok(true), Err(e) => { if grpc_clients::error::is_version_unsupported(&e) { Ok(false) } else { Err(e.into()) } } } } fn get_keyserver_keys( user_id: String, device_id: String, access_token: String, keyserver_id: String, promise_id: u32, ) { RUNTIME.spawn(async move { let get_keyserver_keys_request = OutboundKeysForUserRequest { user_id: keyserver_id, }; let auth_info = AuthInfo { access_token, user_id, device_id, }; let result = get_keyserver_keys_helper(get_keyserver_keys_request, auth_info).await; handle_string_result_as_callback(result, promise_id); }); } async fn get_keyserver_keys_helper( get_keyserver_keys_request: OutboundKeysForUserRequest, auth_info: AuthInfo, ) -> Result<String, Error> { let mut identity_client = get_auth_client( IDENTITY_SOCKET_ADDR, auth_info.user_id, auth_info.device_id, auth_info.access_token, CODE_VERSION, DEVICE_TYPE.as_str_name().to_lowercase(), ) .await?; - let mut response = identity_client + let response = identity_client .get_keyserver_keys(get_keyserver_keys_request) .await? .into_inner(); let keyserver_keys = OutboundKeyInfoResponse::try_from(response)?; Ok(serde_json::to_string(&keyserver_keys)?) } struct AuthInfo { user_id: String, device_id: String, access_token: String, } #[instrument] fn register_user( username: String, password: String, key_payload: String, key_payload_signature: String, content_prekey: String, content_prekey_signature: String, notif_prekey: String, notif_prekey_signature: String, content_one_time_keys: Vec<String>, notif_one_time_keys: Vec<String>, promise_id: u32, ) { RUNTIME.spawn(async move { let password_user_info = PasswordUserInfo { username, password, key_payload, key_payload_signature, content_prekey, content_prekey_signature, notif_prekey, notif_prekey_signature, content_one_time_keys, notif_one_time_keys, }; let result = register_user_helper(password_user_info).await; handle_string_result_as_callback(result, promise_id); }); } struct PasswordUserInfo { username: String, password: String, key_payload: String, key_payload_signature: String, content_prekey: String, content_prekey_signature: String, notif_prekey: String, notif_prekey_signature: String, content_one_time_keys: Vec<String>, notif_one_time_keys: Vec<String>, } #[derive(Serialize)] #[serde(rename_all = "camelCase")] struct UserIDAndDeviceAccessToken { #[serde(rename = "userID")] user_id: String, access_token: String, } async fn register_user_helper( password_user_info: PasswordUserInfo, ) -> Result<String, Error> { let mut client_registration = Registration::new(); let opaque_registration_request = client_registration .start(&password_user_info.password) .map_err(handle_error)?; let registration_start_request = RegistrationStartRequest { opaque_registration_request, username: password_user_info.username, device_key_upload: Some(DeviceKeyUpload { device_key_info: Some(IdentityKeyInfo { payload: password_user_info.key_payload, payload_signature: password_user_info.key_payload_signature, social_proof: None, }), content_upload: Some(Prekey { prekey: password_user_info.content_prekey, prekey_signature: password_user_info.content_prekey_signature, }),
notif_upload: Some(Prekey { prekey: password_user_info.notif_prekey, prekey_signature: password_user_info.notif_prekey_signature, }), one_time_content_prekeys: password_user_info.content_one_time_keys, one_time_notif_prekeys: password_user_info.notif_one_time_keys, device_type: DEVICE_TYPE.into(), }), }; let mut identity_client = get_unauthenticated_client( IDENTITY_SOCKET_ADDR, CODE_VERSION, DEVICE_TYPE.as_str_name().to_lowercase(), ) .await?; let response = identity_client .register_password_user_start(registration_start_request) .await?; // We need to get the load balancer cookie from the response and send it // in the subsequent request to ensure it is routed to the same identity // service instance as the first request let cookie = response .metadata() .get(RESPONSE_METADATA_COOKIE_KEY) .cloned(); let registration_start_response = response.into_inner(); let opaque_registration_upload = client_registration .finish( &password_user_info.password, &registration_start_response.opaque_registration_response, ) .map_err(handle_error)?; let registration_finish_request = RegistrationFinishRequest { session_id: registration_start_response.session_id, opaque_registration_upload, }; let mut finish_request = Request::new(registration_finish_request); // Cookie won't be available in local dev environments if let Some(cookie_metadata) = cookie { finish_request .metadata_mut() .insert(REQUEST_METADATA_COOKIE_KEY, cookie_metadata); } let registration_finish_response = identity_client .register_password_user_finish(finish_request) .await? .into_inner(); let user_id_and_access_token = UserIDAndDeviceAccessToken { user_id: registration_finish_response.user_id, access_token: registration_finish_response.access_token, }; Ok(serde_json::to_string(&user_id_and_access_token)?) }
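The start/finish cookie dance above is repeated verbatim by the login and update-password helpers below. A minimal sketch of the shared pattern — the helper name is hypothetical and not part of this diff; the metadata keys are the real constants imported at the top of this file:

fn forward_lb_cookie<T, U>(
  start_response: &tonic::Response<T>,
  finish_request: &mut tonic::Request<U>,
) {
  // Cookie won't be available in local dev environments; when present,
  // echoing it back pins the finish request to the same identity
  // service instance that handled the start request.
  if let Some(cookie) = start_response
    .metadata()
    .get(RESPONSE_METADATA_COOKIE_KEY)
    .cloned()
  {
    finish_request
      .metadata_mut()
      .insert(REQUEST_METADATA_COOKIE_KEY, cookie);
  }
}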
#[instrument] fn login_password_user( username: String, password: String, key_payload: String, key_payload_signature: String, content_prekey: String, content_prekey_signature: String, notif_prekey: String, notif_prekey_signature: String, content_one_time_keys: Vec<String>, notif_one_time_keys: Vec<String>, promise_id: u32, ) { RUNTIME.spawn(async move { let password_user_info = PasswordUserInfo { username, password, key_payload, key_payload_signature, content_prekey, content_prekey_signature, notif_prekey, notif_prekey_signature, content_one_time_keys, notif_one_time_keys, }; let result = login_password_user_helper(password_user_info).await; handle_string_result_as_callback(result, promise_id); }); } async fn login_password_user_helper( password_user_info: PasswordUserInfo, ) -> Result<String, Error> { let mut client_login = Login::new(); let opaque_login_request = client_login .start(&password_user_info.password) .map_err(handle_error)?; let login_start_request = OpaqueLoginStartRequest { opaque_login_request, username: password_user_info.username, device_key_upload: Some(DeviceKeyUpload { device_key_info: Some(IdentityKeyInfo { payload: password_user_info.key_payload, payload_signature: password_user_info.key_payload_signature, social_proof: None, }), content_upload: Some(Prekey { prekey: password_user_info.content_prekey, prekey_signature: password_user_info.content_prekey_signature, }), notif_upload: Some(Prekey { prekey: password_user_info.notif_prekey, prekey_signature: password_user_info.notif_prekey_signature, }), one_time_content_prekeys: password_user_info.content_one_time_keys, one_time_notif_prekeys: password_user_info.notif_one_time_keys, device_type: DEVICE_TYPE.into(), }), }; let mut identity_client = get_unauthenticated_client( IDENTITY_SOCKET_ADDR, CODE_VERSION, DEVICE_TYPE.as_str_name().to_lowercase(), ) .await?; let response = identity_client .log_in_password_user_start(login_start_request) .await?; // We need to get the load balancer cookie from the response and send it // in the subsequent request to ensure it is routed to the same identity // service instance as the first request let cookie = response .metadata() .get(RESPONSE_METADATA_COOKIE_KEY) .cloned(); let login_start_response = response.into_inner(); let opaque_login_upload = client_login .finish(&login_start_response.opaque_login_response) .map_err(handle_error)?; let login_finish_request = OpaqueLoginFinishRequest { session_id: login_start_response.session_id, opaque_login_upload, }; let mut finish_request = Request::new(login_finish_request); // Cookie won't be available in local dev environments if let Some(cookie_metadata) = cookie { finish_request .metadata_mut() .insert(REQUEST_METADATA_COOKIE_KEY, cookie_metadata); } let login_finish_response = identity_client .log_in_password_user_finish(finish_request) .await? .into_inner(); let user_id_and_access_token = UserIDAndDeviceAccessToken { user_id: login_finish_response.user_id, access_token: login_finish_response.access_token, }; Ok(serde_json::to_string(&user_id_and_access_token)?)
} struct WalletUserInfo { siwe_message: String, siwe_signature: String, key_payload: String, key_payload_signature: String, content_prekey: String, content_prekey_signature: String, notif_prekey: String, notif_prekey_signature: String, content_one_time_keys: Vec<String>, notif_one_time_keys: Vec<String>, social_proof: String, } #[instrument] fn login_wallet_user( siwe_message: String, siwe_signature: String, key_payload: String, key_payload_signature: String, content_prekey: String, content_prekey_signature: String, notif_prekey: String, notif_prekey_signature: String, content_one_time_keys: Vec<String>, notif_one_time_keys: Vec<String>, social_proof: String, promise_id: u32, ) { RUNTIME.spawn(async move { let wallet_user_info = WalletUserInfo { siwe_message, siwe_signature, key_payload, key_payload_signature, content_prekey, content_prekey_signature, notif_prekey, notif_prekey_signature, content_one_time_keys, notif_one_time_keys, social_proof, }; let result = login_wallet_user_helper(wallet_user_info).await; handle_string_result_as_callback(result, promise_id); }); } async fn login_wallet_user_helper( wallet_user_info: WalletUserInfo, ) -> Result<String, Error> { let login_request = WalletLoginRequest { siwe_message: wallet_user_info.siwe_message, siwe_signature: wallet_user_info.siwe_signature, device_key_upload: Some(DeviceKeyUpload { device_key_info: Some(IdentityKeyInfo { payload: wallet_user_info.key_payload, payload_signature: wallet_user_info.key_payload_signature, social_proof: Some(wallet_user_info.social_proof), }), content_upload: Some(Prekey { prekey: wallet_user_info.content_prekey, prekey_signature: wallet_user_info.content_prekey_signature, }), notif_upload: Some(Prekey { prekey: wallet_user_info.notif_prekey, prekey_signature: wallet_user_info.notif_prekey_signature, }), one_time_content_prekeys: wallet_user_info.content_one_time_keys, one_time_notif_prekeys: wallet_user_info.notif_one_time_keys, device_type: DEVICE_TYPE.into(), }), }; let mut identity_client = get_unauthenticated_client( IDENTITY_SOCKET_ADDR, CODE_VERSION, DEVICE_TYPE.as_str_name().to_lowercase(), ) .await?; let login_response = identity_client .log_in_wallet_user(login_request) .await? .into_inner(); let user_id_and_access_token = UserIDAndDeviceAccessToken { user_id: login_response.user_id, access_token: login_response.access_token, }; Ok(serde_json::to_string(&user_id_and_access_token)?) }
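All three flows above resolve the JS promise with the same serialized payload. A quick sketch of the JSON shape produced by the serde attributes on UserIDAndDeviceAccessToken (camelCase rename plus the explicit "userID" override), assuming the struct exactly as declared earlier in this file:

#[derive(serde::Serialize)]
#[serde(rename_all = "camelCase")]
struct UserIDAndDeviceAccessToken {
  #[serde(rename = "userID")]
  user_id: String,
  access_token: String,
}

fn main() {
  let payload = UserIDAndDeviceAccessToken {
    user_id: "some-user".to_string(),
    access_token: "some-token".to_string(),
  };
  // Prints: {"userID":"some-user","accessToken":"some-token"}
  println!("{}", serde_json::to_string(&payload).unwrap());
}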
struct UpdatePasswordInfo { user_id: String, device_id: String, access_token: String, password: String, } fn update_user_password( user_id: String, device_id: String, access_token: String, password: String, promise_id: u32, ) { RUNTIME.spawn(async move { let update_password_info = UpdatePasswordInfo { access_token, user_id, device_id, password, }; let result = update_user_password_helper(update_password_info).await; handle_void_result_as_callback(result, promise_id); }); } async fn update_user_password_helper( update_password_info: UpdatePasswordInfo, ) -> Result<(), Error> { let mut client_registration = Registration::new(); let opaque_registration_request = client_registration .start(&update_password_info.password) .map_err(handle_error)?; let update_password_start_request = UpdateUserPasswordStartRequest { opaque_registration_request, }; let mut identity_client = get_auth_client( IDENTITY_SOCKET_ADDR, update_password_info.user_id, update_password_info.device_id, update_password_info.access_token, CODE_VERSION, DEVICE_TYPE.as_str_name().to_lowercase(), ) .await?; let response = identity_client .update_user_password_start(update_password_start_request) .await?; // We need to get the load balancer cookie from the response and send it // in the subsequent request to ensure it is routed to the same identity // service instance as the first request let cookie = response .metadata() .get(RESPONSE_METADATA_COOKIE_KEY) .cloned(); let update_password_start_response = response.into_inner(); let opaque_registration_upload = client_registration .finish( &update_password_info.password, &update_password_start_response.opaque_registration_response, ) .map_err(handle_error)?; let update_password_finish_request = UpdateUserPasswordFinishRequest { session_id: update_password_start_response.session_id, opaque_registration_upload, }; let mut finish_request = Request::new(update_password_finish_request); // Cookie won't be available in local dev environments if let Some(cookie_metadata) = cookie { finish_request .metadata_mut() .insert(REQUEST_METADATA_COOKIE_KEY, cookie_metadata); } identity_client .update_user_password_finish(finish_request) .await?; Ok(()) } fn delete_user( user_id: String, device_id: String, access_token: String, promise_id: u32, ) { RUNTIME.spawn(async move { let auth_info = AuthInfo { access_token, user_id, device_id, }; let result = delete_user_helper(auth_info).await; handle_void_result_as_callback(result, promise_id); }); } async fn delete_user_helper(auth_info: AuthInfo) -> Result<(), Error> { let mut identity_client = get_auth_client( IDENTITY_SOCKET_ADDR, auth_info.user_id, auth_info.device_id, auth_info.access_token, CODE_VERSION, DEVICE_TYPE.as_str_name().to_lowercase(), ) .await?; identity_client.delete_user(Empty {}).await?; Ok(()) } struct GetOutboundKeysRequestInfo { user_id: String, } struct GetInboundKeysRequestInfo { user_id: String, } // This struct should not be altered without also updating // OutboundKeyInfoResponse in lib/types/identity-service-types.js #[derive(Serialize)] #[serde(rename_all = "camelCase")] struct OutboundKeyInfoResponse { pub payload: String, pub payload_signature: String, pub social_proof: Option<String>, pub content_prekey: String, pub content_prekey_signature: String, pub notif_prekey: String, pub notif_prekey_signature: String, pub one_time_content_prekey: Option<String>, pub one_time_notif_prekey: Option<String>, } // This struct should not be altered without also updating // InboundKeyInfoResponse in lib/types/identity-service-types.js #[derive(Serialize)]
#[serde(rename_all = "camelCase")] struct InboundKeyInfoResponse { pub payload: String, pub payload_signature: String, pub social_proof: Option<String>, pub content_prekey: String, pub content_prekey_signature: String, pub notif_prekey: String, pub notif_prekey_signature: String, } impl TryFrom<OutboundKeyInfo> for OutboundKeyInfoResponse { type Error = Error; fn try_from(key_info: OutboundKeyInfo) -> Result<Self, Self::Error> { let identity_info = key_info.identity_info.ok_or(Error::MissingResponseData)?; let IdentityKeyInfo { payload, payload_signature, social_proof, } = identity_info; let content_prekey = key_info.content_prekey.ok_or(Error::MissingResponseData)?; let Prekey { prekey: content_prekey_value, prekey_signature: content_prekey_signature, } = content_prekey; let notif_prekey = key_info.notif_prekey.ok_or(Error::MissingResponseData)?; let Prekey { prekey: notif_prekey_value, prekey_signature: notif_prekey_signature, } = notif_prekey; let one_time_content_prekey = key_info.one_time_content_prekey; let one_time_notif_prekey = key_info.one_time_notif_prekey; Ok(Self { payload, payload_signature, social_proof, content_prekey: content_prekey_value, content_prekey_signature, notif_prekey: notif_prekey_value, notif_prekey_signature, one_time_content_prekey, one_time_notif_prekey, }) } } impl TryFrom<KeyserverKeysResponse> for OutboundKeyInfoResponse { type Error = Error; fn try_from(response: KeyserverKeysResponse) -> Result<Self, Self::Error> { let key_info = response.keyserver_info.ok_or(Error::MissingResponseData)?; Self::try_from(key_info) } } fn get_outbound_keys_for_user( auth_user_id: String, auth_device_id: String, auth_access_token: String, user_id: String, promise_id: u32, ) { RUNTIME.spawn(async move { let get_outbound_keys_request_info = GetOutboundKeysRequestInfo { user_id }; let auth_info = AuthInfo { access_token: auth_access_token, user_id: auth_user_id, device_id: auth_device_id, }; let result = get_outbound_keys_for_user_helper( get_outbound_keys_request_info, auth_info, ) .await; handle_string_result_as_callback(result, promise_id); }); } async fn get_outbound_keys_for_user_helper( get_outbound_keys_request_info: GetOutboundKeysRequestInfo, auth_info: AuthInfo, ) -> Result<String, Error> { let mut identity_client = get_auth_client( IDENTITY_SOCKET_ADDR, auth_info.user_id, auth_info.device_id, auth_info.access_token, CODE_VERSION, DEVICE_TYPE.as_str_name().to_lowercase(), ) .await?; let response = identity_client .get_outbound_keys_for_user(OutboundKeysForUserRequest { user_id: get_outbound_keys_request_info.user_id, }) .await? .into_inner(); let outbound_key_info: Vec<OutboundKeyInfoResponse> = response .devices .into_values() .map(OutboundKeyInfoResponse::try_from) .collect::<Result<Vec<_>, _>>()?; Ok(serde_json::to_string(&outbound_key_info)?)
} impl TryFrom<InboundKeyInfo> for InboundKeyInfoResponse { type Error = Error; fn try_from(key_info: InboundKeyInfo) -> Result<Self, Self::Error> { let identity_info = key_info.identity_info.ok_or(Error::MissingResponseData)?; let IdentityKeyInfo { payload, payload_signature, social_proof, } = identity_info; let content_prekey = key_info.content_prekey.ok_or(Error::MissingResponseData)?; let Prekey { prekey: content_prekey_value, prekey_signature: content_prekey_signature, } = content_prekey; let notif_prekey = key_info.notif_prekey.ok_or(Error::MissingResponseData)?; let Prekey { prekey: notif_prekey_value, prekey_signature: notif_prekey_signature, } = notif_prekey; Ok(Self { payload, payload_signature, social_proof, content_prekey: content_prekey_value, content_prekey_signature, notif_prekey: notif_prekey_value, notif_prekey_signature, }) } } fn get_inbound_keys_for_user( auth_user_id: String, auth_device_id: String, auth_access_token: String, user_id: String, promise_id: u32, ) { RUNTIME.spawn(async move { let get_inbound_keys_request_info = GetInboundKeysRequestInfo { user_id }; let auth_info = AuthInfo { access_token: auth_access_token, user_id: auth_user_id, device_id: auth_device_id, }; let result = get_inbound_keys_for_user_helper( get_inbound_keys_request_info, auth_info, ) .await; handle_string_result_as_callback(result, promise_id); }); } async fn get_inbound_keys_for_user_helper( get_inbound_keys_request_info: GetInboundKeysRequestInfo, auth_info: AuthInfo, ) -> Result<String, Error> { let mut identity_client = get_auth_client( IDENTITY_SOCKET_ADDR, auth_info.user_id, auth_info.device_id, auth_info.access_token, CODE_VERSION, DEVICE_TYPE.as_str_name().to_lowercase(), ) .await?; let response = identity_client .get_inbound_keys_for_user(InboundKeysForUserRequest { user_id: get_inbound_keys_request_info.user_id, }) .await? .into_inner(); let inbound_key_info: Vec<InboundKeyInfoResponse> = response .devices .into_values() .map(InboundKeyInfoResponse::try_from) .collect::<Result<Vec<_>, _>>()?; Ok(serde_json::to_string(&inbound_key_info)?) }
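Both key-fetch helpers above rely on the same fallible-collect idiom: map TryFrom over the proto map's values and let the first Error::MissingResponseData abort the whole conversion. A stand-in sketch of that idiom — the types here are placeholders, not the generated proto ones:

use std::collections::HashMap;

struct Device { payload: Option<String> }
struct DeviceResponse { payload: String }

impl TryFrom<Device> for DeviceResponse {
  type Error = &'static str;
  fn try_from(device: Device) -> Result<Self, Self::Error> {
    // Mirrors ok_or(Error::MissingResponseData) in the helpers above
    let payload = device.payload.ok_or("missing response data")?;
    Ok(DeviceResponse { payload })
  }
}

fn convert(
  devices: HashMap<String, Device>,
) -> Result<Vec<DeviceResponse>, &'static str> {
  // One malformed device fails the entire response
  devices
    .into_values()
    .map(DeviceResponse::try_from)
    .collect::<Result<Vec<_>, _>>()
}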
#[instrument] fn upload_one_time_keys( auth_user_id: String, auth_device_id: String, auth_access_token: String, content_one_time_keys: Vec<String>, notif_one_time_keys: Vec<String>, promise_id: u32, ) { RUNTIME.spawn(async move { let upload_request = UploadOneTimeKeysRequest { content_one_time_prekeys: content_one_time_keys, notif_one_time_prekeys: notif_one_time_keys, }; let auth_info = AuthInfo { access_token: auth_access_token, user_id: auth_user_id, device_id: auth_device_id, }; let result = upload_one_time_keys_helper(auth_info, upload_request).await; handle_void_result_as_callback(result, promise_id); }); } async fn upload_one_time_keys_helper( auth_info: AuthInfo, upload_request: UploadOneTimeKeysRequest, ) -> Result<(), Error> { let mut identity_client = get_auth_client( IDENTITY_SOCKET_ADDR, auth_info.user_id, auth_info.device_id, auth_info.access_token, CODE_VERSION, DEVICE_TYPE.as_str_name().to_lowercase(), ) .await?; identity_client.upload_one_time_keys(upload_request).await?; Ok(()) } #[derive( Debug, derive_more::Display, derive_more::From, derive_more::Error, )] pub enum Error { #[display(...)] TonicGRPC(Status), #[display(...)] SerdeJson(serde_json::Error), #[display(...)] MissingResponseData, GRPClient(grpc_clients::error::Error), } #[cfg(test)] mod tests { use super::{BACKUP_SOCKET_ADDR, CODE_VERSION, IDENTITY_SOCKET_ADDR}; #[test] fn test_code_version_exists() { assert!(CODE_VERSION > 0); } #[test] fn test_identity_socket_addr_exists() { assert!(IDENTITY_SOCKET_ADDR.len() > 0); assert!(BACKUP_SOCKET_ADDR.len() > 0); } }
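The reason the helpers above can apply `?` to tonic, serde_json, and grpc_clients errors alike is the derive_more::From derive on this Error enum: each newtype-style variant gets a generated From impl for its payload. A sketch of the hand-written equivalent for two of the variants (roughly what the derive expands to):

impl From<Status> for Error {
  fn from(status: Status) -> Self {
    Error::TonicGRPC(status)
  }
}

impl From<serde_json::Error> for Error {
  fn from(err: serde_json::Error) -> Self {
    Error::SerdeJson(err)
  }
}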
diff --git a/services/backup/src/config.rs b/services/backup/src/config.rs index 661985e85..e63bfa3ec 100644 --- a/services/backup/src/config.rs +++ b/services/backup/src/config.rs @@ -1,44 +1,44 @@ use clap::Parser; use once_cell::sync::Lazy; use tracing::info; use crate::constants::{DEFAULT_BLOB_SERVICE_URL, DEFAULT_HTTP_PORT}; #[derive(Parser)] #[command(version, about, long_about = None)] pub struct AppConfig { /// HTTP server listening port #[arg(long, default_value_t = DEFAULT_HTTP_PORT)] pub http_port: u16, /// AWS Localstack service URL #[arg(env = "LOCALSTACK_ENDPOINT")] #[arg(long)] pub localstack_endpoint: Option<String>, /// Blob service URL #[arg(env = "BLOB_SERVICE_URL")] #[arg(long, default_value = DEFAULT_BLOB_SERVICE_URL)] pub blob_service_url: reqwest::Url, } /// Stores configuration parsed from command-line arguments /// and environment variables -pub static CONFIG: Lazy<AppConfig> = Lazy::new(|| AppConfig::parse()); +pub static CONFIG: Lazy<AppConfig> = Lazy::new(AppConfig::parse); /// Processes the command-line arguments and environment variables. /// Should be called at the beginning of the `main()` function. pub(super) fn parse_cmdline_args() { // force evaluation of the lazy initialized config Lazy::force(&CONFIG); } /// Provides region/credentials configuration for AWS SDKs pub async fn load_aws_config() -> aws_types::SdkConfig { let mut config_builder = aws_config::from_env(); if let Some(endpoint) = &CONFIG.localstack_endpoint { info!("Using Localstack. AWS endpoint URL: {}", endpoint); config_builder = config_builder.endpoint_url(endpoint); } config_builder.load().await } diff --git a/services/backup/src/database/mod.rs b/services/backup/src/database/mod.rs index 394e7feb2..bffbe78e1 100644 --- a/services/backup/src/database/mod.rs +++ b/services/backup/src/database/mod.rs @@ -1,283 +1,284 @@ pub mod backup_item; pub mod log_item; use self::{ backup_item::{BackupItem, OrderedBackupItem}, log_item::LogItem, }; use crate::constants::{backup_table, log_table, LOG_DEFAULT_PAGE_SIZE}; use aws_sdk_dynamodb::{ operation::get_item::GetItemOutput, types::{AttributeValue, ReturnValue}, }; use comm_lib::database::{parse_int_attribute, Error}; use tracing::{error, trace, warn}; #[derive(Clone)] pub struct DatabaseClient { client: aws_sdk_dynamodb::Client, } impl DatabaseClient { pub fn new(aws_config: &aws_types::SdkConfig) -> Self { DatabaseClient { client: aws_sdk_dynamodb::Client::new(aws_config), } } } /// Backup functions impl DatabaseClient { pub async fn put_backup_item( &self, backup_item: BackupItem, ) -> Result<(), Error> { let item = backup_item.into(); self .client .put_item() .table_name(backup_table::TABLE_NAME) .set_item(Some(item)) .send() .await .map_err(|e| { error!("DynamoDB client failed to put backup item"); Error::AwsSdk(e.into()) })?; Ok(()) } pub async fn find_backup_item( &self, user_id: &str, backup_id: &str, ) -> Result<Option<BackupItem>, Error> { let item_key = BackupItem::item_key(user_id, backup_id); let output = self .client .get_item() .table_name(backup_table::TABLE_NAME) .set_key(Some(item_key)) .send() .await .map_err(|e| { error!("DynamoDB client failed to find backup item"); Error::AwsSdk(e.into()) })?; let GetItemOutput { item: Some(item), .. - } = output else { - return Ok(None) + } = output + else { + return Ok(None); }; let backup_item = item.try_into()?; Ok(Some(backup_item)) } pub async fn find_last_backup_item( &self, user_id: &str, ) -> Result<Option<BackupItem>, Error> { let response = self .client .query() .table_name(backup_table::TABLE_NAME) .index_name(backup_table::CREATED_INDEX) .key_condition_expression("#userID = :valueToMatch") .expression_attribute_names("#userID", backup_table::attr::USER_ID) .expression_attribute_values( ":valueToMatch", AttributeValue::S(user_id.to_string()), ) .limit(1) .scan_index_forward(false) .send() .await .map_err(|e| { error!("DynamoDB client failed to find last backup"); Error::AwsSdk(e.into()) })?; match response.items.unwrap_or_default().pop() { Some(item) => { let backup_item = item.try_into()?; Ok(Some(backup_item)) } None => Ok(None), } } pub async fn remove_backup_item( &self, user_id: &str, backup_id: &str, ) -> Result<Option<BackupItem>, Error> { let item_key = BackupItem::item_key(user_id, backup_id); let response = self .client .delete_item() .table_name(backup_table::TABLE_NAME) .set_key(Some(item_key)) .return_values(ReturnValue::AllOld) .send() .await .map_err(|e| { error!("DynamoDB client failed to remove backup item"); Error::AwsSdk(e.into()) })?; response .attributes .map(BackupItem::try_from) .transpose() .map_err(Error::from) } /// For the purposes of the initial backup version this function /// removes all backups except for the latest one pub async fn remove_old_backups( &self, user_id: &str, ) -> Result<Vec<BackupItem>, Error> { let response = self .client .query() .table_name(backup_table::TABLE_NAME) .index_name(backup_table::CREATED_INDEX) .key_condition_expression("#userID = :valueToMatch") .expression_attribute_names("#userID", backup_table::attr::USER_ID) .expression_attribute_values( ":valueToMatch",
AttributeValue::S(user_id.to_string()), ) .scan_index_forward(false) .send() .await .map_err(|e| { error!("DynamoDB client failed to fetch backups"); Error::AwsSdk(e.into()) })?; if response.last_evaluated_key().is_some() { // In the initial version of the backup service this function will be run // for every new backup (each user only has one backup), so this shouldn't // happen warn!("Not all old backups have been cleaned up"); } let items = response .items .unwrap_or_default() .into_iter() .map(OrderedBackupItem::try_from) .collect::<Result<Vec<_>, _>>()?; let mut removed_backups = vec![]; let Some(latest) = items.iter().map(|item| item.created).max() else { return Ok(removed_backups); }; for item in items { if item.created == latest { trace!( "Skipping removal of the latest backup item: {}", item.backup_id ); continue; } trace!("Removing backup item: {item:?}"); if let Some(backup) = self.remove_backup_item(user_id, &item.backup_id).await? { removed_backups.push(backup); } else { warn!("Backup was found during query, but wasn't found when deleting") }; } Ok(removed_backups) } } /// Backup log functions impl DatabaseClient { pub async fn put_log_item(&self, log_item: LogItem) -> Result<(), Error> { let item = log_item.into(); self .client .put_item() .table_name(log_table::TABLE_NAME) .set_item(Some(item)) .send() .await .map_err(|e| { error!("DynamoDB client failed to put log item"); Error::AwsSdk(e.into()) })?; Ok(()) } pub async fn fetch_log_items( &self, backup_id: &str, from_id: Option<usize>, ) -> Result<(Vec<LogItem>, Option<usize>), Error> { let mut query = self .client .query() .table_name(log_table::TABLE_NAME) .key_condition_expression("#backupID = :valueToMatch") .expression_attribute_names("#backupID", log_table::attr::BACKUP_ID) .expression_attribute_values( ":valueToMatch", AttributeValue::S(backup_id.to_string()), ) .limit(LOG_DEFAULT_PAGE_SIZE); if let Some(from_id) = from_id { query = query .exclusive_start_key( log_table::attr::BACKUP_ID, AttributeValue::S(backup_id.to_string()), ) .exclusive_start_key( log_table::attr::LOG_ID, AttributeValue::N(from_id.to_string()), ); } let response = query.send().await.map_err(|e| { error!("DynamoDB client failed to fetch logs"); Error::AwsSdk(e.into()) })?; let last_id = response .last_evaluated_key() .map(|key| { parse_int_attribute( log_table::attr::LOG_ID, key.get(log_table::attr::LOG_ID).cloned(), ) }) .transpose()?; let items = response .items .unwrap_or_default() .into_iter() .map(LogItem::try_from) .collect::<Result<Vec<_>, _>>()?; Ok((items, last_id)) } } diff --git a/services/backup/src/http/handlers/backup.rs b/services/backup/src/http/handlers/backup.rs index b2e39dc1a..bb8f76349 100644 --- a/services/backup/src/http/handlers/backup.rs +++ b/services/backup/src/http/handlers/backup.rs @@ -1,317 +1,317 @@ use actix_web::{ error::ErrorBadRequest, web::{self, Bytes}, HttpResponse, Responder, }; use comm_lib::{ auth::UserIdentity, backup::LatestBackupIDResponse, blob::{client::BlobServiceClient, types::BlobInfo}, http::multipart::{get_named_text_field, get_text_field}, tools::Defer, }; use std::convert::Infallible; use tokio_stream::{wrappers::ReceiverStream, StreamExt}; use tracing::{info, instrument, trace, warn}; use crate::{ database::{backup_item::BackupItem, DatabaseClient}, error::BackupError, }; #[instrument(skip_all, fields(backup_id))] pub async fn upload( user: UserIdentity, blob_client: web::Data<BlobServiceClient>, db_client: web::Data<DatabaseClient>, mut multipart: actix_multipart::Multipart, ) -> actix_web::Result<HttpResponse> { let backup_id = get_named_text_field("backup_id", &mut multipart).await?;
tracing::Span::current().record("backup_id", &backup_id); info!("Backup data upload started"); let (user_keys_blob_info, user_keys_revoke) = forward_field_to_blob( &mut multipart, &blob_client, "user_keys_hash", "user_keys", ) .await?; let (user_data_blob_info, user_data_revoke) = forward_field_to_blob( &mut multipart, &blob_client, "user_data_hash", "user_data", ) .await?; let attachments_hashes: Vec<String> = match get_text_field(&mut multipart).await? { Some((name, attachments)) => { if name != "attachments" { warn!( name, "Malformed request: 'attachments' text field expected." ); return Err(ErrorBadRequest("Bad request")); } attachments.lines().map(ToString::to_string).collect() } None => Vec::new(), }; let mut attachments = Vec::new(); let mut attachments_revokes = Vec::new(); for attachment_hash in attachments_hashes { let (holder, revoke) = create_attachment_holder(&attachment_hash, &blob_client).await?; attachments.push(BlobInfo { blob_hash: attachment_hash, holder, }); attachments_revokes.push(revoke); } let item = BackupItem::new( user.user_id.clone(), backup_id, user_keys_blob_info, user_data_blob_info, attachments, ); db_client .put_backup_item(item) .await .map_err(BackupError::from)?; user_keys_revoke.cancel(); user_data_revoke.cancel(); for attachment_revoke in attachments_revokes { attachment_revoke.cancel(); } for backup in db_client .remove_old_backups(&user.user_id) .await .map_err(BackupError::from)? { backup.revoke_holders(&blob_client).await; } Ok(HttpResponse::Ok().finish()) } #[instrument(skip_all, fields(hash_field_name, data_field_name))] async fn forward_field_to_blob<'revoke, 'blob: 'revoke>( multipart: &mut actix_multipart::Multipart, blob_client: &'blob web::Data<BlobServiceClient>, hash_field_name: &str, data_field_name: &str, ) -> actix_web::Result<(BlobInfo, Defer<'revoke>)> { trace!("Reading blob fields: {hash_field_name:?}, {data_field_name:?}"); let blob_hash = get_named_text_field(hash_field_name, multipart).await?; let Some(mut field) = multipart.try_next().await? else { warn!("Malformed request: expected a field."); return Err(ErrorBadRequest("Bad request"))?; }; if field.name() != data_field_name { warn!( hash_field_name, "Malformed request: '{data_field_name}' data field expected." ); return Err(ErrorBadRequest("Bad request"))?; } let blob_info = BlobInfo { blob_hash, holder: uuid::Uuid::new_v4().to_string(), }; // [`actix_multipart::Multipart`] isn't [`std::marker::Send`], and so we cannot pass it to the blob client directly. // Instead we have to forward it to a channel and create a stream from the receiver. let (tx, rx) = tokio::sync::mpsc::channel(1); let receive_promise = async move { trace!("Receiving blob data"); // [`actix_multipart::MultipartError`] isn't [`std::marker::Send`] so we return it here, and pass [`Infallible`] // as the error to the channel while let Some(chunk) = field.try_next().await? { if let Err(err) = tx.send(Result::<Bytes, Infallible>::Ok(chunk)).await { warn!("Error when sending data through a channel: '{err}'"); // Error here means that the channel has been closed from the blob client side. We don't want to return an error // here, because `tokio::try_join!` only returns the first error it receives and we want to prioritize the backup // client error.
break; } } trace!("Finished receiving blob data"); Result::<(), actix_web::Error>::Ok(()) }; let data_stream = ReceiverStream::new(rx); let send_promise = async { blob_client .simple_put(&blob_info.blob_hash, &blob_info.holder, data_stream) .await .map_err(BackupError::from)?; Ok(()) }; tokio::try_join!(receive_promise, send_promise)?; let revoke_info = blob_info.clone(); let revoke_holder = Defer::new(|| { blob_client .schedule_revoke_holder(revoke_info.blob_hash, revoke_info.holder) }); Ok((blob_info, revoke_holder)) } #[instrument(skip_all)] async fn create_attachment_holder<'revoke, 'blob: 'revoke>( attachment: &str, blob_client: &'blob web::Data<BlobServiceClient>, ) -> Result<(String, Defer<'revoke>), BackupError> { let holder = uuid::Uuid::new_v4().to_string(); if !blob_client .assign_holder(attachment, &holder) .await .map_err(BackupError::from)? { warn!("Blob attachment with hash {attachment:?} doesn't exist"); } let revoke_hash = attachment.to_string(); let revoke_holder = holder.clone(); let revoke_holder = Defer::new(|| { blob_client.schedule_revoke_holder(revoke_hash, revoke_holder) }); Ok((holder, revoke_holder)) } #[instrument(skip_all, fields(backup_id = %path))] pub async fn download_user_keys( user: UserIdentity, path: web::Path<String>, blob_client: web::Data<BlobServiceClient>, db_client: web::Data<DatabaseClient>, ) -> actix_web::Result<HttpResponse> { info!("Download user keys request"); let backup_id = path.into_inner(); download_user_blob( |item| &item.user_keys, &user.user_id, &backup_id, blob_client, db_client, ) .await } #[instrument(skip_all, fields(backup_id = %path))] pub async fn download_user_data( user: UserIdentity, path: web::Path<String>, blob_client: web::Data<BlobServiceClient>, db_client: web::Data<DatabaseClient>, ) -> actix_web::Result<HttpResponse> { info!("Download user data request"); let backup_id = path.into_inner(); download_user_blob( |item| &item.user_data, &user.user_id, &backup_id, blob_client, db_client, ) .await } pub async fn download_user_blob( data_extractor: impl FnOnce(&BackupItem) -> &BlobInfo, user_id: &str, backup_id: &str, blob_client: web::Data<BlobServiceClient>, db_client: web::Data<DatabaseClient>, ) -> actix_web::Result<HttpResponse> { let backup_item = db_client .find_backup_item(user_id, backup_id) .await .map_err(BackupError::from)? .ok_or(BackupError::NoBackup)?; let stream = blob_client .get(&data_extractor(&backup_item).blob_hash) .await .map_err(BackupError::from)?; Ok( HttpResponse::Ok() .content_type("application/octet-stream") .streaming(stream), ) } #[instrument(skip_all, fields(username = %path))] pub async fn get_latest_backup_id( path: web::Path<String>, db_client: web::Data<DatabaseClient>, ) -> actix_web::Result<impl Responder> { let username = path.into_inner(); // Treat username as user_id in the initial version let user_id = username; let Some(backup_item) = db_client .find_last_backup_item(&user_id) .await - .map_err(BackupError::from)? else - { + .map_err(BackupError::from)? + else { return Err(BackupError::NoBackup.into()); }; let response = LatestBackupIDResponse { backup_id: backup_item.backup_id, }; Ok(web::Json(response)) } #[instrument(skip_all, fields(username = %path))] pub async fn download_latest_backup_keys( path: web::Path<String>, db_client: web::Data<DatabaseClient>, blob_client: web::Data<BlobServiceClient>, ) -> actix_web::Result<HttpResponse> { let username = path.into_inner(); // Treat username as user_id in the initial version let user_id = username; let Some(backup_item) = db_client .find_last_backup_item(&user_id) .await - .map_err(BackupError::from)? else - { + .map_err(BackupError::from)?
+ else { return Err(BackupError::NoBackup.into()); }; let stream = blob_client .get(&backup_item.user_keys.blob_hash) .await .map_err(BackupError::from)?; Ok( HttpResponse::Ok() .content_type("application/octet-stream") .streaming(stream), ) } diff --git a/services/backup/src/http/handlers/log.rs b/services/backup/src/http/handlers/log.rs index 07902552d..dd9a3890e 100644 --- a/services/backup/src/http/handlers/log.rs +++ b/services/backup/src/http/handlers/log.rs @@ -1,307 +1,307 @@ use crate::constants::WS_FRAME_SIZE; use crate::database::{log_item::LogItem, DatabaseClient}; use actix::{Actor, ActorContext, ActorFutureExt, AsyncContext, StreamHandler}; use actix_http::ws::{CloseCode, Item}; use actix_web::{ web::{self, Bytes, BytesMut}, Error, HttpRequest, HttpResponse, }; use actix_web_actors::ws::{self, WebsocketContext}; use comm_lib::{ backup::{ DownloadLogsRequest, LogWSRequest, LogWSResponse, UploadLogRequest, }, blob::{ client::{BlobServiceClient, BlobServiceError}, types::BlobInfo, }, database::{self, blob::BlobOrDBContent}, }; use std::{ sync::Arc, time::{Duration, Instant}, }; use tracing::{error, info, instrument, warn}; pub async fn handle_ws( path: web::Path<String>, req: HttpRequest, stream: web::Payload, blob_client: web::Data<BlobServiceClient>, db_client: web::Data<DatabaseClient>, ) -> Result<HttpResponse, Error> { let backup_id = path.into_inner(); ws::WsResponseBuilder::new( LogWSActor { info: Arc::new(ConnectionInfo { backup_id }), blob_client: blob_client.as_ref().clone(), db_client: db_client.as_ref().clone(), last_msg_time: Instant::now(), buffer: BytesMut::new(), }, &req, stream, ) .frame_size(WS_FRAME_SIZE) .start() } struct ConnectionInfo { backup_id: String, } #[derive( Debug, derive_more::From, derive_more::Display, derive_more::Error, )] enum LogWSError { - BincodeError(bincode::Error), - BlobError(BlobServiceError), - DBError(database::Error), + Bincode(bincode::Error), + Blob(BlobServiceError), + DB(database::Error), } struct LogWSActor { info: Arc<ConnectionInfo>, blob_client: BlobServiceClient, db_client: DatabaseClient, last_msg_time: Instant, buffer: BytesMut, } impl LogWSActor { const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(5); const CONNECTION_TIMEOUT: Duration = Duration::from_secs(10); fn handle_msg_sync( &self, ctx: &mut WebsocketContext<Self>, bytes: Bytes, ) { let fut = Self::handle_msg( self.info.clone(), self.blob_client.clone(), self.db_client.clone(), - bytes.into(), + bytes, ); let fut = actix::fut::wrap_future(fut).map( |responses, _: &mut LogWSActor, ctx: &mut WebsocketContext<LogWSActor>| { let responses = match responses { Ok(responses) => responses, Err(err) => { error!("Error: {err:?}"); vec![LogWSResponse::ServerError] } }; for response in responses { match bincode::serialize(&response) { Ok(bytes) => ctx.binary(bytes), Err(error) => { error!( "Error serializing a response: {response:?}.
Error: {error}" ); } }; } }, ); ctx.spawn(fut); } async fn handle_msg( info: Arc<ConnectionInfo>, blob_client: BlobServiceClient, db_client: DatabaseClient, bytes: Bytes, ) -> Result<Vec<LogWSResponse>, LogWSError> { let request = bincode::deserialize(&bytes)?; match request { LogWSRequest::UploadLog(UploadLogRequest { log_id, content, attachments, }) => { let mut attachment_blob_infos = Vec::new(); for attachment in attachments.unwrap_or_default() { let blob_info = Self::create_attachment(&blob_client, attachment).await?; attachment_blob_infos.push(blob_info); } let mut log_item = LogItem { backup_id: info.backup_id.clone(), log_id, content: BlobOrDBContent::new(content), attachments: attachment_blob_infos, }; log_item.ensure_size_constraints(&blob_client).await?; db_client.put_log_item(log_item).await?; Ok(vec![LogWSResponse::LogUploaded { log_id }]) } LogWSRequest::DownloadLogs(DownloadLogsRequest { from_id }) => { let (log_items, last_id) = db_client.fetch_log_items(&info.backup_id, from_id).await?; let mut messages = vec![]; for LogItem { log_id, content, attachments, .. } in log_items { let content = content.fetch_bytes(&blob_client).await?; let attachments: Vec<String> = attachments.into_iter().map(|att| att.blob_hash).collect(); let attachments = if attachments.is_empty() { None } else { Some(attachments) }; messages.push(LogWSResponse::LogDownload { log_id, content, attachments, }) } messages.push(LogWSResponse::LogDownloadFinished { last_log_id: last_id, }); Ok(messages) } } } async fn create_attachment( blob_client: &BlobServiceClient, attachment: String, ) -> Result<BlobInfo, LogWSError> { let blob_info = BlobInfo { blob_hash: attachment, holder: uuid::Uuid::new_v4().to_string(), }; if !blob_client .assign_holder(&blob_info.blob_hash, &blob_info.holder) .await? { warn!( "Blob attachment with hash {:?} doesn't exist", blob_info.blob_hash ); } Ok(blob_info) } } impl Actor for LogWSActor { type Context = ws::WebsocketContext<Self>; #[instrument(skip_all, fields(backup_id = self.info.backup_id))] fn started(&mut self, ctx: &mut Self::Context) { info!("Socket opened"); ctx.run_interval(Self::HEARTBEAT_INTERVAL, |actor, ctx| { if Instant::now().duration_since(actor.last_msg_time) > Self::CONNECTION_TIMEOUT { warn!("Socket timeout, closing connection"); ctx.stop(); return; } ctx.ping(&[]); }); } #[instrument(skip_all, fields(backup_id = self.info.backup_id))] fn stopped(&mut self, _: &mut Self::Context) { info!("Socket closed"); } } impl StreamHandler<Result<ws::Message, ws::ProtocolError>> for LogWSActor { #[instrument(skip_all, fields(backup_id = self.info.backup_id))] fn handle( &mut self, msg: Result<ws::Message, ws::ProtocolError>, ctx: &mut Self::Context, ) { let msg = match msg { Ok(msg) => msg, Err(err) => { warn!("Error during socket message handling: {err}"); ctx.close(Some(CloseCode::Error.into())); ctx.stop(); return; } }; self.last_msg_time = Instant::now(); match msg { ws::Message::Binary(bytes) => self.handle_msg_sync(ctx, bytes), // Continuations - this is mostly boilerplate code.
Some websocket // clients may split a message into these ones ws::Message::Continuation(Item::FirstBinary(bytes)) => { if !self.buffer.is_empty() { warn!("Socket received continuation before previous was completed"); ctx.close(Some(CloseCode::Error.into())); ctx.stop(); return; } self.buffer.extend_from_slice(&bytes); } ws::Message::Continuation(Item::Continue(bytes)) => { if self.buffer.is_empty() { warn!("Socket received continuation message before it was started"); ctx.close(Some(CloseCode::Error.into())); ctx.stop(); return; } self.buffer.extend_from_slice(&bytes); } ws::Message::Continuation(Item::Last(bytes)) => { if self.buffer.is_empty() { warn!( "Socket received last continuation message before it was started" ); ctx.close(Some(CloseCode::Error.into())); ctx.stop(); return; } self.buffer.extend_from_slice(&bytes); let bytes = self.buffer.split(); self.handle_msg_sync(ctx, bytes.into()); } // Heartbeat ws::Message::Ping(message) => ctx.pong(&message), ws::Message::Pong(_) => (), // Other ws::Message::Text(_) | ws::Message::Continuation(Item::FirstText(_)) => { warn!("Socket received unsupported message"); ctx.close(Some(CloseCode::Unsupported.into())); ctx.stop(); } ws::Message::Close(reason) => { info!("Socket was closed"); ctx.close(reason); ctx.stop(); } ws::Message::Nop => (), } } } diff --git a/services/blob/src/database/client.rs b/services/blob/src/database/client.rs index 1429bc919..294695c9b 100644 --- a/services/blob/src/database/client.rs +++ b/services/blob/src/database/client.rs @@ -1,413 +1,417 @@ use aws_sdk_dynamodb::{ operation::put_item::PutItemOutput, types::{ AttributeValue, Delete, DeleteRequest, PutRequest, TransactWriteItem, Update, WriteRequest, }, Error as DynamoDBError, }; use chrono::Utc; use comm_lib::database::{ self, batch_operations::ExponentialBackoffConfig, TryFromAttribute, }; use std::collections::HashMap; use tracing::{debug, error, trace}; use crate::constants::db::*; use super::errors::{BlobDBError, Error as DBError}; use super::types::*; #[derive(Clone)] pub struct DatabaseClient { ddb: aws_sdk_dynamodb::Client, } /// public interface implementation impl DatabaseClient { pub fn new(aws_config: &aws_config::SdkConfig) -> Self { DatabaseClient { ddb: aws_sdk_dynamodb::Client::new(aws_config), } } /// Gets a blob item row from the database by its blob hash /// Returns None if the blob item is not found pub async fn get_blob_item( &self, blob_hash: impl Into<String>, ) -> DBResult<Option<BlobItemRow>> { let key = PrimaryKey::for_blob_item(blob_hash); self .get_raw_item(key.clone()) .await? .map(BlobItemRow::try_from) .transpose() } /// Inserts a new blob item row into the database. Returns Error /// if the item already exists. pub async fn put_blob_item(&self, blob_item: BlobItemInput) -> DBResult<()> { let item = HashMap::from([ ( ATTR_BLOB_HASH.to_string(), AttributeValue::S(blob_item.blob_hash), ), ( ATTR_HOLDER.to_string(), AttributeValue::S(BLOB_ITEM_ROW_HOLDER_VALUE.into()), ), ( ATTR_S3_PATH.to_string(), AttributeValue::S(blob_item.s3_path.to_full_path()), ), (ATTR_UNCHECKED.to_string(), UncheckedKind::Blob.into()), ]); self.insert_item(item).await?; Ok(()) } /// Deletes a blob item row. Doesn't delete its holders.
pub async fn delete_blob_item( &self, blob_hash: impl Into<String>, ) -> DBResult<()> { let key = PrimaryKey::for_blob_item(blob_hash); self .ddb .delete_item() .table_name(BLOB_TABLE_NAME) .set_key(Some(key.into())) .send() .await .map_err(|err| { debug!("DynamoDB client failed to delete blob item: {:?}", err); DBError::AwsSdk(err.into()) })?; Ok(()) } // Inserts a new holder assignment row into the database. Returns Error // if the item already exists or holder format is invalid. pub async fn put_holder_assignment( &self, blob_hash: impl Into<String>, holder: impl Into<String>, ) -> DBResult<()> { let blob_hash: String = blob_hash.into(); let holder: String = holder.into(); validate_holder(&holder)?; let item = HashMap::from([ (ATTR_BLOB_HASH.to_string(), AttributeValue::S(blob_hash)), (ATTR_HOLDER.to_string(), AttributeValue::S(holder)), (ATTR_UNCHECKED.to_string(), UncheckedKind::Holder.into()), ]); self.insert_item(item).await?; Ok(()) } /// Deletes a holder assignment row from the table. /// If the blob item for the given holder assignment exists, it will be marked as unchecked. /// /// Returns Error if the holder format is invalid or a race condition happened. /// Doesn't fail if the holder assignment didn't exist before. pub async fn delete_holder_assignment( &self, blob_hash: impl Into<String>, holder: impl Into<String>, ) -> DBResult<()> { let blob_hash: String = blob_hash.into(); let holder: String = holder.into(); validate_holder(&holder)?; let mut transaction = Vec::new(); // delete the holder row let assignment_key = PrimaryKey { blob_hash: blob_hash.clone(), holder, }; let delete_request = Delete::builder() .table_name(BLOB_TABLE_NAME) .set_key(Some(assignment_key.into())) .build(); transaction .push(TransactWriteItem::builder().delete(delete_request).build()); // mark the blob item as unchecked if exists let blob_primary_key = PrimaryKey::for_blob_item(blob_hash); if self.get_raw_item(blob_primary_key.clone()).await?.is_some() { let update_request = Update::builder() .table_name(BLOB_TABLE_NAME) .set_key(Some(blob_primary_key.into())) // even though we checked that the blob item exists, we still need to check it again // using DDB built-in conditions in case it was deleted in the meantime .condition_expression( "attribute_exists(#blob_hash) AND attribute_exists(#holder)", ) .update_expression("SET #unchecked = :unchecked, #last_modified = :now") .expression_attribute_names("#blob_hash", ATTR_BLOB_HASH) .expression_attribute_names("#holder", ATTR_HOLDER) .expression_attribute_names("#unchecked", ATTR_UNCHECKED) .expression_attribute_names("#last_modified", ATTR_LAST_MODIFIED) .expression_attribute_values(":unchecked", UncheckedKind::Blob.into()) .expression_attribute_values( ":now", AttributeValue::N(Utc::now().timestamp_millis().to_string()), ) .build(); transaction .push(TransactWriteItem::builder().update(update_request).build()); } self .ddb .transact_write_items() .set_transact_items(Some(transaction)) .send() .await .map_err(|err| { debug!("DynamoDB client failed to run transaction: {:?}", err); DBError::AwsSdk(err.into()) })?; Ok(()) } /// Queries the table for a list of holders for the given blob hash. /// Optionally limits the number of results.
pub async fn list_blob_holders( &self, blob_hash: impl Into<String>, limit: Option<i32>, ) -> DBResult<Vec<String>> { let response = self .ddb .query() .table_name(BLOB_TABLE_NAME) .projection_expression("#holder") .key_condition_expression("#blob_hash = :blob_hash") .expression_attribute_names("#blob_hash", ATTR_BLOB_HASH) .expression_attribute_names("#holder", ATTR_HOLDER) .expression_attribute_values( ":blob_hash", AttributeValue::S(blob_hash.into()), ) .consistent_read(true) // we need to increase limit by 1 because the blob item itself can be fetched too // it is filtered-out later .set_limit(limit.map(|it| it + 1)) .send() .await .map_err(|err| { error!("DynamoDB client failed to query holders: {:?}", err); DBError::AwsSdk(err.into()) })?; - let Some(items) = response.items else { return Ok(vec![]); }; + let Some(items) = response.items else { + return Ok(vec![]); + }; items .into_iter() .filter_map(|mut row| { // filter out rows that are blob items // we cannot do it in key condition expression - it doesn't support the <> operator // filter expression doesn't work either - it doesn't support filtering by sort key match String::try_from_attr(ATTR_HOLDER, row.remove(ATTR_HOLDER)) { Ok(value) if value.as_str() == BLOB_ITEM_ROW_HOLDER_VALUE => None, holder => Some(holder), } }) .collect::<Result<Vec<_>, _>>() .map_err(DBError::Attribute) } /// Returns a list of primary keys for rows that already exist in the table pub async fn list_existing_keys( &self, keys: impl IntoIterator<Item = PrimaryKey>, ) -> DBResult<Vec<PrimaryKey>> { database::batch_operations::batch_get( &self.ddb, BLOB_TABLE_NAME, keys, Some(format!("{}, {}", ATTR_BLOB_HASH, ATTR_HOLDER)), ExponentialBackoffConfig::default(), ) .await? .into_iter() .map(PrimaryKey::try_from) .collect::<Result<Vec<_>, _>>() } /// Returns a list of primary keys for "unchecked" items (blob / holder) /// that were last modified at least `min_age` ago. /// We need to specify if we want to get blob or holder items. pub async fn find_unchecked_items( &self, kind: UncheckedKind, min_age: chrono::Duration, ) -> DBResult<Vec<PrimaryKey>> { let created_until = Utc::now() - min_age; let timestamp = created_until.timestamp_millis(); let response = self .ddb .query() .table_name(BLOB_TABLE_NAME) .index_name(UNCHECKED_INDEX_NAME) .key_condition_expression( "#unchecked = :kind AND #last_modified < :timestamp", ) .expression_attribute_names("#unchecked", ATTR_UNCHECKED) .expression_attribute_names("#last_modified", ATTR_LAST_MODIFIED) .expression_attribute_values(":kind", kind.into()) .expression_attribute_values( ":timestamp", AttributeValue::N(timestamp.to_string()), ) .send() .await .map_err(|err| { error!("DynamoDB client failed to query unchecked items: {:?}", err); DBError::AwsSdk(err.into()) })?; - let Some(items) = response.items else { return Ok(vec![]); }; + let Some(items) = response.items else { + return Ok(vec![]); + }; items .into_iter() .map(PrimaryKey::try_from) .collect::<Result<Vec<_>, _>>() } /// For all rows in the specified set of primary keys, removes /// the "unchecked" attribute using a PutItem operation in batch.
  /// For all rows in the specified set of primary keys, removes
  /// the "unchecked" attribute using a PutItem operation in batch.
  pub async fn batch_mark_checked(
    &self,
    keys: impl IntoIterator<Item = PrimaryKey>,
  ) -> DBResult<()> {
    let items_to_mark = database::batch_operations::batch_get(
      &self.ddb,
      BLOB_TABLE_NAME,
      keys,
      None,
      ExponentialBackoffConfig::default(),
    )
    .await?;

    let write_requests = items_to_mark
      .into_iter()
      .filter_map(|mut row| {
        // filter out rows that are already checked
        // to save some write capacity
        row.remove(ATTR_UNCHECKED)?;
        let put_request = PutRequest::builder().set_item(Some(row)).build();
        let request = WriteRequest::builder().put_request(put_request).build();
        Some(request)
      })
      .collect();

    database::batch_operations::batch_write(
      &self.ddb,
      BLOB_TABLE_NAME,
      write_requests,
      ExponentialBackoffConfig::default(),
    )
    .await?;
    Ok(())
  }

  /// Performs multiple DeleteItem operations in batch
  pub async fn batch_delete_rows(
    &self,
    keys: impl IntoIterator<Item = PrimaryKey>,
  ) -> DBResult<()> {
    let write_requests = keys
      .into_iter()
      .map(|key| DeleteRequest::builder().set_key(Some(key.into())).build())
      .map(|request| WriteRequest::builder().delete_request(request).build())
      .collect::<Vec<_>>();

    database::batch_operations::batch_write(
      &self.ddb,
      BLOB_TABLE_NAME,
      write_requests,
      ExponentialBackoffConfig::default(),
    )
    .await?;
    Ok(())
  }
}

// private helpers
impl DatabaseClient {
  /// Inserts a new item into the table using PutItem. Returns an
  /// error if the item already exists.
  async fn insert_item(
    &self,
    mut item: RawAttributes,
  ) -> DBResult<PutItemOutput> {
    // add metadata attributes common for all types of rows
    let now = Utc::now().timestamp_millis();
    item.insert(
      ATTR_CREATED_AT.to_string(),
      AttributeValue::N(now.to_string()),
    );
    item.insert(
      ATTR_LAST_MODIFIED.to_string(),
      AttributeValue::N(now.to_string()),
    );

    self
      .ddb
      .put_item()
      .table_name(BLOB_TABLE_NAME)
      .set_item(Some(item))
      // make sure we don't accidentally overwrite an existing row
      .condition_expression(
        "attribute_not_exists(#blob_hash) AND attribute_not_exists(#holder)",
      )
      .expression_attribute_names("#blob_hash", ATTR_BLOB_HASH)
      .expression_attribute_names("#holder", ATTR_HOLDER)
      .send()
      .await
      .map_err(|err| match DynamoDBError::from(err) {
        DynamoDBError::ConditionalCheckFailedException(e) => {
          debug!("DynamoDB client failed to insert: item already exists");
          trace!("Conditional check failed with error: {}", e);
          DBError::ItemAlreadyExists
        }
        err => {
          debug!("DynamoDB client failed to insert: {:?}", err);
          DBError::AwsSdk(err)
        }
      })
  }

  /// Gets a single row from the table using GetItem, without parsing it
  async fn get_raw_item(
    &self,
    key: PrimaryKey,
  ) -> DBResult<Option<RawAttributes>> {
    self
      .ddb
      .get_item()
      .table_name(BLOB_TABLE_NAME)
      .set_key(Some(key.into()))
      .send()
      .await
      .map_err(|err| {
        debug!("DynamoDB client failed to get item: {:?}", err);
        DBError::AwsSdk(err.into())
      })
      .map(|response| response.item)
  }
}

fn validate_holder(holder: &str) -> DBResult<()> {
  if holder == BLOB_ITEM_ROW_HOLDER_VALUE {
    debug!("Invalid holder: {}", holder);
    return Err(DBError::Blob(BlobDBError::InvalidInput(holder.to_string())));
  }
  Ok(())
}
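// Sketch: because insert_item guards its PutItem with a condition expression,
// duplicate writes surface as DBError::ItemAlreadyExists. A caller wanting
// idempotent semantics can treat that variant as success (names here are
// illustrative):
//
//   match db.put_holder_assignment(blob_hash, holder).await {
//     Ok(()) | Err(DBError::ItemAlreadyExists) => {} // holder exists either way
//     Err(err) => return Err(err),
//   }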
diff --git a/services/blob/src/database/types.rs b/services/blob/src/database/types.rs
index f1b23277c..64a851ae1 100644
--- a/services/blob/src/database/types.rs
+++ b/services/blob/src/database/types.rs
@@ -1,241 +1,238 @@
use aws_sdk_dynamodb::types::AttributeValue;
use chrono::{DateTime, Utc};
use comm_lib::database::{
  parse_timestamp_attribute, AttributeTryInto, DBItemError, Value,
};
use derive_more::Constructor;
use std::collections::HashMap;

use crate::{config::CONFIG, constants::db::*, s3::S3Path};

use super::errors::Error as DBError;

/// Represents a database row in the DynamoDB SDK format
pub(super) type RawAttributes = HashMap<String, AttributeValue>;
/// A convenience Result type for database operations
pub(super) type DBResult<T> = Result<T, DBError>;

/// Represents a type-safe version of a DynamoDB blob table item.
/// Each row can be either a blob item or a holder assignment.
///
/// It implements the `TryFrom` trait to convert from raw DynamoDB
/// `AttributeValue`s to the type-safe version.
pub enum DBRow {
  BlobItem(BlobItemRow),
  HolderAssignment(HolderAssignmentRow),
}

impl TryFrom<RawAttributes> for DBRow {
  type Error = DBError;

  fn try_from(attributes: RawAttributes) -> Result<Self, Self::Error> {
    let holder: String = attributes
      .get(ATTR_HOLDER)
      .cloned()
      .attr_try_into(ATTR_HOLDER)?;
    let row = match holder.as_str() {
      BLOB_ITEM_ROW_HOLDER_VALUE => DBRow::BlobItem(attributes.try_into()?),
      _ => DBRow::HolderAssignment(attributes.try_into()?),
    };
    Ok(row)
  }
}

/// Represents an input payload for inserting a blob item into the database.
/// This contains only the business-logic-related attributes.
#[derive(Debug)]
pub struct BlobItemInput {
  pub blob_hash: String,
  pub s3_path: S3Path,
}

impl BlobItemInput {
  pub fn new(blob_hash: impl Into<String>) -> Self {
    let blob_hash: String = blob_hash.into();
    BlobItemInput {
      blob_hash: blob_hash.clone(),
      s3_path: S3Path {
        bucket_name: CONFIG.s3_bucket_name.clone(),
        object_name: blob_hash,
      },
    }
  }
}

/// A struct representing a blob item row in the table in a type-safe way
///
/// It implements the `TryFrom` trait to convert from raw DynamoDB
/// `AttributeValue`s to the type-safe version.
#[derive(Debug)]
pub struct BlobItemRow {
  pub blob_hash: String,
  pub s3_path: S3Path,
  pub unchecked: bool,
  pub created_at: DateTime<Utc>,
  pub last_modified: DateTime<Utc>,
}

impl TryFrom<RawAttributes> for BlobItemRow {
  type Error = DBError;

  fn try_from(mut attributes: RawAttributes) -> Result<Self, Self::Error> {
    let blob_hash = attributes
      .remove(ATTR_BLOB_HASH)
      .attr_try_into(ATTR_BLOB_HASH)?;
    let s3_path: String = attributes
      .remove(ATTR_S3_PATH)
      .attr_try_into(ATTR_S3_PATH)?;
    let created_at = parse_timestamp_attribute(
      ATTR_CREATED_AT,
      attributes.remove(ATTR_CREATED_AT),
    )?;
    let last_modified = parse_timestamp_attribute(
      ATTR_LAST_MODIFIED,
      attributes.remove(ATTR_LAST_MODIFIED),
    )?;
    let unchecked = is_raw_row_unchecked(&attributes, UncheckedKind::Blob)?;
    let s3_path = S3Path::from_full_path(&s3_path).map_err(DBError::from)?;

    Ok(BlobItemRow {
      blob_hash,
      s3_path,
      unchecked,
      created_at,
      last_modified,
    })
  }
}
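// Sketch: parsing a raw GetItem response into the type-safe enum. `raw`
// stands for a RawAttributes map obtained from DynamoDB (hypothetical in
// this example):
//
//   let row: DBRow = raw.try_into()?;
//   match row {
//     DBRow::BlobItem(blob) => trace!("S3 path: {:?}", blob.s3_path),
//     DBRow::HolderAssignment(row) => trace!("holder: {}", row.holder),
//   }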
/// A struct representing a holder assignment table row in a type-safe way
///
/// It implements the `TryFrom` trait to convert from raw DynamoDB
/// `AttributeValue`s to the type-safe version.
#[derive(Debug)]
pub struct HolderAssignmentRow {
  pub blob_hash: String,
  pub holder: String,
  pub unchecked: bool,
  pub created_at: DateTime<Utc>,
  pub last_modified: DateTime<Utc>,
}

impl TryFrom<RawAttributes> for HolderAssignmentRow {
  type Error = DBError;

  fn try_from(mut attributes: RawAttributes) -> Result<Self, Self::Error> {
    let holder = attributes.remove(ATTR_HOLDER).attr_try_into(ATTR_HOLDER)?;
    let blob_hash = attributes
      .remove(ATTR_BLOB_HASH)
      .attr_try_into(ATTR_BLOB_HASH)?;
    let created_at = parse_timestamp_attribute(
      ATTR_CREATED_AT,
      attributes.remove(ATTR_CREATED_AT),
    )?;
    let last_modified = parse_timestamp_attribute(
      ATTR_LAST_MODIFIED,
      attributes.remove(ATTR_LAST_MODIFIED),
    )?;
    let unchecked = is_raw_row_unchecked(&attributes, UncheckedKind::Holder)?;

    Ok(HolderAssignmentRow {
      blob_hash,
      holder,
      unchecked,
      created_at,
      last_modified,
    })
  }
}

/// Represents a composite primary key for a DynamoDB table row
///
/// It implements `TryFrom` and `Into` traits to conveniently use it
/// in DynamoDB queries
#[derive(Clone, Constructor, Debug, Hash, Eq, PartialEq)]
pub struct PrimaryKey {
  pub blob_hash: String,
  pub holder: String,
}

impl PrimaryKey {
  /// Creates a primary key for a row containing blob item data.
  /// Rows queried by primary keys created by this function will
  /// be of type `BlobItemRow`
  pub fn for_blob_item(blob_hash: impl Into<String>) -> Self {
    PrimaryKey {
      blob_hash: blob_hash.into(),
      holder: BLOB_ITEM_ROW_HOLDER_VALUE.to_string(),
    }
  }

  pub fn is_blob_item(&self) -> bool {
    self.holder == BLOB_ITEM_ROW_HOLDER_VALUE
  }
}

impl TryFrom<RawAttributes> for PrimaryKey {
  type Error = DBError;

  fn try_from(mut attributes: RawAttributes) -> Result<Self, Self::Error> {
    let blob_hash = attributes
      .remove(ATTR_BLOB_HASH)
      .attr_try_into(ATTR_BLOB_HASH)?;
    let holder = attributes.remove(ATTR_HOLDER).attr_try_into(ATTR_HOLDER)?;
    Ok(PrimaryKey { blob_hash, holder })
  }
}

// useful for convenient calls:
// ddb.get_item().set_key(Some(partition_key.into()))
-impl Into<RawAttributes> for PrimaryKey {
-  fn into(self) -> RawAttributes {
+impl From<PrimaryKey> for RawAttributes {
+  fn from(val: PrimaryKey) -> Self {
    HashMap::from([
-      (
-        ATTR_BLOB_HASH.to_string(),
-        AttributeValue::S(self.blob_hash),
-      ),
-      (ATTR_HOLDER.to_string(), AttributeValue::S(self.holder)),
+      (ATTR_BLOB_HASH.to_string(), AttributeValue::S(val.blob_hash)),
+      (ATTR_HOLDER.to_string(), AttributeValue::S(val.holder)),
    ])
  }
}

/// Represents possible values for the `unchecked` attribute value
pub enum UncheckedKind {
  Blob,
  Holder,
}

impl UncheckedKind {
  pub fn str_value(&self) -> &'static str {
    match self {
      UncheckedKind::Blob => "blob",
      UncheckedKind::Holder => "holder",
    }
  }
}

-impl Into<AttributeValue> for UncheckedKind {
-  fn into(self) -> AttributeValue {
-    AttributeValue::S(self.str_value().to_string())
+impl From<UncheckedKind> for AttributeValue {
+  fn from(val: UncheckedKind) -> Self {
+    AttributeValue::S(val.str_value().to_string())
  }
}

fn is_raw_row_unchecked(
  row: &RawAttributes,
  kind: UncheckedKind,
) -> DBResult<bool> {
  let Some(AttributeValue::S(value)) = row.get(ATTR_UNCHECKED) else {
    // The unchecked attribute does not exist
    return Ok(false);
  };

  if value != kind.str_value() {
    // The unchecked attribute exists but has an incorrect value
    return Err(DBError::Attribute(DBItemError::new(
      ATTR_UNCHECKED.to_string(),
      Value::String(value.to_string()),
      comm_lib::database::DBItemAttributeError::IncorrectType,
    )));
  }
  Ok(true)
}
diff --git a/services/blob/src/http/handlers/blob.rs b/services/blob/src/http/handlers/blob.rs
index b264f3438..77296b3fe 100644
--- a/services/blob/src/http/handlers/blob.rs
+++ b/services/blob/src/http/handlers/blob.rs
@@ -1,209 +1,210 @@
use crate::http::errors::handle_blob_service_error;
use crate::service::BlobService;
use crate::validate_identifier;

use actix_web::error::{ErrorBadRequest, ErrorRangeNotSatisfiable};
use actix_web::{
  http::header::{ByteRangeSpec, Range},
  web, HttpResponse,
};
use async_stream::try_stream;
use comm_lib::http::multipart;
use serde::{Deserialize, Serialize};
use tokio_stream::StreamExt;
use tracing::{info, instrument, trace, warn};
use tracing_futures::Instrument;

/// Returns a tuple of first and last byte number (inclusive) represented by
/// the given range header.
fn parse_range_header(
  range_header: &Option<web::Header<Range>>,
  file_size: u64,
) -> actix_web::Result<(u64, u64)> {
  let (range_start, range_end): (u64, u64) = match range_header {
    Some(web::Header(Range::Bytes(ranges))) => {
      if ranges.len() > 1 {
        return Err(ErrorBadRequest("Multiple ranges not supported"));
      }

      match ranges[0] {
        ByteRangeSpec::FromTo(start, end) => {
          if end >= file_size || start > end {
            return Err(ErrorRangeNotSatisfiable("Range not satisfiable"));
          }
          (start, end)
        }
        ByteRangeSpec::From(start) => {
          if start >= file_size {
            return Err(ErrorRangeNotSatisfiable("Range not satisfiable"));
          }
          (start, file_size - 1)
        }
        ByteRangeSpec::Last(length) => {
          if length >= file_size {
            return Err(ErrorRangeNotSatisfiable("Range not satisfiable"));
          }
          (file_size - length, file_size - 1)
        }
      }
    }
    Some(web::Header(Range::Unregistered(..))) => {
      return Err(ErrorBadRequest("Use ranges registered at IANA"));
    }
    None => (0, file_size - 1),
  };
  Ok((range_start, range_end))
}
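// Worked examples for parse_range_header, assuming file_size = 1000
// (both bounds of the returned tuple are inclusive):
//   Range: bytes=0-499  -> Ok((0, 499))   ByteRangeSpec::FromTo
//   Range: bytes=500-   -> Ok((500, 999)) ByteRangeSpec::From
//   Range: bytes=-200   -> Ok((800, 999)) ByteRangeSpec::Last (final 200 bytes)
//   no Range header     -> Ok((0, 999))   whole file
//   Range: bytes=0-1000 -> Err(416)       end >= file_size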
#[instrument(
  name = "get_blob",
  skip_all,
  fields(blob_hash = %params.as_ref().as_str(), s3_path)
)]
pub async fn get_blob_handler(
  service: web::Data<BlobService>,
  params: web::Path<String>,
  range_header: Option<web::Header<Range>>,
) -> actix_web::Result<HttpResponse> {
  info!("Get blob request");
  let blob_hash = params.into_inner();
  validate_identifier!(blob_hash);

  trace!("Initializing download session");
  let mut download = service.create_download(blob_hash).await?;

  let total_size = download.blob_size;
  let (range_start, range_end): (u64, u64) =
    parse_range_header(&range_header, total_size)?;
  download.set_byte_range(range_start..=range_end);
  let content_length = download.download_size();

  let stream = download
    .into_stream()
    .map(|data| match data {
      Ok(bytes) => Ok(web::Bytes::from(bytes)),
      Err(err) => {
        warn!("Error during download stream: {:?}", err);
        Err(handle_blob_service_error(&err))
      }
    })
    .in_current_span();

  if range_header.is_some() {
    return Ok(
      HttpResponse::PartialContent()
        .content_type("application/octet-stream")
        .append_header(("Content-Length", content_length))
        .append_header((
          "Content-Range",
          format!("bytes {}-{}/{}", range_start, range_end, total_size),
        ))
        .streaming(Box::pin(stream)),
    );
  }

  Ok(
    HttpResponse::Ok()
      .content_type("application/octet-stream")
      .append_header(("Content-Length", content_length))
      .streaming(Box::pin(stream)),
  )
}

#[derive(Deserialize, Debug)]
pub struct AssignHolderPayload {
  holder: String,
  blob_hash: String,
}

#[derive(Serialize)]
struct AssignHolderResponse {
  data_exists: bool,
}

#[instrument(name = "assign_holder", skip(service))]
pub async fn assign_holder_handler(
  service: web::Data<BlobService>,
  payload: web::Json<AssignHolderPayload>,
) -> actix_web::Result<HttpResponse> {
  info!("Assign holder request");
  let AssignHolderPayload { holder, blob_hash } = payload.into_inner();
  validate_identifier!(holder);
  validate_identifier!(blob_hash);

  let data_exists = service.assign_holder(blob_hash, holder).await?;
  Ok(HttpResponse::Ok().json(web::Json(AssignHolderResponse { data_exists })))
}
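// Note on the multipart contract below: get_text_field consumes the first
// field, so `blob_hash` must be the first part and the binary `blob_data`
// part(s) must follow. An illustrative request (the route path and method
// are assumptions for the example, not taken from this diff):
//
//   curl -X PUT \
//     -F "blob_hash=<hash>" \
//     -F "blob_data=@./photo.jpg" \
//     http://blob-service/blob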
#[instrument(skip_all, name = "upload_blob", fields(blob_hash))]
pub async fn upload_blob_handler(
  service: web::Data<BlobService>,
  mut payload: actix_multipart::Multipart,
) -> actix_web::Result<HttpResponse> {
  info!("Upload blob request");

-  let Some((name, blob_hash)) = multipart::get_text_field(&mut payload).await? else {
+  let Some((name, blob_hash)) = multipart::get_text_field(&mut payload).await?
+  else {
    warn!("Malformed request: expected a field.");
    return Err(ErrorBadRequest("Bad request"));
  };

  if name != "blob_hash" {
    warn!(name, "Malformed request: 'blob_hash' text field expected.");
    return Err(ErrorBadRequest("Bad request"));
  }
  validate_identifier!(blob_hash);
  tracing::Span::current().record("blob_hash", &blob_hash);

  trace!("Receiving blob data");
  let stream = try_stream! {
    while let Some(mut field) = payload.try_next().await? {
      let field_name = field.name();
      if field_name != "blob_data" {
        warn!(
          field_name,
          "Malformed request: 'blob_data' multipart field expected."
        );
        Err(ErrorBadRequest("Bad request"))?;
      }

      while let Some(chunk) = field.try_next().await? {
        yield chunk;
      }
    }
    trace!("Stream done");
  };

  service.put_blob(blob_hash, stream).await?;
  Ok(HttpResponse::NoContent().finish())
}

#[derive(Deserialize, Debug)]
pub struct RemoveHolderPayload {
  holder: String,
  blob_hash: String,
  /// If true, the blob will be deleted instantly
  /// after the last holder is revoked.
  #[serde(default)]
  instant_delete: bool,
}

#[instrument(name = "remove_holder", skip(service))]
pub async fn remove_holder_handler(
  service: web::Data<BlobService>,
  payload: web::Json<RemoveHolderPayload>,
) -> actix_web::Result<HttpResponse> {
  info!("Revoke holder request");
  let RemoveHolderPayload {
    holder,
    blob_hash,
    instant_delete,
  } = payload.into_inner();
  validate_identifier!(holder);
  validate_identifier!(blob_hash);

  service
    .revoke_holder(blob_hash, holder, instant_delete)
    .await?;
  Ok(HttpResponse::NoContent().finish())
}
diff --git a/services/feature-flags/Cargo.lock b/services/feature-flags/Cargo.lock
index ab92f9f5b..a0f6b9a7d 100644
--- a/services/feature-flags/Cargo.lock
+++ b/services/feature-flags/Cargo.lock
@@ -1,3047 +1,3059 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3 [[package]] name = "actix-codec" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57a7559404a7f3573127aab53c08ce37a6c6a315c374a31070f3c91cd1b4a7fe" dependencies = [ "bitflags 1.3.2", "bytes", "futures-core", "futures-sink", "log", "memchr", "pin-project-lite", "tokio", "tokio-util", ] [[package]] name = "actix-http" version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0070905b2c4a98d184c4e81025253cb192aa8a73827553f38e9410801ceb35bb" dependencies = [ "actix-codec", "actix-rt", "actix-service", "actix-utils", "ahash", "base64", "bitflags 1.3.2", "brotli", "bytes", "bytestring", "derive_more", "encoding_rs", "flate2", "futures-core", "h2", "http", "httparse", "httpdate", "itoa", "language-tags", "local-channel", "mime", "percent-encoding", "pin-project-lite", "rand", "sha1", "smallvec", "tokio", "tokio-util", "tracing", "zstd", ] [[package]] name = "actix-macros" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "465a6172cf69b960917811022d8f29bc0b7fa1398bc4f78b3c466673db1213b6" dependencies = [ "quote", "syn 1.0.107", ] [[package]] name = "actix-router" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d66ff4d247d2b160861fa2866457e85706833527840e4133f8f49aa423a38799" dependencies = [ "bytestring", "http", "regex", "serde", "tracing", ] [[package]] name = "actix-rt" version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "15265b6b8e2347670eb363c47fc8c75208b4a4994b27192f345fcbe707804f3e" dependencies = [ "futures-core", "tokio", ] [[package]] name = "actix-server" version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e8613a75dd50cc45f473cee3c34d59ed677c0f7b44480ce3b8247d7dc519327" dependencies = [ "actix-rt", "actix-service", "actix-utils", "futures-core", "futures-util", "mio", "num_cpus", "socket2 0.4.7", "tokio", "tracing", ] [[package]] name = "actix-service" version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b894941f818cfdc7ccc4b9e60fa7e53b5042a2e8567270f9147d5591893373a" dependencies = [ "futures-core", "paste", "pin-project-lite", ] [[package]] name = "actix-utils" version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "88a1dcdff1466e3c2488e1cb5c36a71822750ad43839937f85d2f4d9f8b705d8" dependencies = [ "local-waker", "pin-project-lite", ] [[package]] name = "actix-web" version = "4.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd3cb42f9566ab176e1ef0b8b3a896529062b4efc6be0123046095914c4c1c96" dependencies = [ "actix-codec", "actix-http", "actix-macros", "actix-router", "actix-rt", "actix-server", "actix-service", "actix-utils", "actix-web-codegen", "ahash", "bytes", "bytestring", "cfg-if", "cookie", "derive_more", "encoding_rs", "futures-core", "futures-util", "http", "itoa", "language-tags", "log", "mime", "once_cell", "pin-project-lite", "regex", "serde", "serde_json", "serde_urlencoded", "smallvec", "socket2 0.4.7", "time", "url", ] [[package]] name = "actix-web-codegen" version = "4.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2262160a7ae29e3415554a3f1fc04c764b1540c116aa524683208078b7a75bc9" dependencies = [ "actix-router", "proc-macro2", "quote", "syn 1.0.107", ] [[package]] name = "addr2line" version = "0.21.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" dependencies = [ "gimli", ] [[package]] name = "adler" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "ahash" version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ "getrandom", "once_cell", "version_check", ] [[package]] name = "aho-corasick" version = "0.7.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" dependencies = [ "memchr", ] [[package]] name = "alloc-no-stdlib" version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3" [[package]] name = "alloc-stdlib" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece" dependencies = [ "alloc-no-stdlib", ] [[package]] name = "android-tzdata" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" [[package]] name = "android_system_properties" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" dependencies = [ "libc", ] [[package]] name = "anyhow" version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" [[package]] name = "async-stream" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ "async-stream-impl", "futures-core", "pin-project-lite", ] [[package]] name = "async-stream-impl" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", "syn 2.0.29", ] [[package]] name = "async-trait" version = "0.1.73" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", "syn 2.0.29", ] [[package]] name = "autocfg" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "aws-config" version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bcdcf0d683fe9c23d32cf5b53c9918ea0a500375a9fb20109802552658e576c9" dependencies = [ "aws-credential-types", "aws-http", "aws-sdk-sso", "aws-sdk-sts", "aws-smithy-async", "aws-smithy-client", "aws-smithy-http", "aws-smithy-http-tower", "aws-smithy-json", "aws-smithy-types", "aws-types", "bytes", "fastrand 1.8.0", "hex", "http", "hyper", "ring", "time", "tokio", "tower", "tracing", "zeroize", ] [[package]] name = "aws-credential-types" version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1fcdb2f7acbc076ff5ad05e7864bdb191ca70a6fd07668dc3a1a8bcd051de5ae" dependencies = [ "aws-smithy-async", "aws-smithy-types", "fastrand 1.8.0", "tokio", "tracing", "zeroize", ] [[package]] name = "aws-endpoint" version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8cce1c41a6cfaa726adee9ebb9a56fcd2bbfd8be49fd8a04c5e20fd968330b04" dependencies = [ "aws-smithy-http", "aws-smithy-types", "aws-types", "http", "regex", "tracing", ] [[package]] name = "aws-http" version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aadbc44e7a8f3e71c8b374e03ecd972869eb91dd2bc89ed018954a52ba84bc44" dependencies = [ "aws-credential-types", "aws-smithy-http", "aws-smithy-types", "aws-types", "bytes", "http", "http-body", "lazy_static", "percent-encoding", "pin-project-lite", "tracing", ] [[package]] name = "aws-sdk-dynamodb" version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67fb64867fe098cffee7e34352b01bbfa2beb3aa1b2ff0e0a7bf9ff293557852" dependencies = [ "aws-credential-types", "aws-endpoint", "aws-http", "aws-sig-auth", "aws-smithy-async", "aws-smithy-client", "aws-smithy-http", "aws-smithy-http-tower", "aws-smithy-json", "aws-smithy-types", "aws-types", "bytes", "fastrand 1.8.0", "http", "regex", "tokio-stream", "tower", "tracing", ] [[package]] name = "aws-sdk-secretsmanager" version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "502ccd2a5469223f03116ed1ef8d310bfe3caa0e8398b968439cd8e76e4ae91c" dependencies = [ "aws-credential-types", "aws-endpoint", "aws-http", "aws-sig-auth", "aws-smithy-async", "aws-smithy-client", "aws-smithy-http", "aws-smithy-http-tower", "aws-smithy-json", "aws-smithy-types", "aws-types", "bytes", "fastrand 1.8.0", "http", "regex", "tokio-stream", "tower", "tracing", ] [[package]] name = "aws-sdk-sso" version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8b812340d86d4a766b2ca73f740dfd47a97c2dff0c06c8517a16d88241957e4" dependencies = [ "aws-credential-types", "aws-endpoint", "aws-http", "aws-sig-auth", "aws-smithy-async", "aws-smithy-client", "aws-smithy-http", "aws-smithy-http-tower", "aws-smithy-json", "aws-smithy-types", "aws-types", "bytes", "http", "regex", "tokio-stream", "tower", "tracing", ] [[package]] name = "aws-sdk-sts" version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "265fac131fbfc188e5c3d96652ea90ecc676a934e3174eaaee523c6cec040b3b" dependencies = [ "aws-credential-types", "aws-endpoint", "aws-http", "aws-sig-auth", "aws-smithy-async", "aws-smithy-client", "aws-smithy-http", "aws-smithy-http-tower", "aws-smithy-json", "aws-smithy-query", "aws-smithy-types", "aws-smithy-xml", "aws-types", "bytes", "http", "regex", "tower", "tracing", ] [[package]] name = "aws-sig-auth" version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b94acb10af0c879ecd5c7bdf51cda6679a0a4f4643ce630905a77673bfa3c61" dependencies = [ "aws-credential-types", "aws-sigv4", "aws-smithy-http", "aws-types", "http", "tracing", ] [[package]] name = "aws-sigv4" version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d2ce6f507be68e968a33485ced670111d1cbad161ddbbab1e313c03d37d8f4c" dependencies = [ "aws-smithy-http", "form_urlencoded", "hex", "hmac", "http", "once_cell", "percent-encoding", "regex", "sha2", "time", "tracing", ] [[package]] name = "aws-smithy-async" version = "0.55.3" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13bda3996044c202d75b91afeb11a9afae9db9a721c6a7a427410018e286b880" dependencies = [ "futures-util", "pin-project-lite", "tokio", "tokio-stream", ] [[package]] name = "aws-smithy-client" version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0a86aa6e21e86c4252ad6a0e3e74da9617295d8d6e374d552be7d3059c41cedd" dependencies = [ "aws-smithy-async", "aws-smithy-http", "aws-smithy-http-tower", "aws-smithy-types", "bytes", "fastrand 1.8.0", "http", "http-body", "hyper", "hyper-rustls", "lazy_static", "pin-project-lite", "rustls 0.20.8", "tokio", "tower", "tracing", ] [[package]] name = "aws-smithy-http" version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b3b693869133551f135e1f2c77cb0b8277d9e3e17feaf2213f735857c4f0d28" dependencies = [ "aws-smithy-types", "bytes", "bytes-utils", "futures-core", "http", "http-body", "hyper", "once_cell", "percent-encoding", "pin-project-lite", "pin-utils", "tokio", "tokio-util", "tracing", ] [[package]] name = "aws-smithy-http-tower" version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ae4f6c5798a247fac98a867698197d9ac22643596dc3777f0c76b91917616b9" dependencies = [ "aws-smithy-http", "aws-smithy-types", "bytes", "http", "http-body", "pin-project-lite", "tower", "tracing", ] [[package]] name = "aws-smithy-json" version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23f9f42fbfa96d095194a632fbac19f60077748eba536eb0b9fecc28659807f8" dependencies = [ "aws-smithy-types", ] [[package]] name = "aws-smithy-query" version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "98819eb0b04020a1c791903533b638534ae6c12e2aceda3e6e6fba015608d51d" dependencies = [ "aws-smithy-types", "urlencoding", ] [[package]] name = "aws-smithy-types" version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16a3d0bf4f324f4ef9793b86a1701d9700fbcdbd12a846da45eed104c634c6e8" dependencies = [ "base64-simd", "itoa", "num-integer", "ryu", "time", ] [[package]] name = "aws-smithy-xml" version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1b9d12875731bd07e767be7baad95700c3137b56730ec9ddeedb52a5e5ca63b" dependencies = [ "xmlparser", ] [[package]] name = "aws-types" version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6dd209616cc8d7bfb82f87811a5c655dc97537f592689b18743bddf5dc5c4829" dependencies = [ "aws-credential-types", "aws-smithy-async", "aws-smithy-client", "aws-smithy-http", "aws-smithy-types", "http", "rustc_version", "tracing", ] [[package]] name = "axum" version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" dependencies = [ "async-trait", "axum-core", "bitflags 1.3.2", "bytes", "futures-util", "http", "http-body", "hyper", "itoa", "matchit", "memchr", "mime", "percent-encoding", "pin-project-lite", "rustversion", "serde", "sync_wrapper", "tower", "tower-layer", "tower-service", ] [[package]] name = "axum-core" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" dependencies = [ "async-trait", "bytes", "futures-util", "http", "http-body", "mime", "rustversion", "tower-layer", 
"tower-service", ] [[package]] name = "backtrace" version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" dependencies = [ "addr2line", "cc", "cfg-if", "libc", "miniz_oxide 0.7.1", "object", "rustc-demangle", ] [[package]] name = "base64" version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" [[package]] name = "base64-simd" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "339abbe78e73178762e23bea9dfd08e697eb3f3301cd4be981c0f78ba5859195" dependencies = [ "outref", "vsimd", ] [[package]] name = "bitflags" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" [[package]] name = "block-buffer" version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" dependencies = [ "generic-array", ] [[package]] name = "brotli" version = "3.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1a0b1dbcc8ae29329621f8d4f0d835787c1c38bb1401979b49d13b0b305ff68" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", "brotli-decompressor", ] [[package]] name = "brotli-decompressor" version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b6561fd3f895a11e8f72af2cb7d22e08366bebc2b6b57f7744c4bda27034744" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", ] [[package]] name = "bumpalo" version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" [[package]] name = "bytes" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" [[package]] name = "bytes-utils" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e47d3a8076e283f3acd27400535992edb3ba4b5bb72f8891ad8fbe7932a7d4b9" dependencies = [ "bytes", "either", ] [[package]] name = "bytestring" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7f83e57d9154148e355404702e2694463241880b939570d7c97c014da7a69a1" dependencies = [ "bytes", ] [[package]] name = "cc" version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" dependencies = [ "jobserver", ] [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" version = "0.4.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" dependencies = [ "android-tzdata", "iana-time-zone", "js-sys", "num-traits", "wasm-bindgen", "windows-targets 0.48.5", ] [[package]] name = "clap" version = "4.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f13b9c79b5d1dd500d20ef541215a6423c75829ef43117e1b4d17fd8af0b5d76" dependencies = [ "bitflags 1.3.2", "clap_derive", "clap_lex", "is-terminal", "once_cell", "strsim", "termcolor", ] [[package]] name = "clap_derive" version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "684a277d672e91966334af371f1a7b5833f9aa00b07c84e92fbce95e00208ce8" dependencies = [ "heck", "proc-macro-error", "proc-macro2", "quote", "syn 1.0.107", ] [[package]] name = "clap_lex" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "783fe232adfca04f90f56201b26d79682d4cd2625e0bc7290b95123afe558ade" dependencies = [ "os_str_bytes", ] [[package]] name = "codespan-reporting" version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" dependencies = [ "termcolor", "unicode-width", ] [[package]] name = "comm-lib" version = "0.1.0" dependencies = [ "anyhow", "aws-config", "aws-sdk-dynamodb", "aws-sdk-secretsmanager", "base64", "chrono", "constant_time_eq", "derive_more", "grpc_clients", + "hex", "rand", "serde", "serde_json", + "sha2", "tokio", "tracing", + "uuid", ] [[package]] name = "constant_time_eq" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2" [[package]] name = "convert_case" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" [[package]] name = "cookie" version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e859cd57d0710d9e06c381b550c06e76992472a8c6d527aecd2fc673dcc231fb" dependencies = [ "percent-encoding", "time", "version_check", ] [[package]] name = "core-foundation" version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" dependencies = [ "core-foundation-sys", "libc", ] [[package]] name = "core-foundation-sys" version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" [[package]] name = "cpufeatures" version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" dependencies = [ "libc", ] [[package]] name = "crc32fast" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ "cfg-if", ] [[package]] name = "crypto-common" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", "typenum", ] [[package]] name = "cxx" version = "1.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86d3488e7665a7a483b57e25bdd90d0aeb2bc7608c8d0346acf2ad3f1caf1d62" dependencies = [ "cc", "cxxbridge-flags", "cxxbridge-macro", "link-cplusplus", ] [[package]] name = "cxx-build" version = "1.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48fcaf066a053a41a81dfb14d57d99738b767febb8b735c3016e469fac5da690" dependencies = [ "cc", "codespan-reporting", "once_cell", "proc-macro2", "quote", 
"scratch", "syn 1.0.107", ] [[package]] name = "cxxbridge-flags" version = "1.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2ef98b8b717a829ca5603af80e1f9e2e48013ab227b68ef37872ef84ee479bf" [[package]] name = "cxxbridge-macro" version = "1.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "086c685979a698443656e5cf7856c95c642295a38599f12fb1ff76fb28d19892" dependencies = [ "proc-macro2", "quote", "syn 1.0.107", ] [[package]] name = "derive_more" version = "0.99.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case", "proc-macro2", "quote", "rustc_version", "syn 1.0.107", ] [[package]] name = "digest" version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ "block-buffer", "crypto-common", "subtle", ] [[package]] name = "either" version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" [[package]] name = "encoding_rs" version = "0.8.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" dependencies = [ "cfg-if", ] [[package]] name = "equivalent" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" dependencies = [ "errno-dragonfly", "libc", "winapi", ] [[package]] name = "errno" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "136526188508e25c6fef639d7927dfb3e0e3084488bf202267829cf7fc23dbdd" dependencies = [ "errno-dragonfly", "libc", "windows-sys 0.48.0", ] [[package]] name = "errno-dragonfly" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" dependencies = [ "cc", "libc", ] [[package]] name = "fastrand" version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" dependencies = [ "instant", ] [[package]] name = "fastrand" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" [[package]] name = "feature-flags" version = "0.1.0" dependencies = [ "actix-web", "anyhow", "aws-config", "aws-sdk-dynamodb", "aws-types", "clap", "comm-lib", "http", "once_cell", "serde", "tokio", "tracing", "tracing-subscriber", ] [[package]] name = "fixedbitset" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841" dependencies = [ "crc32fast", "miniz_oxide 0.6.2", ] [[package]] name = "fnv" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "form_urlencoded" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" dependencies = [ "percent-encoding", ] [[package]] name = "futures-channel" version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e5317663a9089767a1ec00a487df42e0ca174b61b4483213ac24448e4664df5" dependencies = [ "futures-core", ] [[package]] name = "futures-core" version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec90ff4d0fe1f57d600049061dc6bb68ed03c7d2fbd697274c41805dcb3f8608" [[package]] name = "futures-sink" version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f310820bb3e8cfd46c80db4d7fb8353e15dfff853a127158425f31e0be6c8364" [[package]] name = "futures-task" version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcf79a1bf610b10f42aea489289c5a2c478a786509693b80cd39c44ccd936366" [[package]] name = "futures-util" version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c1d6de3acfef38d2be4b1f543f553131788603495be83da675e180c8d6b7bd1" dependencies = [ "futures-core", "futures-task", "pin-project-lite", "pin-utils", ] [[package]] name = "generic-array" version = "0.14.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" dependencies = [ "typenum", "version_check", ] [[package]] name = "getrandom" version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" dependencies = [ "cfg-if", "libc", "wasi", ] [[package]] name = "gimli" version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" [[package]] name = "grpc_clients" version = "0.1.0" dependencies = [ "derive_more", "prost", "tonic", "tonic-build", "tracing", "tracing-subscriber", ] [[package]] name = "h2" version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "66b91535aa35fea1523ad1b86cb6b53c28e0ae566ba4a460f4457e936cad7c6f" dependencies = [ "bytes", "fnv", "futures-core", "futures-sink", "futures-util", "http", "indexmap 1.9.2", "slab", "tokio", "tokio-util", "tracing", ] [[package]] name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hashbrown" version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" [[package]] name = "heck" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" dependencies = [ "libc", ] [[package]] name = "hermit-abi" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"856b5cb0902c2b6d65d5fd97dfa30f9b70c7538e770b98eab5ed52d8db923e01" [[package]] name = "hex" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hmac" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ "digest", ] [[package]] name = "home" version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" dependencies = [ "windows-sys 0.48.0", ] [[package]] name = "http" version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ "bytes", "fnv", "itoa", ] [[package]] name = "http-body" version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", "http", "pin-project-lite", ] [[package]] name = "httparse" version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" version = "0.14.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" dependencies = [ "bytes", "futures-channel", "futures-core", "futures-util", "h2", "http", "http-body", "httparse", "httpdate", "itoa", "pin-project-lite", "socket2 0.4.7", "tokio", "tower-service", "tracing", "want", ] [[package]] name = "hyper-rustls" version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" dependencies = [ "http", "hyper", "log", "rustls 0.20.8", "rustls-native-certs", "tokio", "tokio-rustls 0.23.4", ] [[package]] name = "hyper-timeout" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" dependencies = [ "hyper", "pin-project-lite", "tokio", "tokio-io-timeout", ] [[package]] name = "iana-time-zone" version = "0.1.53" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", "winapi", ] [[package]] name = "iana-time-zone-haiku" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" dependencies = [ "cxx", "cxx-build", ] [[package]] name = "idna" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" dependencies = [ "unicode-bidi", "unicode-normalization", ] [[package]] name = "indexmap" version = "1.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" dependencies = [ "autocfg", "hashbrown 0.12.3", ] [[package]] name = "indexmap" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" dependencies = [ "equivalent", "hashbrown 0.14.0", ] [[package]] name = "instant" version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ "cfg-if", ] [[package]] name = "io-lifetimes" version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1abeb7a0dd0f8181267ff8adc397075586500b81b28a73e8a0208b00fc170fb3" dependencies = [ "libc", "windows-sys 0.45.0", ] [[package]] name = "is-terminal" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22e18b0a45d56fe973d6db23972bf5bc46f988a4a2385deac9cc29572f09daef" dependencies = [ "hermit-abi 0.3.0", "io-lifetimes", "rustix 0.36.8", "windows-sys 0.45.0", ] [[package]] name = "itertools" version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" dependencies = [ "either", ] [[package]] name = "itoa" version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" [[package]] name = "jobserver" version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "068b1ee6743e4d11fb9c6a1e6064b3693a1b600e7f5f5988047d98b3dc9fb90b" dependencies = [ "libc", ] [[package]] name = "js-sys" version = "0.3.61" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730" dependencies = [ "wasm-bindgen", ] [[package]] name = "language-tags" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4345964bb142484797b161f473a503a434de77149dd8c7427788c6e13379388" [[package]] name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" version = "0.2.148" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cdc71e17332e86d2e1d38c1f99edcb6288ee11b815fb1a4b049eaa2114d369b" [[package]] name = "link-cplusplus" version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5" dependencies = [ "cc", ] [[package]] name = "linux-raw-sys" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" [[package]] name = "linux-raw-sys" version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a9bad9f94746442c783ca431b22403b519cd7fbeed0533fdd6328b2f2212128" [[package]] name = "local-channel" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f303ec0e94c6c54447f84f3b0ef7af769858a9c4ef56ef2a986d3dcd4c3fc9c" dependencies = [ "futures-core", "futures-sink", "futures-util", "local-waker", ] [[package]] name = "local-waker" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "e34f76eb3611940e0e7d53a9aaa4e6a3151f69541a282fd0dad5571420c53ff1" [[package]] name = "lock_api" version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" dependencies = [ "autocfg", "scopeguard", ] [[package]] name = "log" version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ "cfg-if", ] [[package]] name = "matchers" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" dependencies = [ "regex-automata", ] [[package]] name = "matchit" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" [[package]] name = "memchr" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "mime" version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" [[package]] name = "miniz_oxide" version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" dependencies = [ "adler", ] [[package]] name = "miniz_oxide" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" dependencies = [ "adler", ] [[package]] name = "mio" version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" dependencies = [ "libc", "log", "wasi", "windows-sys 0.48.0", ] [[package]] name = "multimap" version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "nu-ansi-term" version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" dependencies = [ "overload", "winapi", ] [[package]] name = "num-integer" version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" dependencies = [ "autocfg", "num-traits", ] [[package]] name = "num-traits" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", ] [[package]] name = "num_cpus" version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" dependencies = [ "hermit-abi 0.2.6", "libc", ] [[package]] name = "object" version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77ac5bbd07aea88c60a577a1ce218075ffd59208b2d7ca97adf9bfc5aeb21ebe" dependencies = [ "memchr", ] [[package]] name = "once_cell" version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66" [[package]] name = "openssl-probe" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "os_str_bytes" version = "6.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee" [[package]] name = "outref" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4030760ffd992bef45b0ae3f10ce1aba99e33464c90d14dd7c039884963ddc7a" [[package]] name = "overload" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "parking_lot" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", "parking_lot_core", ] [[package]] name = "parking_lot_core" version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" dependencies = [ "cfg-if", "libc", "redox_syscall 0.2.16", "smallvec", "windows-sys 0.45.0", ] [[package]] name = "paste" version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d01a5bd0424d00070b0098dd17ebca6f961a959dead1dbcbbbc1d1cd8d3deeba" [[package]] name = "percent-encoding" version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "petgraph" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", "indexmap 2.0.0", ] [[package]] name = "pin-project" version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", "syn 1.0.107", ] [[package]] name = "pin-project-lite" version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "12cc1b0bf1727a77a54b6654e7b5f1af8604923edc8b81885f8ec92f9e3f0a05" [[package]] name = "pin-utils" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" [[package]] name = "ppv-lite86" version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "prettyplease" version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" dependencies = [ "proc-macro2", "syn 1.0.107", ] [[package]] name = 
"proc-macro-error" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", "proc-macro2", "quote", "syn 1.0.107", "version_check", ] [[package]] name = "proc-macro-error-attr" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ "proc-macro2", "quote", "version_check", ] [[package]] name = "proc-macro2" version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" dependencies = [ "unicode-ident", ] [[package]] name = "prost" version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" dependencies = [ "bytes", "prost-derive", ] [[package]] name = "prost-build" version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ "bytes", "heck", "itertools", "lazy_static", "log", "multimap", "petgraph", "prettyplease", "prost", "prost-types", "regex", "syn 1.0.107", "tempfile", "which", ] [[package]] name = "prost-derive" version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools", "proc-macro2", "quote", "syn 1.0.107", ] [[package]] name = "prost-types" version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" dependencies = [ "prost", ] [[package]] name = "quote" version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ "proc-macro2", ] [[package]] name = "rand" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha", "rand_core", ] [[package]] name = "rand_chacha" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", "rand_core", ] [[package]] name = "rand_core" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ "getrandom", ] [[package]] name = "redox_syscall" version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ "bitflags 1.3.2", ] [[package]] name = "redox_syscall" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" dependencies = [ "bitflags 1.3.2", ] [[package]] name = "regex" version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" dependencies = [ "aho-corasick", "memchr", "regex-syntax", ] [[package]] name = 
"regex-automata" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" dependencies = [ "regex-syntax", ] [[package]] name = "regex-syntax" version = "0.6.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" [[package]] name = "ring" version = "0.16.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" dependencies = [ "cc", "libc", "once_cell", "spin", "untrusted", "web-sys", "winapi", ] [[package]] name = "rustc-demangle" version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustc_version" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ "semver", ] [[package]] name = "rustix" version = "0.36.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f43abb88211988493c1abb44a70efa56ff0ce98f233b7b276146f1f3f7ba9644" dependencies = [ "bitflags 1.3.2", "errno 0.2.8", "io-lifetimes", "libc", "linux-raw-sys 0.1.4", "windows-sys 0.45.0", ] [[package]] name = "rustix" version = "0.38.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "747c788e9ce8e92b12cd485c49ddf90723550b654b32508f979b71a7b1ecda4f" dependencies = [ "bitflags 2.4.0", "errno 0.3.3", "libc", "linux-raw-sys 0.4.7", "windows-sys 0.48.0", ] [[package]] name = "rustls" version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" dependencies = [ "log", "ring", "sct", "webpki", ] [[package]] name = "rustls" version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" dependencies = [ "log", "ring", "rustls-webpki 0.101.6", "sct", ] [[package]] name = "rustls-native-certs" version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" dependencies = [ "openssl-probe", "rustls-pemfile", "schannel", "security-framework", ] [[package]] name = "rustls-pemfile" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" dependencies = [ "base64", ] [[package]] name = "rustls-webpki" version = "0.100.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f6a5fc258f1c1276dfe3016516945546e2d5383911efc0fc4f1cdc5df3a4ae3" dependencies = [ "ring", "untrusted", ] [[package]] name = "rustls-webpki" version = "0.101.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c7d5dece342910d9ba34d259310cae3e0154b873b35408b787b59bce53d34fe" dependencies = [ "ring", "untrusted", ] [[package]] name = "rustversion" version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" [[package]] name = "ryu" version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde" [[package]] name = "schannel" version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" dependencies = [ "windows-sys 0.42.0", ] [[package]] name = "scopeguard" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "scratch" version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddccb15bcce173023b3fedd9436f882a0739b8dfb45e4f6b6002bee5929f61b2" [[package]] name = "sct" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ "ring", "untrusted", ] [[package]] name = "security-framework" version = "2.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" dependencies = [ "bitflags 1.3.2", "core-foundation", "core-foundation-sys", "libc", "security-framework-sys", ] [[package]] name = "security-framework-sys" version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" dependencies = [ "core-foundation-sys", "libc", ] [[package]] name = "semver" version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a" [[package]] name = "serde" version = "1.0.152" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" version = "1.0.152" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" dependencies = [ "proc-macro2", "quote", "syn 1.0.107", ] [[package]] name = "serde_json" version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cad406b69c91885b5107daf2c29572f6c8cdb3c66826821e286c533490c0bc76" dependencies = [ "itoa", "ryu", "serde", ] [[package]] name = "serde_urlencoded" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", "itoa", "ryu", "serde", ] [[package]] name = "sha1" version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if", "cpufeatures", "digest", ] [[package]] name = "sha2" version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if", "cpufeatures", "digest", ] [[package]] name = "sharded-slab" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" dependencies = [ "lazy_static", ] [[package]] name = "signal-hook-registry" version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ "libc", ] [[package]] name = "slab" version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" dependencies = [ "autocfg", ] [[package]] name = "smallvec" version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "socket2" version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" dependencies = [ "libc", "winapi", ] [[package]] name = "socket2" version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" dependencies = [ "libc", "windows-sys 0.48.0", ] [[package]] name = "spin" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "strsim" version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "subtle" version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "syn" version = "2.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c324c494eba9d92503e6f1ef2e6df781e78f6a7705a0202d9801b198807d518a" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "sync_wrapper" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "tempfile" version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" dependencies = [ "cfg-if", "fastrand 2.0.0", "redox_syscall 0.3.5", "rustix 0.38.14", "windows-sys 0.48.0", ] [[package]] name = "termcolor" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" dependencies = [ "winapi-util", ] [[package]] name = "thread_local" version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" dependencies = [ "once_cell", ] [[package]] name = "time" version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" dependencies = [ "itoa", "serde", "time-core", "time-macros", ] [[package]] name = "time-core" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" [[package]] name = "time-macros" version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d967f99f534ca7e495c575c62638eebc2898a8c84c119b89e250477bc4ba16b2" dependencies = [ "time-core", ] [[package]] name = "tinyvec" version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" dependencies = [ "tinyvec_macros", ] [[package]] name = "tinyvec_macros" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" version = "1.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17ed6077ed6cd6c74735e21f37eb16dc3935f96878b1fe961074089cc80893f9" dependencies = [ "backtrace", "bytes", "libc", "mio", "num_cpus", "parking_lot", "pin-project-lite", "signal-hook-registry", "socket2 0.5.3", "tokio-macros", "windows-sys 0.48.0", ] [[package]] name = "tokio-io-timeout" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" dependencies = [ "pin-project-lite", "tokio", ] [[package]] name = "tokio-macros" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", "syn 2.0.29", ] [[package]] name = "tokio-rustls" version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ "rustls 0.20.8", "tokio", "webpki", ] [[package]] name = "tokio-rustls" version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ "rustls 0.21.7", "tokio", ] [[package]] name = "tokio-stream" version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce" dependencies = [ "futures-core", "pin-project-lite", "tokio", ] [[package]] name = "tokio-util" version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" dependencies = [ "bytes", "futures-core", "futures-sink", "pin-project-lite", "tokio", "tracing", ] [[package]] name = "tonic" version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a" dependencies = [ "async-stream", "async-trait", "axum", "base64", "bytes", "futures-core", "futures-util", "h2", "http", "http-body", "hyper", "hyper-timeout", "percent-encoding", "pin-project", "prost", "rustls-pemfile", "tokio", "tokio-rustls 0.24.1", "tokio-stream", "tower", "tower-layer", "tower-service", "tracing", "webpki-roots", ] [[package]] name = "tonic-build" version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6fdaae4c2c638bb70fe42803a26fbd6fc6ac8c72f5c59f67ecc2a2dcabf4b07" dependencies = [ "prettyplease", "proc-macro2", "prost-build", "quote", "syn 1.0.107", ] [[package]] name = "tower" version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", "indexmap 1.9.2", "pin-project", "pin-project-lite", "rand", 
"slab", "tokio", "tokio-util", "tower-layer", "tower-service", "tracing", ] [[package]] name = "tower-layer" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" [[package]] name = "tower-service" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ "cfg-if", "log", "pin-project-lite", "tracing-attributes", "tracing-core", ] [[package]] name = "tracing-attributes" version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" dependencies = [ "proc-macro2", "quote", "syn 1.0.107", ] [[package]] name = "tracing-core" version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" dependencies = [ "once_cell", "valuable", ] [[package]] name = "tracing-log" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" dependencies = [ "lazy_static", "log", "tracing-core", ] [[package]] name = "tracing-subscriber" version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" dependencies = [ "matchers", "nu-ansi-term", "once_cell", "regex", "sharded-slab", "smallvec", "thread_local", "tracing", "tracing-core", "tracing-log", ] [[package]] name = "try-lock" version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "typenum" version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "unicode-bidi" version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d54675592c1dbefd78cbd98db9bacd89886e1ca50692a0692baefffdeb92dd58" [[package]] name = "unicode-ident" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" [[package]] name = "unicode-normalization" version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" dependencies = [ "tinyvec", ] [[package]] name = "unicode-width" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" [[package]] name = "untrusted" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" dependencies = [ "form_urlencoded", "idna", "percent-encoding", ] [[package]] name = "urlencoding" version = 
"2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8db7427f936968176eaa7cdf81b7f98b980b18495ec28f1b5791ac3bfe3eea9" +[[package]] +name = "uuid" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e395fcf16a7a3d8127ec99782007af141946b4795001f876d54fb0d55978560" +dependencies = [ + "getrandom", +] + [[package]] name = "valuable" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] name = "version_check" version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "vsimd" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c3082ca00d5a5ef149bb8b555a72ae84c9c59f7250f013ac822ac2e49b19c64" [[package]] name = "want" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" dependencies = [ "log", "try-lock", ] [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b" dependencies = [ "cfg-if", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", "syn 1.0.107", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" dependencies = [ "quote", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" dependencies = [ "proc-macro2", "quote", "syn 1.0.107", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d" [[package]] name = "web-sys" version = "0.3.61" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97" dependencies = [ "js-sys", "wasm-bindgen", ] [[package]] name = "webpki" version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0e74f82d49d545ad128049b7e88f6576df2da6b02e9ce565c6f533be576957e" dependencies = [ "ring", "untrusted", ] [[package]] name = "webpki-roots" version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338" dependencies = [ "rustls-webpki 0.100.3", ] [[package]] name = "which" version = "4.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" dependencies = [ "either", "home", "once_cell", "rustix 0.38.14", ] [[package]] name = "winapi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" dependencies = [ "winapi", ] [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-sys" version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ "windows_aarch64_gnullvm 0.42.1", "windows_aarch64_msvc 0.42.1", "windows_i686_gnu 0.42.1", "windows_i686_msvc 0.42.1", "windows_x86_64_gnu 0.42.1", "windows_x86_64_gnullvm 0.42.1", "windows_x86_64_msvc 0.42.1", ] [[package]] name = "windows-sys" version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" dependencies = [ "windows-targets 0.42.1", ] [[package]] name = "windows-sys" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ "windows-targets 0.48.5", ] [[package]] name = "windows-targets" version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7" dependencies = [ "windows_aarch64_gnullvm 0.42.1", "windows_aarch64_msvc 0.42.1", "windows_i686_gnu 0.42.1", "windows_i686_msvc 0.42.1", "windows_x86_64_gnu 0.42.1", "windows_x86_64_gnullvm 0.42.1", "windows_x86_64_msvc 0.42.1", ] [[package]] name = "windows-targets" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ "windows_aarch64_gnullvm 0.48.5", "windows_aarch64_msvc 0.48.5", "windows_i686_gnu 0.48.5", "windows_i686_msvc 0.48.5", "windows_x86_64_gnu 0.48.5", "windows_x86_64_gnullvm 0.48.5", "windows_x86_64_msvc 0.48.5", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_msvc" version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" [[package]] name = "windows_aarch64_msvc" version = "0.48.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_i686_gnu" version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" [[package]] name = "windows_i686_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_msvc" version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" [[package]] name = "windows_i686_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_x86_64_gnu" version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" [[package]] name = "windows_x86_64_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnullvm" version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_msvc" version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" [[package]] name = "windows_x86_64_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "xmlparser" version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d25c75bf9ea12c4040a97f829154768bbbce366287e2dc044af160cd79a13fd" [[package]] name = "zeroize" version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" [[package]] name = "zstd" version = "0.12.3+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76eea132fb024e0e13fd9c2f5d5d595d8a967aa72382ac2f9d39fcc95afd0806" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" version = "6.0.4+zstd.1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7afb4b54b8910cf5447638cb54bf4e8a65cbedd783af98b98c62ffe91f185543" dependencies = [ "libc", "zstd-sys", ] [[package]] name = "zstd-sys" version = "2.0.7+zstd.1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94509c3ba2fe55294d752b79842c530ccfab760192521df74a081a78d2b3c7f5" dependencies = [ "cc", "libc", "pkg-config", ] diff --git a/services/identity/src/database.rs b/services/identity/src/database.rs index ac34ce201..fd1cbd1e9 100644 --- a/services/identity/src/database.rs +++ b/services/identity/src/database.rs @@ -1,1728 +1,1728 @@ use comm_lib::aws::ddb::{ operation::{ delete_item::DeleteItemOutput, 
get_item::GetItemOutput, put_item::PutItemOutput, query::QueryOutput, }, primitives::Blob, types::{AttributeValue, PutRequest, ReturnConsumedCapacity, WriteRequest}, }; use comm_lib::aws::{AwsConfig, DynamoDBClient}; use comm_lib::database::{ AttributeExtractor, AttributeMap, DBItemAttributeError, DBItemError, TryFromAttribute, }; use constant_time_eq::constant_time_eq; use std::collections::{HashMap, HashSet}; use std::str::FromStr; use std::sync::Arc; use crate::ddb_utils::{ - create_one_time_key_partition_key, into_one_time_put_requests, - EthereumIdentity, Identifier, OlmAccountType, + create_one_time_key_partition_key, into_one_time_put_requests, Identifier, + OlmAccountType, }; use crate::error::{consume_error, Error}; use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize}; use tracing::{debug, error, info, warn}; use crate::client_service::{FlattenedDeviceKeyUpload, UserRegistrationInfo}; use crate::config::CONFIG; use crate::constants::{ ACCESS_TOKEN_SORT_KEY, ACCESS_TOKEN_TABLE, ACCESS_TOKEN_TABLE_AUTH_TYPE_ATTRIBUTE, ACCESS_TOKEN_TABLE_CREATED_ATTRIBUTE, ACCESS_TOKEN_TABLE_PARTITION_KEY, ACCESS_TOKEN_TABLE_TOKEN_ATTRIBUTE, ACCESS_TOKEN_TABLE_VALID_ATTRIBUTE, CONTENT_ONE_TIME_KEY, NONCE_TABLE, NONCE_TABLE_CREATED_ATTRIBUTE, NONCE_TABLE_EXPIRATION_TIME_ATTRIBUTE, NONCE_TABLE_EXPIRATION_TIME_UNIX_ATTRIBUTE, NONCE_TABLE_PARTITION_KEY, NOTIF_ONE_TIME_KEY, RESERVED_USERNAMES_TABLE, RESERVED_USERNAMES_TABLE_PARTITION_KEY, USERS_TABLE, USERS_TABLE_DEVICES_ATTRIBUTE, USERS_TABLE_DEVICES_MAP_CONTENT_ONE_TIME_KEYS_ATTRIBUTE_NAME, USERS_TABLE_DEVICES_MAP_CONTENT_PREKEY_ATTRIBUTE_NAME, USERS_TABLE_DEVICES_MAP_CONTENT_PREKEY_SIGNATURE_ATTRIBUTE_NAME, USERS_TABLE_DEVICES_MAP_DEVICE_TYPE_ATTRIBUTE_NAME, USERS_TABLE_DEVICES_MAP_KEY_PAYLOAD_ATTRIBUTE_NAME, USERS_TABLE_DEVICES_MAP_KEY_PAYLOAD_SIGNATURE_ATTRIBUTE_NAME, USERS_TABLE_DEVICES_MAP_NOTIF_ONE_TIME_KEYS_ATTRIBUTE_NAME, USERS_TABLE_DEVICES_MAP_NOTIF_PREKEY_ATTRIBUTE_NAME, USERS_TABLE_DEVICES_MAP_NOTIF_PREKEY_SIGNATURE_ATTRIBUTE_NAME, USERS_TABLE_DEVICES_MAP_SOCIAL_PROOF_ATTRIBUTE_NAME, USERS_TABLE_PARTITION_KEY, USERS_TABLE_REGISTRATION_ATTRIBUTE, USERS_TABLE_USERNAME_ATTRIBUTE, USERS_TABLE_USERNAME_INDEX, USERS_TABLE_WALLET_ADDRESS_ATTRIBUTE, USERS_TABLE_WALLET_ADDRESS_INDEX, }; use crate::error::{AttributeValueFromHashMap, FromAttributeValue}; use crate::id::generate_uuid; use crate::nonce::NonceData; use crate::token::{AccessTokenData, AuthType}; pub use grpc_clients::identity::DeviceType; mod device_list; pub use device_list::DeviceListRow; #[derive(Serialize, Deserialize)] pub struct OlmKeys { pub curve25519: String, pub ed25519: String, } #[derive(Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct KeyPayload { pub notification_identity_public_keys: OlmKeys, pub primary_identity_public_keys: OlmKeys, } impl FromStr for KeyPayload { type Err = serde_json::Error; // The payload is held in the database as an escaped JSON payload. 
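  // For illustration, a stored value looks roughly like
  // "{\"primaryIdentityPublicKeys\":{...}}" rather than plain JSON. A minimal
  // usage sketch of the `FromStr` impl below (hypothetical key material):
  //
  //   let raw = r#"{\"notificationIdentityPublicKeys\":{\"curve25519\":\"n1\",\"ed25519\":\"n2\"},\"primaryIdentityPublicKeys\":{\"curve25519\":\"p1\",\"ed25519\":\"p2\"}}"#;
  //   let payload: KeyPayload = raw.parse()?;
  //   assert_eq!(payload.primary_identity_public_keys.curve25519, "p1");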
  // Escaped double quotes need to be trimmed before attempting to deserialize
  fn from_str(payload: &str) -> Result<Self, Self::Err> {
    serde_json::from_str(&payload.replace(r#"\""#, r#"""#))
  }
}

pub struct DBDeviceTypeInt(pub i32);

impl TryFrom<DBDeviceTypeInt> for DeviceType {
  type Error = crate::error::Error;

  fn try_from(value: DBDeviceTypeInt) -> Result<Self, Self::Error> {
    let device_result = DeviceType::try_from(value.0);
    device_result.map_err(|_| {
      Error::Attribute(DBItemError {
        attribute_name: USERS_TABLE_DEVICES_MAP_DEVICE_TYPE_ATTRIBUTE_NAME
          .to_string(),
        attribute_value: Some(AttributeValue::N(value.0.to_string())).into(),
        attribute_error: DBItemAttributeError::InvalidValue,
      })
    })
  }
}

// This is very similar to the protobuf definitions; however, coupling the
// protobuf schema to the database API should be avoided.
pub struct PreKey {
  pub prekey: String,
  pub prekey_signature: String,
}

pub struct OutboundKeys {
  pub key_payload: String,
  pub key_payload_signature: String,
  pub social_proof: Option<String>,
  pub content_prekey: PreKey,
  pub notif_prekey: PreKey,
  pub content_one_time_key: Option<String>,
  pub notif_one_time_key: Option<String>,
}

#[derive(Clone)]
pub struct DatabaseClient {
  client: Arc<DynamoDBClient>,
}

impl DatabaseClient {
  pub fn new(aws_config: &AwsConfig) -> Self {
    let client = match &CONFIG.localstack_endpoint {
      Some(endpoint) => {
        info!(
          "Configuring DynamoDB client to use LocalStack endpoint: {}",
          endpoint
        );
        let ddb_config_builder =
          comm_lib::aws::ddb::config::Builder::from(aws_config)
            .endpoint_url(endpoint);
        DynamoDBClient::from_conf(ddb_config_builder.build())
      }
      None => DynamoDBClient::new(aws_config),
    };
    DatabaseClient {
      client: Arc::new(client),
    }
  }

  pub async fn add_password_user_to_users_table(
    &self,
    registration_state: UserRegistrationInfo,
    password_file: Vec<u8>,
    code_version: u64,
    access_token_creation_time: DateTime<Utc>,
  ) -> Result<String, Error> {
    let device_key_upload = registration_state.flattened_device_key_upload;
    let user_id = self
      .add_user_to_users_table(
        device_key_upload.clone(),
        Some((registration_state.username, Blob::new(password_file))),
        None,
        None,
        registration_state.user_id,
      )
      .await?;
    self
      .add_device(
        &user_id,
        device_key_upload,
        None,
        code_version,
        access_token_creation_time,
      )
      .await?;
    Ok(user_id)
  }

  pub async fn add_wallet_user_to_users_table(
    &self,
    flattened_device_key_upload: FlattenedDeviceKeyUpload,
    wallet_address: String,
    social_proof: String,
    user_id: Option<String>,
    code_version: u64,
    access_token_creation_time: DateTime<Utc>,
  ) -> Result<String, Error> {
    let social_proof = Some(social_proof);
    let user_id = self
      .add_user_to_users_table(
        flattened_device_key_upload.clone(),
        None,
        Some(wallet_address),
        social_proof.clone(),
        user_id,
      )
      .await?;
    self
      .add_device(
        &user_id,
        flattened_device_key_upload,
        social_proof,
        code_version,
        access_token_creation_time,
      )
      .await?;
    Ok(user_id)
  }

  async fn add_user_to_users_table(
    &self,
    flattened_device_key_upload: FlattenedDeviceKeyUpload,
    username_and_password_file: Option<(String, Blob)>,
    wallet_address: Option<String>,
    social_proof: Option<String>,
    user_id: Option<String>,
  ) -> Result<String, Error> {
    let user_id = user_id.unwrap_or_else(generate_uuid);
    let device_info =
      create_device_info(flattened_device_key_upload.clone(), social_proof);
    let devices = HashMap::from([(
      flattened_device_key_upload.device_id_key.clone(),
      AttributeValue::M(device_info),
    )]);
    let mut user = HashMap::from([
      (
        USERS_TABLE_PARTITION_KEY.to_string(),
        AttributeValue::S(user_id.clone()),
      ),
      (
        USERS_TABLE_DEVICES_ATTRIBUTE.to_string(),
        AttributeValue::M(devices),
      ),
    ]);

    if let Some((username, password_file)) = username_and_password_file {
      user.insert(
        USERS_TABLE_USERNAME_ATTRIBUTE.to_string(),
        AttributeValue::S(username),
      );
      user.insert(
        USERS_TABLE_REGISTRATION_ATTRIBUTE.to_string(),
        AttributeValue::B(password_file),
      );
    }

    if let Some(address) = wallet_address {
      user.insert(
        USERS_TABLE_WALLET_ADDRESS_ATTRIBUTE.to_string(),
        AttributeValue::S(address),
      );
    }

    self
      .client
      .put_item()
      .table_name(USERS_TABLE)
      .set_item(Some(user))
      // make sure we don't accidentally overwrite an existing row
      .condition_expression("attribute_not_exists(#pk)")
      .expression_attribute_names("#pk", USERS_TABLE_PARTITION_KEY)
      .send()
      .await
      .map_err(|e| Error::AwsSdk(e.into()))?;

    self
      .append_one_time_prekeys(
        flattened_device_key_upload.device_id_key,
        flattened_device_key_upload.content_one_time_keys,
        flattened_device_key_upload.notif_one_time_keys,
      )
      .await?;

    Ok(user_id)
  }

  pub async fn add_password_user_device_to_users_table(
    &self,
    user_id: String,
    flattened_device_key_upload: FlattenedDeviceKeyUpload,
    code_version: u64,
    access_token_creation_time: DateTime<Utc>,
  ) -> Result<(), Error> {
    // add device to the legacy device list
    self
      .add_device_to_users_table(
        user_id.clone(),
        flattened_device_key_upload.clone(),
        None,
      )
      .await?;

    // add device to the new device list if it doesn't exist yet
    let device_id = flattened_device_key_upload.device_id_key.clone();
    let device_exists = self
      .device_exists(user_id.clone(), device_id.clone())
      .await?;

    if device_exists {
      self
        .update_device_login_time(
          user_id.clone(),
          device_id,
          access_token_creation_time,
        )
        .await?;
      return Ok(());
    }

    self
      .add_device(
        user_id,
        flattened_device_key_upload,
        None,
        code_version,
        access_token_creation_time,
      )
      .await
  }

  pub async fn add_wallet_user_device_to_users_table(
    &self,
    user_id: String,
    flattened_device_key_upload: FlattenedDeviceKeyUpload,
    social_proof: String,
    code_version: u64,
    access_token_creation_time: DateTime<Utc>,
  ) -> Result<(), Error> {
    // add device to the legacy device list
    self
      .add_device_to_users_table(
        user_id.clone(),
        flattened_device_key_upload.clone(),
        Some(social_proof.clone()),
      )
      .await?;

    // add device to the new device list if it doesn't exist yet
    let device_id = flattened_device_key_upload.device_id_key.clone();
    let device_exists = self
      .device_exists(user_id.clone(), device_id.clone())
      .await?;

    if device_exists {
      self
        .update_device_login_time(
          user_id.clone(),
          device_id,
          access_token_creation_time,
        )
        .await?;
      return Ok(());
    }

    // add device to the new device list
    self
      .add_device(
        user_id,
        flattened_device_key_upload,
        Some(social_proof),
        code_version,
        access_token_creation_time,
      )
      .await
  }

  pub async fn get_keyserver_keys_for_user(
    &self,
    user_id: &str,
  ) -> Result<Option<OutboundKeys>, Error> {
    use crate::grpc_services::protos::unauth::DeviceType as GrpcDeviceType;

    // DynamoDB doesn't have a way to "pop" a value from a list, so we must
    // first read in user info, then update one_time_keys with value we
    // gave to requester
    let user_info = self
      .get_item_from_users_table(user_id)
      .await?
      .item
      .ok_or(Error::MissingItem)?;
    let user_id = user_info
      .get(USERS_TABLE_PARTITION_KEY)
      .ok_or(Error::MissingItem)?
      .to_string(USERS_TABLE_PARTITION_KEY)?;
    let devices = user_info
      .get(USERS_TABLE_DEVICES_ATTRIBUTE)
      .ok_or(Error::MissingItem)?
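    // Login-flow usage sketch for the two add-device helpers above
    // (hypothetical user ID and upload; code version and timestamp come from
    // the caller). Repeated logins won't duplicate a device: the legacy map
    // write is an upsert, and in the new device list an existing device only
    // has its login time refreshed.
    //
    //   db_client
    //     .add_password_user_device_to_users_table(
    //       "some-user-id".to_string(),
    //       flattened_device_key_upload,
    //       code_version,
    //       chrono::Utc::now(),
    //     )
    //     .await?;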
.to_hashmap(USERS_TABLE_DEVICES_ATTRIBUTE)?; let user_devices = self.get_current_devices(user_id).await?; let maybe_keyserver_device = user_devices .into_iter() .find(|device| device.device_type == GrpcDeviceType::Keyserver); if let Some(keyserver) = maybe_keyserver_device { debug!( "Found keyserver in devices table (ID={})", &keyserver.device_id ); let notif_one_time_key: Option = self .get_one_time_key(&keyserver.device_id, OlmAccountType::Notification) .await?; let content_one_time_key: Option = self .get_one_time_key(&keyserver.device_id, OlmAccountType::Content) .await?; debug!( "Able to get notif one-time key for keyserver {}: {}", &keyserver.device_id, notif_one_time_key.is_some() ); debug!( "Able to get content one-time key for keyserver {}: {}", &keyserver.device_id, content_one_time_key.is_some() ); let outbound_payload = OutboundKeys { key_payload: keyserver.device_key_info.key_payload, key_payload_signature: keyserver.device_key_info.key_payload_signature, social_proof: keyserver.device_key_info.social_proof, content_prekey: PreKey { prekey: keyserver.content_prekey.pre_key, prekey_signature: keyserver.content_prekey.pre_key_signature, }, notif_prekey: PreKey { prekey: keyserver.notif_prekey.pre_key, prekey_signature: keyserver.notif_prekey.pre_key_signature, }, content_one_time_key, notif_one_time_key, }; return Ok(Some(outbound_payload)); } let mut maybe_keyserver_id = None; for (device_id, device_info) in devices { let device_type = device_info .to_hashmap("device_id")? .get(USERS_TABLE_DEVICES_MAP_DEVICE_TYPE_ATTRIBUTE_NAME) .ok_or(Error::MissingItem)? .to_string(USERS_TABLE_DEVICES_MAP_DEVICE_TYPE_ATTRIBUTE_NAME)?; if device_type == "keyserver" { maybe_keyserver_id = Some(device_id); break; } } // Assert that the user has a keyserver, if they don't return None let keyserver_id = match maybe_keyserver_id { None => return Ok(None), Some(id) => id, }; let keyserver = devices.get_map(keyserver_id)?; let notif_one_time_key: Option = self .get_one_time_key(keyserver_id, OlmAccountType::Notification) .await?; let content_one_time_key: Option = self .get_one_time_key(keyserver_id, OlmAccountType::Content) .await?; debug!( "Able to get notif one-time key for keyserver {}: {}", keyserver_id, notif_one_time_key.is_some() ); debug!( "Able to get content one-time key for keyserver {}: {}", keyserver_id, content_one_time_key.is_some() ); let content_prekey = keyserver .get_string(USERS_TABLE_DEVICES_MAP_CONTENT_PREKEY_ATTRIBUTE_NAME)?; let content_prekey_signature = keyserver.get_string( USERS_TABLE_DEVICES_MAP_CONTENT_PREKEY_SIGNATURE_ATTRIBUTE_NAME, )?; let notif_prekey = keyserver .get_string(USERS_TABLE_DEVICES_MAP_NOTIF_PREKEY_ATTRIBUTE_NAME)?; let notif_prekey_signature = keyserver.get_string( USERS_TABLE_DEVICES_MAP_NOTIF_PREKEY_SIGNATURE_ATTRIBUTE_NAME, )?; let key_payload = keyserver .get_string(USERS_TABLE_DEVICES_MAP_KEY_PAYLOAD_ATTRIBUTE_NAME)? .to_string(); let key_payload_signature = keyserver .get_string(USERS_TABLE_DEVICES_MAP_KEY_PAYLOAD_SIGNATURE_ATTRIBUTE_NAME)? 
.to_string(); let social_proof = keyserver .get(USERS_TABLE_DEVICES_MAP_SOCIAL_PROOF_ATTRIBUTE_NAME) .and_then(|s| { s.to_string(USERS_TABLE_DEVICES_MAP_SOCIAL_PROOF_ATTRIBUTE_NAME) .ok() }) .map(|s| s.to_owned()); let full_content_prekey = PreKey { prekey: content_prekey.to_string(), prekey_signature: content_prekey_signature.to_string(), }; let full_notif_prekey = PreKey { prekey: notif_prekey.to_string(), prekey_signature: notif_prekey_signature.to_string(), }; let outbound_payload = OutboundKeys { key_payload, key_payload_signature, social_proof, content_prekey: full_content_prekey, notif_prekey: full_notif_prekey, content_one_time_key, notif_one_time_key, }; Ok(Some(outbound_payload)) } /// Will "mint" a single one-time key by attempting to successfully delete a /// key pub async fn get_one_time_key( &self, device_id: &str, account_type: OlmAccountType, ) -> Result, Error> { use crate::constants::one_time_keys_table as otk_table; use crate::constants::ONE_TIME_KEY_MINIMUM_THRESHOLD; let query_result = self.get_one_time_keys(device_id, account_type).await?; let items = query_result.items(); fn spawn_refresh_keys_task(device_id: &str) { // Clone the string slice to move into the async block let device_id = device_id.to_string(); tokio::spawn(async move { debug!("Attempting to request more keys for device: {}", &device_id); let result = crate::tunnelbroker::send_refresh_keys_request(&device_id).await; consume_error(result); }); } // If no one-time keys exist, or if there aren't enough, request more. // Additionally, if no one-time keys exist, return early. let item_vec = if let Some(items_list) = items { if items_list.len() < ONE_TIME_KEY_MINIMUM_THRESHOLD { spawn_refresh_keys_task(device_id); } items_list } else { debug!("Unable to find {:?} one-time key", account_type); spawn_refresh_keys_task(device_id); return Ok(None); }; let mut result = None; // Attempt to delete the one-time keys individually, a successful delete // mints the one-time key to the requester for item in item_vec { let pk = item.get_string(otk_table::PARTITION_KEY)?; let otk = item.get_string(otk_table::SORT_KEY)?; let composite_key = HashMap::from([ ( otk_table::PARTITION_KEY.to_string(), AttributeValue::S(pk.to_string()), ), ( otk_table::SORT_KEY.to_string(), AttributeValue::S(otk.to_string()), ), ]); debug!("Attempting to delete a {:?} one time key", account_type); match self .client .delete_item() .set_key(Some(composite_key)) .table_name(otk_table::NAME) .send() .await { Ok(_) => { result = Some(otk.to_string()); break; } // This err should only happen if a delete occurred between the read // above and this delete Err(e) => { debug!("Unable to delete key: {:?}", e); continue; } } } // Return deleted key Ok(result) } pub async fn get_one_time_keys( &self, device_id: &str, account_type: OlmAccountType, ) -> Result { use crate::constants::one_time_keys_table::*; // Add related prefix to partition key to grab the correct result set let partition_key = create_one_time_key_partition_key(device_id, account_type); self .client .query() .table_name(NAME) .key_condition_expression(format!("{} = :pk", PARTITION_KEY)) .expression_attribute_values(":pk", AttributeValue::S(partition_key)) .return_consumed_capacity(ReturnConsumedCapacity::Total) .send() .await .map_err(|e| Error::AwsSdk(e.into())) .map(|response| { let capacity_units = response .consumed_capacity() .and_then(|it| it.capacity_units()); debug!("OTK read consumed capacity: {:?}", capacity_units); response }) } pub async fn set_prekey( &self, user_id: String, 
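    // The delete-to-mint pattern in get_one_time_key above is what makes
    // concurrent requests safe: two callers may read the same candidate key,
    // but the DeleteItem call succeeds for only one of them, so each one-time
    // key is handed out at most once. Usage sketch (hypothetical device ID):
    //
    //   let maybe_otk = db_client
    //     .get_one_time_key("some-device-id", OlmAccountType::Content)
    //     .await?;
    //   // `None` means no key could be claimed; if the pool was empty or
    //   // running low, a refresh request was already spawned via tunnelbroker.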
device_id: String, content_prekey: String, content_prekey_signature: String, notif_prekey: String, notif_prekey_signature: String, ) -> Result<(), Error> { // update new device list too self .update_device_prekeys( user_id.clone(), device_id.clone(), content_prekey.clone(), content_prekey_signature.clone(), notif_prekey.clone(), notif_prekey_signature.clone(), ) .await?; let notif_prekey_av = AttributeValue::S(notif_prekey); let notif_prekey_signature_av = AttributeValue::S(notif_prekey_signature); let content_prekey_av = AttributeValue::S(content_prekey); let content_prekey_signature_av = AttributeValue::S(content_prekey_signature); let update_expression = format!("SET {0}.#{1}.{2} = :n, {0}.#{1}.{3} = :p, {0}.#{1}.{4} = :c, {0}.#{1}.{5} = :d", USERS_TABLE_DEVICES_ATTRIBUTE, "deviceID", USERS_TABLE_DEVICES_MAP_NOTIF_PREKEY_ATTRIBUTE_NAME, USERS_TABLE_DEVICES_MAP_NOTIF_PREKEY_SIGNATURE_ATTRIBUTE_NAME, USERS_TABLE_DEVICES_MAP_CONTENT_PREKEY_ATTRIBUTE_NAME, USERS_TABLE_DEVICES_MAP_CONTENT_PREKEY_SIGNATURE_ATTRIBUTE_NAME, ); let expression_attribute_names = HashMap::from([ (format!("#{}", "deviceID"), device_id), ( "#user_id".to_string(), USERS_TABLE_PARTITION_KEY.to_string(), ), ]); let expression_attribute_values = HashMap::from([ (":n".to_string(), notif_prekey_av), (":p".to_string(), notif_prekey_signature_av), (":c".to_string(), content_prekey_av), (":d".to_string(), content_prekey_signature_av), ]); self .client .update_item() .table_name(USERS_TABLE) .key(USERS_TABLE_PARTITION_KEY, AttributeValue::S(user_id)) .update_expression(update_expression) .condition_expression("attribute_exists(#user_id)") .set_expression_attribute_names(Some(expression_attribute_names)) .set_expression_attribute_values(Some(expression_attribute_values)) .send() .await .map_err(|e| Error::AwsSdk(e.into()))?; Ok(()) } pub async fn append_one_time_prekeys( &self, device_id: String, content_one_time_keys: Vec, notif_one_time_keys: Vec, ) -> Result<(), Error> { use crate::constants::one_time_keys_table; let mut otk_requests = into_one_time_put_requests( &device_id, content_one_time_keys, OlmAccountType::Content, ); let notif_otk_requests: Vec = into_one_time_put_requests( &device_id, notif_one_time_keys, OlmAccountType::Notification, ); otk_requests.extend(notif_otk_requests); // BatchWriteItem has a hard limit of 25 writes per call for requests in otk_requests.chunks(25) { self .client .batch_write_item() .request_items(one_time_keys_table::NAME, requests.to_vec()) .send() .await .map_err(|e| Error::AwsSdk(e.into()))?; } Ok(()) } async fn add_device_to_users_table( &self, user_id: String, flattened_device_key_upload: FlattenedDeviceKeyUpload, social_proof: Option, ) -> Result<(), Error> { // Avoid borrowing from lifetime of flattened_device_key_upload let device_id = flattened_device_key_upload.device_id_key.clone(); let content_one_time_keys = flattened_device_key_upload.content_one_time_keys.clone(); let notif_one_time_keys = flattened_device_key_upload.notif_one_time_keys.clone(); let device_info = create_device_info(flattened_device_key_upload, social_proof); let update_expression = format!("SET {}.#{} = :v", USERS_TABLE_DEVICES_ATTRIBUTE, "deviceID",); let expression_attribute_names = HashMap::from([(format!("#{}", "deviceID"), device_id.clone())]); let expression_attribute_values = HashMap::from([(":v".to_string(), AttributeValue::M(device_info))]); self .client .update_item() .table_name(USERS_TABLE) .key(USERS_TABLE_PARTITION_KEY, AttributeValue::S(user_id)) .update_expression(update_expression) 
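      // The chunks(25) in append_one_time_prekeys above exists because
      // BatchWriteItem is capped at 25 write requests per call by DynamoDB.
      // The same pattern in isolation (hypothetical `requests` vector; a
      // fuller version would also retry UnprocessedItems from each response):
      //
      //   for chunk in requests.chunks(25) {
      //     client
      //       .batch_write_item()
      //       .request_items(one_time_keys_table::NAME, chunk.to_vec())
      //       .send()
      //       .await
      //       .map_err(|e| Error::AwsSdk(e.into()))?;
      //   }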
.set_expression_attribute_names(Some(expression_attribute_names)) .set_expression_attribute_values(Some(expression_attribute_values)) .send() .await .map_err(|e| Error::AwsSdk(e.into()))?; self .append_one_time_prekeys( device_id, content_one_time_keys, notif_one_time_keys, ) .await?; Ok(()) } pub async fn remove_device_from_users_table( &self, user_id: String, device_id_key: String, ) -> Result<(), Error> { // delete from new device list too self.remove_device(&user_id, &device_id_key).await?; let update_expression = format!("REMOVE {}.{}", USERS_TABLE_DEVICES_ATTRIBUTE, "#deviceID"); let expression_attribute_names = HashMap::from([("#deviceID".to_string(), device_id_key)]); self .client .update_item() .table_name(USERS_TABLE) .key(USERS_TABLE_PARTITION_KEY, AttributeValue::S(user_id)) .update_expression(update_expression) .set_expression_attribute_names(Some(expression_attribute_names)) .send() .await .map_err(|e| Error::AwsSdk(e.into()))?; Ok(()) } pub async fn update_user_password( &self, user_id: String, password_file: Vec, ) -> Result<(), Error> { let update_expression = format!("SET {} = :p", USERS_TABLE_REGISTRATION_ATTRIBUTE); let expression_attribute_values = HashMap::from([( ":p".to_string(), AttributeValue::B(Blob::new(password_file)), )]); self .client .update_item() .table_name(USERS_TABLE) .key(USERS_TABLE_PARTITION_KEY, AttributeValue::S(user_id)) .update_expression(update_expression) .set_expression_attribute_values(Some(expression_attribute_values)) .send() .await .map_err(|e| Error::AwsSdk(e.into()))?; Ok(()) } pub async fn delete_user( &self, user_id: String, ) -> Result { debug!(user_id, "Attempting to delete user's devices"); self.delete_devices_table_rows_for_user(&user_id).await?; debug!(user_id, "Attempting to delete user"); match self .client .delete_item() .table_name(USERS_TABLE) .key( USERS_TABLE_PARTITION_KEY, AttributeValue::S(user_id.clone()), ) .send() .await { Ok(out) => { info!("User has been deleted {}", user_id); Ok(out) } Err(e) => { error!("DynamoDB client failed to delete user {}", user_id); Err(Error::AwsSdk(e.into())) } } } pub async fn get_access_token_data( &self, user_id: String, signing_public_key: String, ) -> Result, Error> { let primary_key = create_composite_primary_key( ( ACCESS_TOKEN_TABLE_PARTITION_KEY.to_string(), user_id.clone(), ), ( ACCESS_TOKEN_SORT_KEY.to_string(), signing_public_key.clone(), ), ); let get_item_result = self .client .get_item() .table_name(ACCESS_TOKEN_TABLE) .set_key(Some(primary_key)) .consistent_read(true) .send() .await; match get_item_result { Ok(GetItemOutput { item: Some(mut item), .. 
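    // Access tokens are keyed by a composite primary key, so a single user
    // can hold a distinct token per signing key (effectively per device).
    // Sketch of the key shape built by create_composite_primary_key above:
    //
    //   let key = HashMap::from([
    //     (
    //       ACCESS_TOKEN_TABLE_PARTITION_KEY.to_string(),
    //       AttributeValue::S(user_id),
    //     ),
    //     (
    //       ACCESS_TOKEN_SORT_KEY.to_string(),
    //       AttributeValue::S(signing_public_key),
    //     ),
    //   ]);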
}) => { let created = DateTime::::try_from_attr( ACCESS_TOKEN_TABLE_CREATED_ATTRIBUTE, item.remove(ACCESS_TOKEN_TABLE_CREATED_ATTRIBUTE), )?; let auth_type = parse_auth_type_attribute( item.remove(ACCESS_TOKEN_TABLE_AUTH_TYPE_ATTRIBUTE), )?; let valid = parse_valid_attribute( item.remove(ACCESS_TOKEN_TABLE_VALID_ATTRIBUTE), )?; let access_token = parse_token_attribute( item.remove(ACCESS_TOKEN_TABLE_TOKEN_ATTRIBUTE), )?; Ok(Some(AccessTokenData { user_id, signing_public_key, access_token, created, auth_type, valid, })) } Ok(_) => { info!( "No item found for user {} and signing public key {} in token table", user_id, signing_public_key ); Ok(None) } Err(e) => { error!( "DynamoDB client failed to get token for user {} with signing public key {}: {}", user_id, signing_public_key, e ); Err(Error::AwsSdk(e.into())) } } } pub async fn verify_access_token( &self, user_id: String, signing_public_key: String, access_token_to_verify: String, ) -> Result { let is_valid = self .get_access_token_data(user_id, signing_public_key) .await? .map(|access_token_data| { constant_time_eq( access_token_data.access_token.as_bytes(), access_token_to_verify.as_bytes(), ) && access_token_data.is_valid() }) .unwrap_or(false); Ok(is_valid) } pub async fn put_access_token_data( &self, access_token_data: AccessTokenData, ) -> Result { let item = HashMap::from([ ( ACCESS_TOKEN_TABLE_PARTITION_KEY.to_string(), AttributeValue::S(access_token_data.user_id), ), ( ACCESS_TOKEN_SORT_KEY.to_string(), AttributeValue::S(access_token_data.signing_public_key), ), ( ACCESS_TOKEN_TABLE_TOKEN_ATTRIBUTE.to_string(), AttributeValue::S(access_token_data.access_token), ), ( ACCESS_TOKEN_TABLE_CREATED_ATTRIBUTE.to_string(), AttributeValue::S(access_token_data.created.to_rfc3339()), ), ( ACCESS_TOKEN_TABLE_AUTH_TYPE_ATTRIBUTE.to_string(), AttributeValue::S(match access_token_data.auth_type { AuthType::Password => "password".to_string(), AuthType::Wallet => "wallet".to_string(), }), ), ( ACCESS_TOKEN_TABLE_VALID_ATTRIBUTE.to_string(), AttributeValue::Bool(access_token_data.valid), ), ]); self .client .put_item() .table_name(ACCESS_TOKEN_TABLE) .set_item(Some(item)) .send() .await .map_err(|e| Error::AwsSdk(e.into())) } pub async fn delete_access_token_data( &self, user_id: String, device_id_key: String, ) -> Result<(), Error> { self .client .delete_item() .table_name(ACCESS_TOKEN_TABLE) .key( ACCESS_TOKEN_TABLE_PARTITION_KEY.to_string(), AttributeValue::S(user_id), ) .key( ACCESS_TOKEN_SORT_KEY.to_string(), AttributeValue::S(device_id_key), ) .send() .await .map_err(|e| Error::AwsSdk(e.into()))?; Ok(()) } pub async fn wallet_address_taken( &self, wallet_address: String, ) -> Result { let result = self .get_user_id_from_user_info(wallet_address, &AuthType::Wallet) .await?; Ok(result.is_some()) } pub async fn username_taken(&self, username: String) -> Result { let result = self .get_user_id_from_user_info(username, &AuthType::Password) .await?; Ok(result.is_some()) } pub async fn filter_out_taken_usernames( &self, usernames: Vec, ) -> Result, Error> { let db_usernames = self.get_all_usernames().await?; let db_usernames_set: HashSet = db_usernames.into_iter().collect(); let usernames_set: HashSet = usernames.into_iter().collect(); let available_usernames: Vec = usernames_set .difference(&db_usernames_set) .cloned() .collect(); Ok(available_usernames) } async fn get_user_from_user_info( &self, user_info: String, auth_type: &AuthType, ) -> Result>, Error> { let (index, attribute_name) = match auth_type { AuthType::Password => { 
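      // verify_access_token above deliberately uses constant_time_eq instead
      // of `==`: a short-circuiting comparison returns faster the earlier the
      // first mismatched byte occurs, and that timing difference can leak a
      // secret token byte by byte. Equivalent standalone check:
      //
      //   use constant_time_eq::constant_time_eq;
      //
      //   let token_matches =
      //     constant_time_eq(stored_token.as_bytes(), presented_token.as_bytes());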
(USERS_TABLE_USERNAME_INDEX, USERS_TABLE_USERNAME_ATTRIBUTE) } AuthType::Wallet => ( USERS_TABLE_WALLET_ADDRESS_INDEX, USERS_TABLE_WALLET_ADDRESS_ATTRIBUTE, ), }; match self .client .query() .table_name(USERS_TABLE) .index_name(index) .key_condition_expression(format!("{} = :u", attribute_name)) .expression_attribute_values(":u", AttributeValue::S(user_info.clone())) .send() .await { Ok(QueryOutput { items: Some(items), .. }) => { let num_items = items.len(); if num_items == 0 { return Ok(None); } if num_items > 1 { warn!( "{} user IDs associated with {} {}: {:?}", num_items, attribute_name, user_info, items ); } let first_item = items[0].clone(); let user_id = first_item .get(USERS_TABLE_PARTITION_KEY) .ok_or(DBItemError { attribute_name: USERS_TABLE_PARTITION_KEY.to_string(), attribute_value: None.into(), attribute_error: DBItemAttributeError::Missing, })? .as_s() .map_err(|_| DBItemError { attribute_name: USERS_TABLE_PARTITION_KEY.to_string(), attribute_value: first_item .get(USERS_TABLE_PARTITION_KEY) .cloned() .into(), attribute_error: DBItemAttributeError::IncorrectType, })?; let result = self.get_item_from_users_table(user_id).await?; Ok(result.item) } Ok(_) => { info!( "No item found for {} {} in users table", attribute_name, user_info ); Ok(None) } Err(e) => { error!( "DynamoDB client failed to get user from {} {}: {}", attribute_name, user_info, e ); Err(Error::AwsSdk(e.into())) } } } pub async fn get_keys_for_user_id( &self, user_id: &str, get_one_time_keys: bool, ) -> Result, Error> { let Some(user) = self.get_item_from_users_table(user_id).await?.item else { return Ok(None); }; self.get_keys_for_user(user, get_one_time_keys).await } async fn get_keys_for_user( &self, mut user: AttributeMap, get_one_time_keys: bool, ) -> Result, Error> { let user_id: String = user.take_attr(USERS_TABLE_PARTITION_KEY)?; let devices: AttributeMap = user.take_attr(USERS_TABLE_DEVICES_ATTRIBUTE)?; let mut devices_response = self.get_keys_for_user_devices(user_id).await?; for (device_id_key, device_info) in devices { let device_info_map = AttributeMap::try_from_attr(&device_id_key, Some(device_info))?; let mut device_info_string_map = HashMap::new(); for (attribute_name, attribute_value) in device_info_map { // Excluding one-time keys since we're moving them to a separate table if attribute_name == USERS_TABLE_DEVICES_MAP_NOTIF_ONE_TIME_KEYS_ATTRIBUTE_NAME || attribute_name == USERS_TABLE_DEVICES_MAP_CONTENT_ONE_TIME_KEYS_ATTRIBUTE_NAME { continue; } // Excluding info already taken from device list response if devices_response .get(&device_id_key) .map(|device| device.contains_key(&attribute_name)) .unwrap_or(false) { continue; } let attribute_value_str = String::try_from_attr(&attribute_name, Some(attribute_value))?; device_info_string_map.insert(attribute_name, attribute_value_str); } if get_one_time_keys { if let Some(notif_one_time_key) = self .get_one_time_key(&device_id_key, OlmAccountType::Notification) .await? { device_info_string_map .insert(NOTIF_ONE_TIME_KEY.to_string(), notif_one_time_key); } if let Some(content_one_time_key) = self .get_one_time_key(&device_id_key, OlmAccountType::Content) .await? 
{ device_info_string_map .insert(CONTENT_ONE_TIME_KEY.to_string(), content_one_time_key); } } devices_response.insert(device_id_key, device_info_string_map); } Ok(Some(devices_response)) } pub async fn get_user_id_from_user_info( &self, user_info: String, auth_type: &AuthType, ) -> Result, Error> { match self .get_user_from_user_info(user_info.clone(), auth_type) .await { Ok(Some(mut user)) => user .take_attr(USERS_TABLE_PARTITION_KEY) .map(Some) .map_err(Error::Attribute), Ok(_) => Ok(None), Err(e) => Err(e), } } pub async fn get_user_id_and_password_file_from_username( &self, username: &str, ) -> Result)>, Error> { match self .get_user_from_user_info(username.to_string(), &AuthType::Password) .await { Ok(Some(mut user)) => { let user_id = user.take_attr(USERS_TABLE_PARTITION_KEY)?; let password_file = parse_registration_data_attribute( user.remove(USERS_TABLE_REGISTRATION_ATTRIBUTE), )?; Ok(Some((user_id, password_file))) } Ok(_) => { info!( "No item found for user {} in PAKE registration table", username ); Ok(None) } Err(e) => { error!( "DynamoDB client failed to get registration data for user {}: {}", username, e ); Err(e) } } } pub async fn get_item_from_users_table( &self, user_id: &str, ) -> Result { let primary_key = create_simple_primary_key(( USERS_TABLE_PARTITION_KEY.to_string(), user_id.to_string(), )); self .client .get_item() .table_name(USERS_TABLE) .set_key(Some(primary_key)) .consistent_read(true) .send() .await .map_err(|e| Error::AwsSdk(e.into())) } pub async fn get_user_identifier( &self, user_id: &str, ) -> Result { let user_info = self .get_item_from_users_table(user_id) .await? .item .ok_or(Error::MissingItem)?; Identifier::try_from(user_info).map_err(|e| { error!(user_id, "Database item is missing an identifier"); e }) } async fn get_all_usernames(&self) -> Result, Error> { let scan_output = self .client .scan() .table_name(USERS_TABLE) .projection_expression(USERS_TABLE_USERNAME_ATTRIBUTE) .send() .await .map_err(|e| Error::AwsSdk(e.into()))?; let mut result = Vec::new(); if let Some(attributes) = scan_output.items { for mut attribute in attributes { if let Ok(username) = attribute.take_attr(USERS_TABLE_USERNAME_ATTRIBUTE) { result.push(username); } } } Ok(result) } pub async fn add_nonce_to_nonces_table( &self, nonce_data: NonceData, ) -> Result { let item = HashMap::from([ ( NONCE_TABLE_PARTITION_KEY.to_string(), AttributeValue::S(nonce_data.nonce), ), ( NONCE_TABLE_CREATED_ATTRIBUTE.to_string(), AttributeValue::S(nonce_data.created.to_rfc3339()), ), ( NONCE_TABLE_EXPIRATION_TIME_ATTRIBUTE.to_string(), AttributeValue::S(nonce_data.expiration_time.to_rfc3339()), ), ( NONCE_TABLE_EXPIRATION_TIME_UNIX_ATTRIBUTE.to_string(), AttributeValue::N(nonce_data.expiration_time.timestamp().to_string()), ), ]); self .client .put_item() .table_name(NONCE_TABLE) .set_item(Some(item)) .send() .await .map_err(|e| Error::AwsSdk(e.into())) } pub async fn get_nonce_from_nonces_table( &self, nonce_value: impl Into, ) -> Result, Error> { let get_response = self .client .get_item() .table_name(NONCE_TABLE) .key( NONCE_TABLE_PARTITION_KEY, AttributeValue::S(nonce_value.into()), ) .send() .await .map_err(|e| Error::AwsSdk(e.into()))?; let Some(mut item) = get_response.item else { return Ok(None); }; let nonce = item.take_attr(NONCE_TABLE_PARTITION_KEY)?; let created = DateTime::::try_from_attr( NONCE_TABLE_CREATED_ATTRIBUTE, item.remove(NONCE_TABLE_CREATED_ATTRIBUTE), )?; let expiration_time = DateTime::::try_from_attr( NONCE_TABLE_EXPIRATION_TIME_ATTRIBUTE, 
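// Aside (illustrative sketch, not part of the original diff):
// `add_nonce_to_nonces_table` above stores the expiration twice: once as an
// RFC 3339 string for application reads, and once as epoch seconds, which is
// the numeric format DynamoDB's TTL feature requires (whether TTL is actually
// enabled on the nonces table is an assumption here). The dual encoding in
// isolation:
fn nonce_expiration_attrs(
  expiration_time: DateTime<Utc>,
) -> [(String, AttributeValue); 2] {
  [
    (
      NONCE_TABLE_EXPIRATION_TIME_ATTRIBUTE.to_string(),
      AttributeValue::S(expiration_time.to_rfc3339()),
    ),
    (
      NONCE_TABLE_EXPIRATION_TIME_UNIX_ATTRIBUTE.to_string(),
      // whole seconds since the Unix epoch, stored as a DynamoDB number
      AttributeValue::N(expiration_time.timestamp().to_string()),
    ),
  ]
}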
item.remove(NONCE_TABLE_EXPIRATION_TIME_ATTRIBUTE), )?; Ok(Some(NonceData { nonce, created, expiration_time, })) } pub async fn remove_nonce_from_nonces_table( &self, nonce: impl Into, ) -> Result<(), Error> { self .client .delete_item() .table_name(NONCE_TABLE) .key(NONCE_TABLE_PARTITION_KEY, AttributeValue::S(nonce.into())) .send() .await .map_err(|e| Error::AwsSdk(e.into()))?; Ok(()) } pub async fn add_usernames_to_reserved_usernames_table( &self, usernames: Vec, ) -> Result<(), Error> { // A single call to BatchWriteItem can consist of up to 25 operations for usernames_chunk in usernames.chunks(25) { let write_requests = usernames_chunk .iter() .map(|username| { let put_request = PutRequest::builder() .item( RESERVED_USERNAMES_TABLE_PARTITION_KEY, AttributeValue::S(username.to_string()), ) .build(); WriteRequest::builder().put_request(put_request).build() }) .collect(); self .client .batch_write_item() .request_items(RESERVED_USERNAMES_TABLE, write_requests) .send() .await .map_err(|e| Error::AwsSdk(e.into()))?; } info!("Batch write item to reserved usernames table succeeded"); Ok(()) } pub async fn delete_username_from_reserved_usernames_table( &self, username: String, ) -> Result { debug!( "Attempting to delete username {} from reserved usernames table", username ); match self .client .delete_item() .table_name(RESERVED_USERNAMES_TABLE) .key( RESERVED_USERNAMES_TABLE_PARTITION_KEY, AttributeValue::S(username.clone()), ) .send() .await { Ok(out) => { info!( "Username {} has been deleted from reserved usernames table", username ); Ok(out) } Err(e) => { error!("DynamoDB client failed to delete username {} from reserved usernames table", username); Err(Error::AwsSdk(e.into())) } } } pub async fn username_in_reserved_usernames_table( &self, username: &str, ) -> Result { match self .client .get_item() .table_name(RESERVED_USERNAMES_TABLE) .key( RESERVED_USERNAMES_TABLE_PARTITION_KEY.to_string(), AttributeValue::S(username.to_string()), ) .consistent_read(true) .send() .await { Ok(GetItemOutput { item: Some(_), .. 
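// Aside (illustrative sketch, not part of the original diff): BatchWriteItem
// caps each request at 25 operations, hence the `chunks(25)` loop in
// `add_usernames_to_reserved_usernames_table` above. The core of that pattern
// (client type path assumed from this file's imports); a production-hardened
// version would also retry any `unprocessed_items` returned per call, which
// the loop above does not do:
async fn batch_put_in_chunks(
  client: &comm_lib::aws::ddb::Client,
  table: &str,
  requests: Vec<WriteRequest>,
) -> Result<(), Error> {
  for chunk in requests.chunks(25) {
    client
      .batch_write_item()
      .request_items(table, chunk.to_vec())
      .send()
      .await
      .map_err(|e| Error::AwsSdk(e.into()))?;
  }
  Ok(())
}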
}) => Ok(true), Ok(_) => Ok(false), Err(e) => Err(Error::AwsSdk(e.into())), } } } type AttributeName = String; pub type DeviceKeys = HashMap; type Devices = HashMap; fn create_simple_primary_key( partition_key: (AttributeName, String), ) -> HashMap { HashMap::from([(partition_key.0, AttributeValue::S(partition_key.1))]) } fn create_composite_primary_key( partition_key: (AttributeName, String), sort_key: (AttributeName, String), ) -> HashMap { let mut primary_key = create_simple_primary_key(partition_key); primary_key.insert(sort_key.0, AttributeValue::S(sort_key.1)); primary_key } fn parse_auth_type_attribute( attribute: Option, ) -> Result { if let Some(AttributeValue::S(auth_type)) = &attribute { match auth_type.as_str() { "password" => Ok(AuthType::Password), "wallet" => Ok(AuthType::Wallet), _ => Err(DBItemError::new( ACCESS_TOKEN_TABLE_AUTH_TYPE_ATTRIBUTE.to_string(), attribute.into(), DBItemAttributeError::IncorrectType, )), } } else { Err(DBItemError::new( ACCESS_TOKEN_TABLE_AUTH_TYPE_ATTRIBUTE.to_string(), attribute.into(), DBItemAttributeError::Missing, )) } } fn parse_valid_attribute( attribute: Option, ) -> Result { match attribute { Some(AttributeValue::Bool(valid)) => Ok(valid), Some(_) => Err(DBItemError::new( ACCESS_TOKEN_TABLE_VALID_ATTRIBUTE.to_string(), attribute.into(), DBItemAttributeError::IncorrectType, )), None => Err(DBItemError::new( ACCESS_TOKEN_TABLE_VALID_ATTRIBUTE.to_string(), attribute.into(), DBItemAttributeError::Missing, )), } } fn parse_token_attribute( attribute: Option, ) -> Result { match attribute { Some(AttributeValue::S(token)) => Ok(token), Some(_) => Err(DBItemError::new( ACCESS_TOKEN_TABLE_TOKEN_ATTRIBUTE.to_string(), attribute.into(), DBItemAttributeError::IncorrectType, )), None => Err(DBItemError::new( ACCESS_TOKEN_TABLE_TOKEN_ATTRIBUTE.to_string(), attribute.into(), DBItemAttributeError::Missing, )), } } fn parse_registration_data_attribute( attribute: Option, ) -> Result, DBItemError> { match attribute { Some(AttributeValue::B(server_registration_bytes)) => { Ok(server_registration_bytes.into_inner()) } Some(_) => Err(DBItemError::new( USERS_TABLE_REGISTRATION_ATTRIBUTE.to_string(), attribute.into(), DBItemAttributeError::IncorrectType, )), None => Err(DBItemError::new( USERS_TABLE_REGISTRATION_ATTRIBUTE.to_string(), attribute.into(), DBItemAttributeError::Missing, )), } } #[deprecated(note = "Use `comm_lib` counterpart instead")] #[allow(dead_code)] fn parse_map_attribute( attribute_name: &str, attribute_value: Option, ) -> Result { match attribute_value { Some(AttributeValue::M(map)) => Ok(map), Some(_) => { error!( attribute = attribute_name, value = ?attribute_value, error_type = "IncorrectType", "Unexpected attribute type when parsing map attribute" ); Err(DBItemError::new( attribute_name.to_string(), attribute_value.into(), DBItemAttributeError::IncorrectType, )) } None => { error!( attribute = attribute_name, error_type = "Missing", "Attribute is missing" ); Err(DBItemError::new( attribute_name.to_string(), attribute_value.into(), DBItemAttributeError::Missing, )) } } } fn create_device_info( flattened_device_key_upload: FlattenedDeviceKeyUpload, social_proof: Option, ) -> AttributeMap { let mut device_info = HashMap::from([ ( USERS_TABLE_DEVICES_MAP_DEVICE_TYPE_ATTRIBUTE_NAME.to_string(), AttributeValue::S(flattened_device_key_upload.device_type.to_string()), ), ( USERS_TABLE_DEVICES_MAP_KEY_PAYLOAD_ATTRIBUTE_NAME.to_string(), AttributeValue::S(flattened_device_key_upload.key_payload), ), ( 
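// Aside (illustrative sketch, not part of the original diff): the
// `parse_auth_type_attribute` / `parse_valid_attribute` / `parse_token_attribute`
// helpers above all share one shape: match on the `Option<AttributeValue>`,
// extract the expected variant, or report `Missing` / `IncorrectType` through
// `DBItemError`. A hypothetical string-typed instance of that shape (not in
// the codebase, shown only to make the pattern explicit):
fn parse_string_attr(
  attribute_name: &str,
  attribute: Option<AttributeValue>,
) -> Result<String, DBItemError> {
  match attribute {
    Some(AttributeValue::S(value)) => Ok(value),
    Some(_) => Err(DBItemError::new(
      attribute_name.to_string(),
      attribute.into(),
      DBItemAttributeError::IncorrectType,
    )),
    None => Err(DBItemError::new(
      attribute_name.to_string(),
      attribute.into(),
      DBItemAttributeError::Missing,
    )),
  }
}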
USERS_TABLE_DEVICES_MAP_KEY_PAYLOAD_SIGNATURE_ATTRIBUTE_NAME.to_string(), AttributeValue::S(flattened_device_key_upload.key_payload_signature), ), ( USERS_TABLE_DEVICES_MAP_CONTENT_PREKEY_ATTRIBUTE_NAME.to_string(), AttributeValue::S(flattened_device_key_upload.content_prekey), ), ( USERS_TABLE_DEVICES_MAP_CONTENT_PREKEY_SIGNATURE_ATTRIBUTE_NAME .to_string(), AttributeValue::S(flattened_device_key_upload.content_prekey_signature), ), ( USERS_TABLE_DEVICES_MAP_NOTIF_PREKEY_ATTRIBUTE_NAME.to_string(), AttributeValue::S(flattened_device_key_upload.notif_prekey), ), ( USERS_TABLE_DEVICES_MAP_NOTIF_PREKEY_SIGNATURE_ATTRIBUTE_NAME.to_string(), AttributeValue::S(flattened_device_key_upload.notif_prekey_signature), ), ]); if let Some(social_proof) = social_proof { device_info.insert( USERS_TABLE_DEVICES_MAP_SOCIAL_PROOF_ATTRIBUTE_NAME.to_string(), AttributeValue::S(social_proof), ); } device_info } #[cfg(test)] mod tests { use super::*; #[test] fn test_create_simple_primary_key() { let partition_key_name = "userID".to_string(); let partition_key_value = "12345".to_string(); let partition_key = (partition_key_name.clone(), partition_key_value.clone()); let mut primary_key = create_simple_primary_key(partition_key); assert_eq!(primary_key.len(), 1); let attribute = primary_key.remove(&partition_key_name); assert!(attribute.is_some()); assert_eq!(attribute, Some(AttributeValue::S(partition_key_value))); } #[test] fn test_create_composite_primary_key() { let partition_key_name = "userID".to_string(); let partition_key_value = "12345".to_string(); let partition_key = (partition_key_name.clone(), partition_key_value.clone()); let sort_key_name = "deviceID".to_string(); let sort_key_value = "54321".to_string(); let sort_key = (sort_key_name.clone(), sort_key_value.clone()); let mut primary_key = create_composite_primary_key(partition_key, sort_key); assert_eq!(primary_key.len(), 2); let partition_key_attribute = primary_key.remove(&partition_key_name); assert!(partition_key_attribute.is_some()); assert_eq!( partition_key_attribute, Some(AttributeValue::S(partition_key_value)) ); let sort_key_attribute = primary_key.remove(&sort_key_name); assert!(sort_key_attribute.is_some()); assert_eq!(sort_key_attribute, Some(AttributeValue::S(sort_key_value))) } #[test] fn validate_keys() { // Taken from test user let example_payload = r#"{\"notificationIdentityPublicKeys\":{\"curve25519\":\"DYmV8VdkjwG/VtC8C53morogNJhpTPT/4jzW0/cxzQo\",\"ed25519\":\"D0BV2Y7Qm36VUtjwyQTJJWYAycN7aMSJmhEsRJpW2mk\"},\"primaryIdentityPublicKeys\":{\"curve25519\":\"Y4ZIqzpE1nv83kKGfvFP6rifya0itRg2hifqYtsISnk\",\"ed25519\":\"cSlL+VLLJDgtKSPlIwoCZg0h0EmHlQoJC08uV/O+jvg\"}}"#; let serialized_payload = KeyPayload::from_str(example_payload).unwrap(); assert_eq!( serialized_payload .notification_identity_public_keys .curve25519, "DYmV8VdkjwG/VtC8C53morogNJhpTPT/4jzW0/cxzQo" ); } #[test] fn test_int_to_device_type() { let valid_result = DeviceType::try_from(3); assert!(valid_result.is_ok()); assert_eq!(valid_result.unwrap(), DeviceType::Android); let invalid_result = DeviceType::try_from(6); assert!(invalid_result.is_err()); } } diff --git a/services/identity/src/database/device_list.rs b/services/identity/src/database/device_list.rs index 4dea465bc..9dbe3db39 100644 --- a/services/identity/src/database/device_list.rs +++ b/services/identity/src/database/device_list.rs @@ -1,1301 +1,1301 @@ use std::collections::HashMap; use chrono::{DateTime, Utc}; use comm_lib::{ aws::ddb::{ operation::{get_item::GetItemOutput, 
query::builders::QueryFluentBuilder}, types::{ error::TransactionCanceledException, AttributeValue, Delete, DeleteRequest, Put, TransactWriteItem, Update, WriteRequest, }, }, database::{ AttributeExtractor, AttributeMap, DBItemAttributeError, DBItemError, DynamoDBError, TryFromAttribute, }, }; use tracing::{error, warn}; use crate::{ client_service::FlattenedDeviceKeyUpload, constants::{ devices_table::{self, *}, USERS_TABLE, USERS_TABLE_DEVICELIST_TIMESTAMP_ATTRIBUTE_NAME, USERS_TABLE_DEVICES_MAP_CONTENT_PREKEY_ATTRIBUTE_NAME, USERS_TABLE_DEVICES_MAP_CONTENT_PREKEY_SIGNATURE_ATTRIBUTE_NAME, USERS_TABLE_DEVICES_MAP_KEY_PAYLOAD_ATTRIBUTE_NAME, USERS_TABLE_DEVICES_MAP_KEY_PAYLOAD_SIGNATURE_ATTRIBUTE_NAME, USERS_TABLE_DEVICES_MAP_NOTIF_PREKEY_ATTRIBUTE_NAME, USERS_TABLE_DEVICES_MAP_NOTIF_PREKEY_SIGNATURE_ATTRIBUTE_NAME, USERS_TABLE_DEVICES_MAP_SOCIAL_PROOF_ATTRIBUTE_NAME, USERS_TABLE_PARTITION_KEY, }, ddb_utils::AttributesOptionExt, error::{DeviceListError, Error, FromAttributeValue}, grpc_services::protos::unauth::DeviceType, }; use super::DatabaseClient; #[derive(Clone, Debug)] pub struct DeviceRow { pub user_id: String, pub device_id: String, pub device_type: DeviceType, pub device_key_info: IdentityKeyInfo, pub content_prekey: PreKey, pub notif_prekey: PreKey, // migration-related data pub code_version: u64, /// Timestamp of last login (access token generation) pub login_time: DateTime, } #[derive(Clone, Debug)] pub struct DeviceListRow { pub user_id: String, pub timestamp: DateTime, pub device_ids: Vec, } #[derive(Clone, Debug)] pub struct IdentityKeyInfo { pub key_payload: String, pub key_payload_signature: String, pub social_proof: Option, } #[derive(Clone, Debug)] pub struct PreKey { pub pre_key: String, pub pre_key_signature: String, } impl DeviceRow { pub fn from_device_key_upload( user_id: impl Into, upload: FlattenedDeviceKeyUpload, social_proof: Option, code_version: u64, login_time: DateTime, ) -> Self { Self { user_id: user_id.into(), device_id: upload.device_id_key, device_type: DeviceType::from_str_name(upload.device_type.as_str_name()) .expect("DeviceType conversion failed. 
Identity client and server protos mismatch"), device_key_info: IdentityKeyInfo { key_payload: upload.key_payload, key_payload_signature: upload.key_payload_signature, social_proof, }, content_prekey: PreKey { pre_key: upload.content_prekey, pre_key_signature: upload.content_prekey_signature, }, notif_prekey: PreKey { pre_key: upload.notif_prekey, pre_key_signature: upload.notif_prekey_signature, }, code_version, login_time, } } } impl DeviceListRow { /// Generates new device list row from given devices fn new(user_id: impl Into, device_ids: Vec) -> Self { Self { user_id: user_id.into(), device_ids, timestamp: Utc::now(), } } } // helper structs for converting to/from attribute values for sort key (a.k.a itemID) struct DeviceIDAttribute(String); struct DeviceListKeyAttribute(DateTime); impl From for AttributeValue { fn from(value: DeviceIDAttribute) -> Self { AttributeValue::S(format!("{DEVICE_ITEM_KEY_PREFIX}{}", value.0)) } } impl From for AttributeValue { fn from(value: DeviceListKeyAttribute) -> Self { AttributeValue::S(format!( "{DEVICE_LIST_KEY_PREFIX}{}", value.0.to_rfc3339() )) } } impl TryFrom> for DeviceIDAttribute { type Error = DBItemError; fn try_from(value: Option) -> Result { let item_id = String::try_from_attr(ATTR_ITEM_ID, value)?; // remove the device- prefix let device_id = item_id .strip_prefix(DEVICE_ITEM_KEY_PREFIX) .ok_or_else(|| DBItemError { attribute_name: ATTR_ITEM_ID.to_string(), attribute_value: item_id.clone().into(), attribute_error: DBItemAttributeError::InvalidValue, })? .to_string(); Ok(Self(device_id)) } } impl TryFrom> for DeviceListKeyAttribute { type Error = DBItemError; fn try_from(value: Option) -> Result { let item_id = String::try_from_attr(ATTR_ITEM_ID, value)?; // remove the device-list- prefix, then parse the timestamp let timestamp: DateTime = item_id .strip_prefix(DEVICE_LIST_KEY_PREFIX) .ok_or_else(|| DBItemError { attribute_name: ATTR_ITEM_ID.to_string(), attribute_value: item_id.clone().into(), attribute_error: DBItemAttributeError::InvalidValue, }) .and_then(|s| { s.parse().map_err(|e| { DBItemError::new( ATTR_ITEM_ID.to_string(), item_id.clone().into(), DBItemAttributeError::InvalidTimestamp(e), ) }) })?; Ok(Self(timestamp)) } } impl TryFrom for DeviceRow { type Error = DBItemError; fn try_from(mut attrs: AttributeMap) -> Result { let user_id = attrs.take_attr(ATTR_USER_ID)?; let DeviceIDAttribute(device_id) = attrs.remove(ATTR_ITEM_ID).try_into()?; let raw_device_type: String = attrs.take_attr(ATTR_DEVICE_TYPE)?; let device_type = DeviceType::from_str_name(&raw_device_type).ok_or_else(|| { DBItemError::new( ATTR_DEVICE_TYPE.to_string(), raw_device_type.into(), DBItemAttributeError::InvalidValue, ) })?; let device_key_info = attrs .remove(ATTR_DEVICE_KEY_INFO) .ok_or_missing(ATTR_DEVICE_KEY_INFO)? .to_hashmap(ATTR_DEVICE_KEY_INFO) .cloned() .and_then(IdentityKeyInfo::try_from)?; let content_prekey = attrs .remove(ATTR_CONTENT_PREKEY) .ok_or_missing(ATTR_CONTENT_PREKEY)? .to_hashmap(ATTR_CONTENT_PREKEY) .cloned() .and_then(PreKey::try_from)?; let notif_prekey = attrs .remove(ATTR_NOTIF_PREKEY) .ok_or_missing(ATTR_NOTIF_PREKEY)? 
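// Aside (illustrative sketch, not part of the original diff): the two sort-key
// wrappers above form a reversible encoding — per their comments, device rows
// live under a "device-"-prefixed item ID and device-list versions under a
// "device-list-"-prefixed RFC 3339 timestamp — so encoding via `From` and
// decoding via `TryFrom` should round-trip:
#[test]
fn device_id_attribute_round_trips() {
  let encoded: AttributeValue =
    DeviceIDAttribute("some-device".to_string()).into();
  let DeviceIDAttribute(decoded) = DeviceIDAttribute::try_from(Some(encoded))
    .expect("valid device item ID should decode");
  assert_eq!(decoded, "some-device");
}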
.to_hashmap(ATTR_NOTIF_PREKEY) .cloned() .and_then(PreKey::try_from)?; let code_version = attrs .remove(ATTR_CODE_VERSION) .and_then(|attr| attr.as_n().ok().cloned()) .and_then(|val| val.parse::().ok()) .unwrap_or_default(); let login_time: DateTime = attrs.take_attr(ATTR_LOGIN_TIME)?; Ok(Self { user_id, device_id, device_type, device_key_info, content_prekey, notif_prekey, code_version, login_time, }) } } impl From for AttributeMap { fn from(value: DeviceRow) -> Self { HashMap::from([ (ATTR_USER_ID.to_string(), AttributeValue::S(value.user_id)), ( ATTR_ITEM_ID.to_string(), DeviceIDAttribute(value.device_id).into(), ), ( ATTR_DEVICE_TYPE.to_string(), AttributeValue::S(value.device_type.as_str_name().to_string()), ), ( ATTR_DEVICE_KEY_INFO.to_string(), value.device_key_info.into(), ), (ATTR_CONTENT_PREKEY.to_string(), value.content_prekey.into()), (ATTR_NOTIF_PREKEY.to_string(), value.notif_prekey.into()), // migration attributes ( ATTR_CODE_VERSION.to_string(), AttributeValue::N(value.code_version.to_string()), ), ( ATTR_LOGIN_TIME.to_string(), AttributeValue::S(value.login_time.to_rfc3339()), ), ]) } } impl From for AttributeValue { fn from(value: IdentityKeyInfo) -> Self { let mut attrs = HashMap::from([ ( ATTR_KEY_PAYLOAD.to_string(), AttributeValue::S(value.key_payload), ), ( ATTR_KEY_PAYLOAD_SIGNATURE.to_string(), AttributeValue::S(value.key_payload_signature), ), ]); if let Some(social_proof) = value.social_proof { attrs.insert( ATTR_SOCIAL_PROOF.to_string(), AttributeValue::S(social_proof), ); } AttributeValue::M(attrs) } } impl TryFrom for IdentityKeyInfo { type Error = DBItemError; fn try_from(mut attrs: AttributeMap) -> Result { let key_payload = attrs.take_attr(ATTR_KEY_PAYLOAD)?; let key_payload_signature = attrs.take_attr(ATTR_KEY_PAYLOAD_SIGNATURE)?; // social proof is optional let social_proof = attrs .remove(ATTR_SOCIAL_PROOF) .map(|attr| attr.to_string(ATTR_SOCIAL_PROOF).cloned()) .transpose()?; Ok(Self { key_payload, key_payload_signature, social_proof, }) } } impl From for AttributeValue { fn from(value: PreKey) -> Self { let attrs = HashMap::from([ (ATTR_PREKEY.to_string(), AttributeValue::S(value.pre_key)), ( ATTR_PREKEY_SIGNATURE.to_string(), AttributeValue::S(value.pre_key_signature), ), ]); AttributeValue::M(attrs) } } impl TryFrom for PreKey { type Error = DBItemError; fn try_from(mut attrs: AttributeMap) -> Result { let pre_key = attrs.take_attr(ATTR_PREKEY)?; let pre_key_signature = attrs.take_attr(ATTR_PREKEY_SIGNATURE)?; Ok(Self { pre_key, pre_key_signature, }) } } impl TryFrom for DeviceListRow { type Error = DBItemError; fn try_from(mut attrs: AttributeMap) -> Result { let user_id = attrs.take_attr(ATTR_USER_ID)?; let DeviceListKeyAttribute(timestamp) = attrs.remove(ATTR_ITEM_ID).try_into()?; // validate timestamps are in sync let timestamps_match = attrs .remove(ATTR_TIMESTAMP) .and_then(|attr| attr.as_n().ok().cloned()) .and_then(|val| val.parse::().ok()) .filter(|val| *val == timestamp.timestamp_millis()) .is_some(); if !timestamps_match { warn!( "DeviceList timestamp mismatch for (userID={}, itemID={})", &user_id, timestamp.to_rfc3339() ); } let device_ids: Vec = attrs.take_attr(ATTR_DEVICE_IDS)?; Ok(Self { user_id, timestamp, device_ids, }) } } impl From for AttributeMap { fn from(device_list: DeviceListRow) -> Self { let mut attrs = HashMap::new(); attrs.insert( ATTR_USER_ID.to_string(), AttributeValue::S(device_list.user_id.clone()), ); attrs.insert( ATTR_ITEM_ID.to_string(), DeviceListKeyAttribute(device_list.timestamp).into(), ); attrs.insert( 
ATTR_TIMESTAMP.to_string(), AttributeValue::N(device_list.timestamp.timestamp_millis().to_string()), ); attrs.insert( ATTR_DEVICE_IDS.to_string(), AttributeValue::L( device_list .device_ids .into_iter() .map(AttributeValue::S) .collect(), ), ); attrs } } impl DatabaseClient { /// Retrieves user's current devices and their full data pub async fn get_current_devices( &self, user_id: impl Into, ) -> Result, Error> { let response = query_rows_with_prefix(self, user_id, DEVICE_ITEM_KEY_PREFIX) .send() .await .map_err(|e| { error!("Failed to get current devices: {:?}", e); Error::AwsSdk(e.into()) })?; let Some(rows) = response.items else { return Ok(Vec::new()); }; rows .into_iter() .map(DeviceRow::try_from) .collect::, DBItemError>>() .map_err(Error::from) } /// Gets user's device list history pub async fn get_device_list_history( &self, user_id: impl Into, since: Option>, ) -> Result, Error> { let rows = if let Some(since) = since { // When timestamp is provided, it's better to query device lists by timestamp LSI self .client .query() .table_name(devices_table::NAME) .index_name(devices_table::TIMESTAMP_INDEX_NAME) .consistent_read(true) .key_condition_expression("#user_id = :user_id AND #timestamp > :since") .expression_attribute_names("#user_id", ATTR_USER_ID) .expression_attribute_names("#timestamp", ATTR_TIMESTAMP) .expression_attribute_values( ":user_id", AttributeValue::S(user_id.into()), ) .expression_attribute_values( ":since", AttributeValue::N(since.timestamp_millis().to_string()), ) .send() .await .map_err(|e| { error!("Failed to query device list updates by index: {:?}", e); Error::AwsSdk(e.into()) })? .items } else { // Query all device lists for user query_rows_with_prefix(self, user_id, DEVICE_LIST_KEY_PREFIX) .send() .await .map_err(|e| { error!("Failed to query device list updates (all): {:?}", e); Error::AwsSdk(e.into()) })? .items }; rows .unwrap_or_default() .into_iter() .map(DeviceListRow::try_from) .collect::, DBItemError>>() .map_err(Error::from) } /// Returns all devices' keys for the given user. Response is in the same format /// as [DatabaseClient::get_keys_for_user] for compatibility reasons. 
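// Aside (illustrative, not part of the original diff): device rows and
// device-list snapshots share one partition per user, so `get_current_devices`
// above and the full-history query differ only in the sort-key prefix handed
// to `query_rows_with_prefix` (defined near the end of this file). Its key
// condition, reproduced in isolation:
fn device_rows_key_condition() -> &'static str {
  // #user_id selects the partition; begins_with on the item ID picks either
  // "device-" rows or "device-list-" rows depending on :device_prefix
  "#user_id = :user_id AND begins_with(#item_id, :device_prefix)"
}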
pub async fn get_keys_for_user_devices( &self, user_id: impl Into, ) -> Result { let user_devices = self.get_current_devices(user_id).await?; let user_devices_keys: super::Devices = user_devices .into_iter() .map(|device| { // convert this to the map type returned by DatabaseClient::get_keys_for_user let mut device_keys: super::DeviceKeys = HashMap::from([ ( USERS_TABLE_DEVICES_MAP_KEY_PAYLOAD_ATTRIBUTE_NAME.to_string(), device.device_key_info.key_payload, ), ( USERS_TABLE_DEVICES_MAP_KEY_PAYLOAD_SIGNATURE_ATTRIBUTE_NAME .to_string(), device.device_key_info.key_payload_signature, ), ( USERS_TABLE_DEVICES_MAP_CONTENT_PREKEY_ATTRIBUTE_NAME.to_string(), device.content_prekey.pre_key, ), ( USERS_TABLE_DEVICES_MAP_CONTENT_PREKEY_SIGNATURE_ATTRIBUTE_NAME .to_string(), device.content_prekey.pre_key_signature, ), ( USERS_TABLE_DEVICES_MAP_NOTIF_PREKEY_ATTRIBUTE_NAME.to_string(), device.notif_prekey.pre_key, ), ( USERS_TABLE_DEVICES_MAP_NOTIF_PREKEY_SIGNATURE_ATTRIBUTE_NAME .to_string(), device.notif_prekey.pre_key_signature, ), ]); if let Some(social_proof) = device.device_key_info.social_proof { device_keys.insert( USERS_TABLE_DEVICES_MAP_SOCIAL_PROOF_ATTRIBUTE_NAME.to_string(), social_proof, ); } (device.device_id, device_keys) }) .collect(); Ok(user_devices_keys) } pub async fn update_device_prekeys( &self, user_id: impl Into, device_id: impl Into, content_prekey: String, content_prekey_signature: String, notif_prekey: String, notif_prekey_signature: String, ) -> Result<(), Error> { let content_prekey = PreKey { pre_key: content_prekey, pre_key_signature: content_prekey_signature, }; let notif_prekey = PreKey { pre_key: notif_prekey, pre_key_signature: notif_prekey_signature, }; self .client .update_item() .table_name(devices_table::NAME) .key(ATTR_USER_ID, AttributeValue::S(user_id.into())) .key(ATTR_ITEM_ID, DeviceIDAttribute(device_id.into()).into()) .condition_expression( "attribute_exists(#user_id) AND attribute_exists(#item_id)", ) .update_expression( "SET #content_prekey = :content_prekey, #notif_prekey = :notif_prekey", ) .expression_attribute_names("#user_id", ATTR_USER_ID) .expression_attribute_names("#item_id", ATTR_ITEM_ID) .expression_attribute_names("#content_prekey", ATTR_CONTENT_PREKEY) .expression_attribute_names("#notif_prekey", ATTR_NOTIF_PREKEY) .expression_attribute_values(":content_prekey", content_prekey.into()) .expression_attribute_values(":notif_prekey", notif_prekey.into()) .send() .await .map_err(|e| { error!("Failed to update device prekeys: {:?}", e); Error::AwsSdk(e.into()) })?; Ok(()) } /// Checks if given device exists on user's current device list pub async fn device_exists( &self, user_id: impl Into, device_id: impl Into, ) -> Result { let GetItemOutput { item, .. 
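// Aside (illustrative, not part of the original diff): UpdateItem is an
// upsert by default, so without the condition in `update_device_prekeys`
// above a prekey rotation for an unknown device would silently create a
// partial row. The attribute_exists guard makes DynamoDB reject the write
// with ConditionalCheckFailed instead:
fn update_only_condition() -> &'static str {
  // both key attributes present <=> the item already exists
  "attribute_exists(#user_id) AND attribute_exists(#item_id)"
}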
} = self .client .get_item() .table_name(devices_table::NAME) .key(ATTR_USER_ID, AttributeValue::S(user_id.into())) .key(ATTR_ITEM_ID, DeviceIDAttribute(device_id.into()).into()) // only fetch the primary key, we don't need the rest .projection_expression(format!("{ATTR_USER_ID}, {ATTR_ITEM_ID}")) .send() .await .map_err(|e| { error!("Failed to check if device exists: {:?}", e); Error::AwsSdk(e.into()) })?; Ok(item.is_some()) } /// Required only for migration purposes (determining primary device) pub async fn update_device_login_time( &self, user_id: impl Into, device_id: impl Into, login_time: DateTime, ) -> Result<(), Error> { self .client .update_item() .table_name(devices_table::NAME) .key(ATTR_USER_ID, AttributeValue::S(user_id.into())) .key(ATTR_ITEM_ID, DeviceIDAttribute(device_id.into()).into()) .condition_expression( "attribute_exists(#user_id) AND attribute_exists(#item_id)", ) .update_expression("SET #login_time = :login_time") .expression_attribute_names("#user_id", ATTR_USER_ID) .expression_attribute_names("#item_id", ATTR_ITEM_ID) .expression_attribute_names("#login_time", ATTR_LOGIN_TIME) .expression_attribute_values( ":login_time", AttributeValue::S(login_time.to_rfc3339()), ) .send() .await .map_err(|e| { error!("Failed to update device login time: {:?}", e); Error::AwsSdk(e.into()) })?; Ok(()) } pub async fn get_current_device_list( &self, user_id: impl Into, ) -> Result, Error> { self .client .query() .table_name(devices_table::NAME) .index_name(devices_table::TIMESTAMP_INDEX_NAME) .consistent_read(true) .key_condition_expression("#user_id = :user_id") // sort descending .scan_index_forward(false) .expression_attribute_names("#user_id", ATTR_USER_ID) .expression_attribute_values( ":user_id", AttributeValue::S(user_id.into()), ) .limit(1) .send() .await .map_err(|e| { error!("Failed to query device list updates by index: {:?}", e); Error::AwsSdk(e.into()) })? .items .and_then(|mut items| items.pop()) .map(DeviceListRow::try_from) .transpose() .map_err(Error::from) } /// Adds new device to user's device list. If the device already exists, the /// operation fails. Transactionally generates new device list version. pub async fn add_device( &self, user_id: impl Into, device_key_upload: FlattenedDeviceKeyUpload, social_proof: Option, code_version: u64, login_time: DateTime, ) -> Result<(), Error> { let user_id: String = user_id.into(); self .transact_update_devicelist(&user_id, |device_ids, mut devices_data| { let new_device = DeviceRow::from_device_key_upload( &user_id, device_key_upload, social_proof, code_version, login_time, ); if device_ids.iter().any(|id| &new_device.device_id == id) { warn!( "Device already exists in user's device list \ (userID={}, deviceID={})", &user_id, &new_device.device_id ); return Err(Error::DeviceList(DeviceListError::DeviceAlreadyExists)); } device_ids.push(new_device.device_id.clone()); // Reorder devices (determine primary device again) devices_data.push(new_device.clone()); migration::reorder_device_list(&user_id, device_ids, &devices_data); // Put new device let put_device = Put::builder() .table_name(devices_table::NAME) .set_item(Some(new_device.into())) .condition_expression( "attribute_not_exists(#user_id) AND attribute_not_exists(#item_id)", ) .expression_attribute_names("#user_id", ATTR_USER_ID) .expression_attribute_names("#item_id", ATTR_ITEM_ID) .build(); let put_device_operation = TransactWriteItem::builder().put(put_device).build(); Ok(put_device_operation) }) .await } /// Removes device from user's device list. 
If the device doesn't exist, the /// operation fails. Transactionally generates new device list version. pub async fn remove_device( &self, user_id: impl Into, device_id: impl AsRef, ) -> Result<(), Error> { let user_id: String = user_id.into(); let device_id = device_id.as_ref(); self .transact_update_devicelist(&user_id, |device_ids, mut devices_data| { let device_exists = device_ids.iter().any(|id| id == device_id); if !device_exists { warn!( "Device doesn't exist in user's device list \ (userID={}, deviceID={})", &user_id, device_id ); return Err(Error::DeviceList(DeviceListError::DeviceNotFound)); } device_ids.retain(|id| id != device_id); // Reorder devices (determine primary device again) devices_data.retain(|d| d.device_id != device_id); migration::reorder_device_list(&user_id, device_ids, &devices_data); // Delete device DDB operation let delete_device = Delete::builder() .table_name(devices_table::NAME) .key(ATTR_USER_ID, AttributeValue::S(user_id.clone())) .key( ATTR_ITEM_ID, DeviceIDAttribute(device_id.to_string()).into(), ) .condition_expression( "attribute_exists(#user_id) AND attribute_exists(#item_id)", ) .expression_attribute_names("#user_id", ATTR_USER_ID) .expression_attribute_names("#item_id", ATTR_ITEM_ID) .build(); let operation = TransactWriteItem::builder().delete(delete_device).build(); Ok(operation) }) .await } /// Performs a transactional update of the device list for the user. Afterwards /// generates a new device list and updates the timestamp in the users table. /// This is done in a transaction. Operation fails if the device list has been /// updated concurrently (timestamp mismatch). async fn transact_update_devicelist( &self, user_id: &str, // The closure performing a transactional update of the device list. // It receives two arguments: // 1. A mutable reference to the current device list (ordered device IDs). // 2. Details (full data) of the current devices (unordered). // The closure should return a transactional DDB // operation to be performed when updating the device list. action: impl FnOnce( &mut Vec, Vec, ) -> Result, ) -> Result<(), Error> { let previous_timestamp = get_current_devicelist_timestamp(self, user_id).await?; let current_devices_data = self.get_current_devices(user_id).await?; let mut device_ids = self .get_current_device_list(user_id) .await? .map(|device_list| device_list.device_ids) .unwrap_or_default(); // Perform the update action, then generate new device list let operation = action(&mut device_ids, current_devices_data)?; let new_device_list = DeviceListRow::new(user_id, device_ids); // Update timestamp in users table let timestamp_update_operation = device_list_timestamp_update_operation( user_id, previous_timestamp, new_device_list.timestamp, ); // Put updated device list (a new version) let put_device_list = Put::builder() .table_name(devices_table::NAME) .set_item(Some(new_device_list.into())) .condition_expression( "attribute_not_exists(#user_id) AND attribute_not_exists(#item_id)", ) .expression_attribute_names("#user_id", ATTR_USER_ID) .expression_attribute_names("#item_id", ATTR_ITEM_ID) .build(); let put_device_list_operation = TransactWriteItem::builder().put(put_device_list).build(); self .client .transact_write_items() .transact_items(operation) .transact_items(put_device_list_operation) .transact_items(timestamp_update_operation) .send() .await .map_err(|e| match DynamoDBError::from(e) { DynamoDBError::TransactionCanceledException( TransactionCanceledException { cancellation_reasons: Some(reasons), .. 
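// Aside (illustrative sketch, not part of the original diff): every
// device-list mutation in `transact_update_devicelist` above lands as one
// TransactWriteItems call carrying three items: (1) the caller-provided
// device operation (put or delete), (2) the insert of the new device-list
// snapshot, and (3) the conditional timestamp bump in the users table. If any
// condition fails, all three roll back, which is what turns a lost race into
// DeviceListError::ConcurrentUpdateError. The commit in isolation (client
// type path assumed from this file's imports):
async fn commit_device_list_txn(
  client: &comm_lib::aws::ddb::Client,
  device_op: TransactWriteItem,
  put_list: TransactWriteItem,
  bump_timestamp: TransactWriteItem,
) -> Result<(), Error> {
  client
    .transact_write_items()
    .transact_items(device_op)
    .transact_items(put_list)
    .transact_items(bump_timestamp)
    .send()
    .await
    .map_err(|e| Error::AwsSdk(e.into()))?;
  Ok(())
}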
}, ) if reasons .iter() .any(|reason| reason.code() == Some("ConditionalCheckFailed")) => { Error::DeviceList(DeviceListError::ConcurrentUpdateError) } other => { error!("Device list update transaction failed: {:?}", other); Error::AwsSdk(other) } })?; Ok(()) } /// Deletes all user data from devices table pub async fn delete_devices_table_rows_for_user( &self, user_id: impl Into, ) -> Result<(), Error> { // 1. get all rows // 2. batch write delete all // we project only the primary keys so we can pass these directly to delete requests let primary_keys = self .client .query() .table_name(devices_table::NAME) .projection_expression("#user_id, #item_id") .key_condition_expression("#user_id = :user_id") .expression_attribute_names("#user_id", ATTR_USER_ID) .expression_attribute_names("#item_id", ATTR_ITEM_ID) .expression_attribute_values( ":user_id", AttributeValue::S(user_id.into()), ) .consistent_read(true) .send() .await .map_err(|e| { error!("Failed to list user's items in devices table: {:?}", e); Error::AwsSdk(e.into()) })? .items .unwrap_or_default(); let delete_requests = primary_keys .into_iter() .map(|item| { let request = DeleteRequest::builder().set_key(Some(item)).build(); WriteRequest::builder().delete_request(request).build() }) .collect::>(); // TODO: We can use the batch write helper from comm-services-lib when integrated for batch in delete_requests.chunks(25) { self .client .batch_write_item() .request_items(devices_table::NAME, batch.to_vec()) .send() .await .map_err(|e| { error!("Failed to batch delete items from devices table: {:?}", e); Error::AwsSdk(e.into()) })?; } Ok(()) } } /// Gets timestamp of user's current device list. Returns None if the user /// doesn't have a device list yet. Storing the timestamp in the users table is /// required for consistency. It's used as a condition when updating the device /// list. async fn get_current_devicelist_timestamp( db: &crate::database::DatabaseClient, user_id: impl Into, ) -> Result>, Error> { let response = db .client .get_item() .table_name(USERS_TABLE) .key(USERS_TABLE_PARTITION_KEY, AttributeValue::S(user_id.into())) .projection_expression(USERS_TABLE_DEVICELIST_TIMESTAMP_ATTRIBUTE_NAME) .send() .await .map_err(|e| { error!("Failed to get user's device list timestamp: {:?}", e); Error::AwsSdk(e.into()) })?; let mut user_item = response.item.unwrap_or_default(); let raw_datetime = user_item.remove(USERS_TABLE_DEVICELIST_TIMESTAMP_ATTRIBUTE_NAME); // existing records will not have this field when // updating device list for the first time if raw_datetime.is_none() { return Ok(None); } let timestamp = DateTime::::try_from_attr( USERS_TABLE_DEVICELIST_TIMESTAMP_ATTRIBUTE_NAME, raw_datetime, )?; Ok(Some(timestamp)) } /// Generates update expression for current device list timestamp in users table. /// The previous timestamp is used as a condition to ensure that the value hasn't changed /// since we got it. This avoids race conditions when updating the device list. 
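// Aside (illustrative, not part of the original diff): the operation built by
// `device_list_timestamp_update_operation` below is the optimistic lock for
// the whole transaction. For a first-ever device list the condition is
// `attribute_not_exists(#device_list_timestamp)`; for every later update it
// is `#device_list_timestamp = :previous_timestamp`. A hypothetical smoke
// test covering both branches:
#[test]
fn timestamp_condition_covers_both_branches() {
  // both branches should wrap an Update inside the TransactWriteItem
  let first = device_list_timestamp_update_operation("user", None, Utc::now());
  let later = device_list_timestamp_update_operation(
    "user",
    Some(Utc::now()),
    Utc::now(),
  );
  assert!(first.update().is_some());
  assert!(later.update().is_some());
}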
fn device_list_timestamp_update_operation( user_id: impl Into, previous_timestamp: Option>, new_timestamp: DateTime, ) -> TransactWriteItem { let update_builder = match previous_timestamp { Some(previous_timestamp) => Update::builder() .condition_expression("#device_list_timestamp = :previous_timestamp") .expression_attribute_values( ":previous_timestamp", AttributeValue::S(previous_timestamp.to_rfc3339()), ), // If there's no previous timestamp, the attribute shouldn't exist yet None => Update::builder() .condition_expression("attribute_not_exists(#device_list_timestamp)"), }; let update = update_builder .table_name(USERS_TABLE) .key(USERS_TABLE_PARTITION_KEY, AttributeValue::S(user_id.into())) .update_expression("SET #device_list_timestamp = :new_timestamp") .expression_attribute_names( "#device_list_timestamp", USERS_TABLE_DEVICELIST_TIMESTAMP_ATTRIBUTE_NAME, ) .expression_attribute_values( ":new_timestamp", AttributeValue::S(new_timestamp.to_rfc3339()), ) .build(); TransactWriteItem::builder().update(update).build() } /// Helper function to query rows by given sort key prefix fn query_rows_with_prefix( db: &crate::database::DatabaseClient, user_id: impl Into, prefix: &'static str, ) -> QueryFluentBuilder { db.client .query() .table_name(devices_table::NAME) .key_condition_expression( "#user_id = :user_id AND begins_with(#item_id, :device_prefix)", ) .expression_attribute_names("#user_id", ATTR_USER_ID) .expression_attribute_names("#item_id", ATTR_ITEM_ID) .expression_attribute_values(":user_id", AttributeValue::S(user_id.into())) .expression_attribute_values( ":device_prefix", AttributeValue::S(prefix.to_string()), ) .consistent_read(true) } // Helper module for "migration" code into new device list schema. // We can get rid of this when primary device takes over the responsibility // of managing the device list. mod migration { use std::{cmp::Ordering, collections::HashSet}; use tracing::{debug, error, info}; use super::*; pub(super) fn reorder_device_list( user_id: &str, list: &mut [String], devices_data: &[DeviceRow], ) { if !verify_device_list_match(list, devices_data) { error!("Found corrupt device list for user (userID={})!", user_id); return; } let Some(first_device) = list.first() else { debug!("Skipping device list rotation. Nothing to reorder."); return; }; let Some(primary_device) = determine_primary_device(devices_data) else { info!( "No valid primary device found for user (userID={}).\ Skipping device list reorder.", user_id ); return; }; if first_device == &primary_device.device_id { debug!("Skipping device list reorder. 
Primary device is already first"); return; } // swap primary device with the first one - let Some(primary_device_idx) = list - .iter() - .position(|id| id == &primary_device.device_id) else { - error!( - "Primary device not found in device list (userID={})", - user_id - ); - return; - }; + let Some(primary_device_idx) = + list.iter().position(|id| id == &primary_device.device_id) + else { + error!( + "Primary device not found in device list (userID={})", + user_id + ); + return; + }; list.swap(0, primary_device_idx); info!("Reordered device list for user (userID={})", user_id); } // checks if device list matches given devices data fn verify_device_list_match( list: &[String], devices_data: &[DeviceRow], ) -> bool { if list.len() != devices_data.len() { error!("Device list length mismatch!"); return false; } let actual_device_ids = devices_data .iter() .map(|device| &device.device_id) .collect::>(); let device_list_set = list.iter().collect::>(); if let Some(corrupt_device_id) = device_list_set.difference(&actual_device_ids).next() { error!( "Device list is corrupt (unknown deviceID={})", corrupt_device_id ); return false; } true } /// Returns reference to primary device (if any) from given list of devices /// or None if there's no valid primary device. fn determine_primary_device(devices: &[DeviceRow]) -> Option<&DeviceRow> { // 1. Find mobile devices with valid token // 2. Prioritize these with latest code version // 3. If there's a tie, select the one with latest login time let mut mobile_devices = devices .iter() .filter(|device| { device.device_type == DeviceType::Ios || device.device_type == DeviceType::Android }) .collect::>(); mobile_devices.sort_by(|a, b| { let code_version_cmp = b.code_version.cmp(&a.code_version); if code_version_cmp == Ordering::Equal { b.login_time.cmp(&a.login_time) } else { code_version_cmp } }); mobile_devices.first().cloned() } mod tests { use super::*; use chrono::Duration; #[test] fn reorder_skips_no_devices() { let mut list = vec![]; reorder_device_list("", &mut list, &[]); assert_eq!(list, Vec::::new()); } #[test] fn reorder_skips_single_device() { let mut list = vec!["test".into()]; let devices_data = vec![create_test_device("test", DeviceType::Web, 0, Utc::now())]; reorder_device_list("", &mut list, &devices_data); assert_eq!(list, vec!["test"]); } #[test] fn reorder_skips_for_valid_list() { let mut list = vec!["mobile".into(), "web".into()]; let devices_data = vec![ create_test_device("mobile", DeviceType::Android, 1, Utc::now()), create_test_device("web", DeviceType::Web, 0, Utc::now()), ]; reorder_device_list("", &mut list, &devices_data); assert_eq!(list, vec!["mobile", "web"]); } #[test] fn reorder_swaps_primary_device_when_possible() { let mut list = vec!["web".into(), "mobile".into()]; let devices_data = vec![ create_test_device("web", DeviceType::Web, 0, Utc::now()), create_test_device("mobile", DeviceType::Android, 1, Utc::now()), ]; reorder_device_list("", &mut list, &devices_data); assert_eq!(list, vec!["mobile", "web"]); } #[test] fn determine_primary_device_returns_none_for_empty_list() { let devices = vec![]; assert!(determine_primary_device(&devices).is_none()); } #[test] fn determine_primary_device_returns_none_for_web_only() { let devices = vec![create_test_device("web", DeviceType::Web, 0, Utc::now())]; assert!( determine_primary_device(&devices).is_none(), "Primary device should be None for web-only devices" ); } #[test] fn determine_primary_device_prioritizes_mobile() { let devices = vec![ create_test_device("mobile", 
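// Aside (illustrative, not part of the original diff): the comparator in
// `determine_primary_device` above sorts by code version, then login time,
// both descending; because `sort_by` is stable, exact ties keep their input
// order, which the `determine_primary_device_keeps_deterministic_order` test
// below relies on. The same ordering expressed with a sort key instead of a
// comparator:
fn sort_primary_candidates(mobile_devices: &mut Vec<&DeviceRow>) {
  mobile_devices.sort_by_key(|device| {
    // Reverse flips the tuple's natural ascending order on both fields
    std::cmp::Reverse((device.code_version, device.login_time))
  });
}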
DeviceType::Android, 0, Utc::now()), create_test_device("web", DeviceType::Web, 0, Utc::now()), ]; let primary_device = determine_primary_device(&devices) .expect("Primary device should be present"); assert_eq!( primary_device.device_id, "mobile", "Primary device should be mobile" ); } #[test] fn determine_primary_device_prioritizes_latest_code_version() { let devices_with_latest_code_version = vec![ create_test_device("mobile1", DeviceType::Android, 1, Utc::now()), create_test_device("mobile2", DeviceType::Android, 2, Utc::now()), create_test_device("web", DeviceType::Web, 0, Utc::now()), ]; let primary_device = determine_primary_device(&devices_with_latest_code_version) .expect("Primary device should be present"); assert_eq!( primary_device.device_id, "mobile2", "Primary device should be mobile with latest code version" ); } #[test] fn determine_primary_device_prioritizes_latest_login_time() { let devices = vec![ create_test_device("mobile1_today", DeviceType::Ios, 1, Utc::now()), create_test_device( "mobile2_yesterday", DeviceType::Android, 1, Utc::now() - Duration::days(1), ), create_test_device("web", DeviceType::Web, 0, Utc::now()), ]; let primary_device = determine_primary_device(&devices) .expect("Primary device should be present"); assert_eq!( primary_device.device_id, "mobile1_today", "Primary device should be mobile with latest login time" ); } #[test] fn determine_primary_device_keeps_deterministic_order() { // Given two identical devices, the first one should be selected as primary let today = Utc::now(); let devices_with_latest_code_version = vec![ create_test_device("mobile1", DeviceType::Android, 1, today), create_test_device("mobile2", DeviceType::Android, 1, today), ]; let primary_device = determine_primary_device(&devices_with_latest_code_version) .expect("Primary device should be present"); assert_eq!( primary_device.device_id, "mobile1", "Primary device selection should be deterministic" ); } #[test] fn determine_primary_device_all_rules_together() { use DeviceType::{Android, Ios, Web}; let today = Utc::now(); let yesterday = today - Duration::days(1); let devices = vec![ create_test_device("mobile1_today", Android, 1, today), create_test_device("mobile2_today", Android, 2, today), create_test_device("mobile3_yesterday", Ios, 1, yesterday), create_test_device("mobile4_yesterday", Ios, 2, yesterday), create_test_device("web", Web, 5, today), ]; let primary_device = determine_primary_device(&devices) .expect("Primary device should be present"); assert_eq!( primary_device.device_id, "mobile2_today", "Primary device should be mobile with latest code version and login time" ); } fn create_test_device( id: &str, platform: DeviceType, code_version: u64, login_time: DateTime, ) -> DeviceRow { DeviceRow { user_id: "test".into(), device_id: id.into(), device_type: platform, device_key_info: IdentityKeyInfo { key_payload: "".into(), key_payload_signature: "".into(), social_proof: None, }, content_prekey: PreKey { pre_key: "".into(), pre_key_signature: "".into(), }, notif_prekey: PreKey { pre_key: "".into(), pre_key_signature: "".into(), }, code_version, login_time, } } } } diff --git a/services/identity/src/ddb_utils.rs b/services/identity/src/ddb_utils.rs index f09143d75..8960f6b09 100644 --- a/services/identity/src/ddb_utils.rs +++ b/services/identity/src/ddb_utils.rs @@ -1,138 +1,138 @@ use chrono::{DateTime, NaiveDateTime, Utc}; use comm_lib::{ aws::ddb::types::{AttributeValue, PutRequest, WriteRequest}, - database::{AttributeExtractor, AttributeMap, Value}, + 
database::{AttributeExtractor, AttributeMap}, }; use std::collections::HashMap; use std::iter::IntoIterator; use comm_lib::database::{DBItemAttributeError, DBItemError}; use crate::constants::{ USERS_TABLE_DEVICES_MAP_SOCIAL_PROOF_ATTRIBUTE_NAME, USERS_TABLE_USERNAME_ATTRIBUTE, USERS_TABLE_WALLET_ADDRESS_ATTRIBUTE, }; #[derive(Copy, Clone, Debug)] pub enum OlmAccountType { Content, Notification, } // Prefix the one time keys with the olm account variant. This allows for a single // DDB table to contain both notification and content keys for a device. pub fn create_one_time_key_partition_key( device_id: &str, account_type: OlmAccountType, ) -> String { match account_type { OlmAccountType::Content => format!("content_{device_id}"), OlmAccountType::Notification => format!("notification_{device_id}"), } } fn create_one_time_key_put_request( device_id: &str, one_time_key: String, account_type: OlmAccountType, ) -> WriteRequest { use crate::constants::one_time_keys_table::*; let partition_key = create_one_time_key_partition_key(device_id, account_type); let builder = PutRequest::builder(); let attrs = HashMap::from([ (PARTITION_KEY.to_string(), AttributeValue::S(partition_key)), (SORT_KEY.to_string(), AttributeValue::S(one_time_key)), ]); let put_request = builder.set_item(Some(attrs)).build(); WriteRequest::builder().put_request(put_request).build() } pub fn into_one_time_put_requests( device_id: &str, one_time_keys: T, account_type: OlmAccountType, ) -> Vec where T: IntoIterator, ::Item: ToString, { one_time_keys .into_iter() .map(|otk| { create_one_time_key_put_request(device_id, otk.to_string(), account_type) }) .collect() } pub trait AttributesOptionExt { fn ok_or_missing( self, attr_name: impl Into, ) -> Result; } impl AttributesOptionExt for Option { fn ok_or_missing( self, attr_name: impl Into, ) -> Result { self.ok_or_else(|| DBItemError { attribute_name: attr_name.into(), attribute_value: None.into(), attribute_error: DBItemAttributeError::Missing, }) } } pub trait DateTimeExt { fn from_utc_timestamp_millis(timestamp: i64) -> Option>; } impl DateTimeExt for DateTime { fn from_utc_timestamp_millis(timestamp: i64) -> Option { let naive = NaiveDateTime::from_timestamp_millis(timestamp)?; Some(Self::from_utc(naive, Utc)) } } pub enum Identifier { Username(String), WalletAddress(EthereumIdentity), } pub struct EthereumIdentity { pub wallet_address: String, pub social_proof: String, } impl TryFrom for Identifier { type Error = crate::error::Error; fn try_from(mut value: AttributeMap) -> Result { let username_result = value.take_attr(USERS_TABLE_USERNAME_ATTRIBUTE); if let Ok(username) = username_result { return Ok(Identifier::Username(username)); } let wallet_address_result = value.take_attr(USERS_TABLE_WALLET_ADDRESS_ATTRIBUTE); let social_proof_result = value.take_attr(USERS_TABLE_DEVICES_MAP_SOCIAL_PROOF_ATTRIBUTE_NAME); if let (Ok(wallet_address), Ok(social_proof)) = (wallet_address_result, social_proof_result) { Ok(Identifier::WalletAddress(EthereumIdentity { wallet_address, social_proof, })) } else { Err(Self::Error::MalformedItem) } } } diff --git a/services/identity/src/error.rs b/services/identity/src/error.rs index d4c834f6e..0c106a894 100644 --- a/services/identity/src/error.rs +++ b/services/identity/src/error.rs @@ -1,130 +1,129 @@ use comm_lib::aws::ddb::types::AttributeValue; use comm_lib::aws::DynamoDBError; use comm_lib::database::{DBItemAttributeError, DBItemError, Value}; use std::collections::hash_map::HashMap; -use std::fmt::{Display, Formatter, Result as FmtResult}; use 
tracing::error;

#[derive(
  Debug, derive_more::Display, derive_more::From, derive_more::Error,
)]
pub enum Error {
  #[display(...)]
  AwsSdk(DynamoDBError),
  #[display(...)]
  Attribute(DBItemError),
  #[display(...)]
  Transport(tonic::transport::Error),
  #[display(...)]
  Status(tonic::Status),
  #[display(...)]
  MissingItem,
  #[display(...)]
  DeviceList(DeviceListError),
  #[display(...)]
  MalformedItem,
}

#[derive(Debug, derive_more::Display, derive_more::Error)]
pub enum DeviceListError {
  DeviceAlreadyExists,
  DeviceNotFound,
  ConcurrentUpdateError,
}

#[deprecated(note = "Use comm-lib traits instead")]
pub trait FromAttributeValue {
  fn to_vec(
    &self,
    attr_name: &str,
  ) -> Result<&Vec<AttributeValue>, DBItemError>;
  fn to_string(&self, attr_name: &str) -> Result<&String, DBItemError>;
  fn to_hashmap(
    &self,
    attr_name: &str,
  ) -> Result<&HashMap<String, AttributeValue>, DBItemError>;
}

fn handle_attr_failure(value: &AttributeValue, attr_name: &str) -> DBItemError {
  DBItemError {
    attribute_name: attr_name.to_string(),
    attribute_value: Value::AttributeValue(Some(value.clone())),
    attribute_error: DBItemAttributeError::IncorrectType,
  }
}

impl FromAttributeValue for AttributeValue {
  fn to_vec(
    &self,
    attr_name: &str,
  ) -> Result<&Vec<AttributeValue>, DBItemError> {
    self.as_l().map_err(|e| handle_attr_failure(e, attr_name))
  }

  fn to_string(&self, attr_name: &str) -> Result<&String, DBItemError> {
    self.as_s().map_err(|e| handle_attr_failure(e, attr_name))
  }

  fn to_hashmap(
    &self,
    attr_name: &str,
  ) -> Result<&HashMap<String, AttributeValue>, DBItemError> {
    self.as_m().map_err(|e| handle_attr_failure(e, attr_name))
  }
}

pub trait AttributeValueFromHashMap {
  fn get_string(&self, key: &str) -> Result<&String, DBItemError>;
  fn get_map(
    &self,
    key: &str,
  ) -> Result<&HashMap<String, AttributeValue>, DBItemError>;
  fn get_vec(&self, key: &str) -> Result<&Vec<AttributeValue>, DBItemError>;
}

impl AttributeValueFromHashMap for HashMap<String, AttributeValue> {
  fn get_string(&self, key: &str) -> Result<&String, DBItemError> {
    self
      .get(key)
      .ok_or(DBItemError {
        attribute_name: key.to_string(),
        attribute_value: None.into(),
        attribute_error: DBItemAttributeError::Missing,
      })?
      .to_string(key)
  }

  fn get_map(
    &self,
    key: &str,
  ) -> Result<&HashMap<String, AttributeValue>, DBItemError> {
    self
      .get(key)
      .ok_or(DBItemError {
        attribute_name: key.to_string(),
        attribute_value: None.into(),
        attribute_error: DBItemAttributeError::Missing,
      })?
      .to_hashmap(key)
  }

  fn get_vec(&self, key: &str) -> Result<&Vec<AttributeValue>, DBItemError> {
    self
      .get(key)
      .ok_or(DBItemError {
        attribute_name: key.to_string(),
        attribute_value: None.into(),
        attribute_error: DBItemAttributeError::Missing,
      })?
.to_vec(key) } } pub fn consume_error(result: Result) { match result { Ok(_) => (), Err(e) => { error!("{}", e); } } } diff --git a/services/identity/src/grpc_services/authenticated.rs b/services/identity/src/grpc_services/authenticated.rs index 1dc98f34b..5ccb2171d 100644 --- a/services/identity/src/grpc_services/authenticated.rs +++ b/services/identity/src/grpc_services/authenticated.rs @@ -1,537 +1,536 @@ use std::collections::HashMap; use crate::config::CONFIG; use crate::database::DeviceListRow; use crate::grpc_utils::DeviceInfoWithAuth; use crate::{ client_service::{ handle_db_error, CacheExt, UpdateState, WorkflowInProgress, }, constants::request_metadata, database::DatabaseClient, ddb_utils::DateTimeExt, grpc_services::shared::get_value, token::AuthType, }; use chrono::{DateTime, Utc}; use comm_opaque2::grpc::protocol_error_to_grpc_status; use moka::future::Cache; use tonic::{Request, Response, Status}; use tracing::{debug, error}; use super::protos::auth::{ find_user_id_request, identity, - identity_client_service_server::IdentityClientService, EthereumIdentity, - FindUserIdRequest, FindUserIdResponse, GetDeviceListRequest, - GetDeviceListResponse, Identity, InboundKeyInfo, InboundKeysForUserRequest, - InboundKeysForUserResponse, KeyserverKeysResponse, OutboundKeyInfo, - OutboundKeysForUserRequest, OutboundKeysForUserResponse, - RefreshUserPrekeysRequest, UpdateUserPasswordFinishRequest, - UpdateUserPasswordStartRequest, UpdateUserPasswordStartResponse, - UploadOneTimeKeysRequest, + identity_client_service_server::IdentityClientService, FindUserIdRequest, + FindUserIdResponse, GetDeviceListRequest, GetDeviceListResponse, Identity, + InboundKeyInfo, InboundKeysForUserRequest, InboundKeysForUserResponse, + KeyserverKeysResponse, OutboundKeyInfo, OutboundKeysForUserRequest, + OutboundKeysForUserResponse, RefreshUserPrekeysRequest, + UpdateUserPasswordFinishRequest, UpdateUserPasswordStartRequest, + UpdateUserPasswordStartResponse, UploadOneTimeKeysRequest, }; use super::protos::unauth::{Empty, IdentityKeyInfo, Prekey}; #[derive(derive_more::Constructor)] pub struct AuthenticatedService { db_client: DatabaseClient, cache: Cache, } fn get_auth_info(req: &Request<()>) -> Option<(String, String, String)> { debug!("Retrieving auth info for request: {:?}", req); let user_id = get_value(req, request_metadata::USER_ID)?; let device_id = get_value(req, request_metadata::DEVICE_ID)?; let access_token = get_value(req, request_metadata::ACCESS_TOKEN)?; Some((user_id, device_id, access_token)) } pub fn auth_interceptor( req: Request<()>, db_client: &DatabaseClient, ) -> Result, Status> { debug!("Intercepting request to check auth info: {:?}", req); let (user_id, device_id, access_token) = get_auth_info(&req) .ok_or_else(|| Status::unauthenticated("Missing credentials"))?; let handle = tokio::runtime::Handle::current(); let new_db_client = db_client.clone(); // This function cannot be `async`, yet must call the async db call // Force tokio to resolve future in current thread without an explicit .await let valid_token = tokio::task::block_in_place(move || { handle .block_on(new_db_client.verify_access_token( user_id, device_id, access_token, )) .map_err(handle_db_error) })?; if !valid_token { return Err(Status::aborted("Bad Credentials")); } Ok(req) } pub fn get_user_and_device_id( request: &Request, ) -> Result<(String, String), Status> { let user_id = get_value(request, request_metadata::USER_ID) .ok_or_else(|| Status::unauthenticated("Missing user_id field"))?; let device_id = get_value(request, 
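// Aside (illustrative, not part of the original diff): `auth_interceptor`
// above must stay synchronous (tonic interceptors aren't async), yet token
// verification is an async DB call. `block_in_place` marks the current
// worker thread as blocking and `Handle::block_on` drives the future to
// completion inline; this combination panics on a current-thread runtime, so
// it assumes the server runs on a multi-thread Tokio runtime. The shape in
// isolation:
fn run_blocking<T>(fut: impl std::future::Future<Output = T>) -> T {
  let handle = tokio::runtime::Handle::current();
  tokio::task::block_in_place(move || handle.block_on(fut))
}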
request_metadata::DEVICE_ID) .ok_or_else(|| Status::unauthenticated("Missing device_id field"))?; Ok((user_id, device_id)) } #[tonic::async_trait] impl IdentityClientService for AuthenticatedService { async fn refresh_user_prekeys( &self, request: Request, ) -> Result, Status> { let (user_id, device_id) = get_user_and_device_id(&request)?; let message = request.into_inner(); debug!("Refreshing prekeys for user: {}", user_id); let content_keys = message .new_content_prekeys .ok_or_else(|| Status::invalid_argument("Missing content keys"))?; let notif_keys = message .new_notif_prekeys .ok_or_else(|| Status::invalid_argument("Missing notification keys"))?; self .db_client .set_prekey( user_id, device_id, content_keys.prekey, content_keys.prekey_signature, notif_keys.prekey, notif_keys.prekey_signature, ) .await .map_err(handle_db_error)?; let response = Response::new(Empty {}); Ok(response) } async fn get_outbound_keys_for_user( &self, request: tonic::Request, ) -> Result, tonic::Status> { let message = request.into_inner(); let devices_map = self .db_client .get_keys_for_user_id(&message.user_id, true) .await .map_err(handle_db_error)? .ok_or_else(|| tonic::Status::not_found("user not found"))?; let transformed_devices = devices_map .into_iter() .filter_map(|(key, device_info)| { let device_info_with_auth = DeviceInfoWithAuth { device_info, auth_type: None, }; match OutboundKeyInfo::try_from(device_info_with_auth) { Ok(key_info) => Some((key, key_info)), Err(_) => { error!("Failed to transform device info for key {}", key); None } } }) .collect::>(); Ok(tonic::Response::new(OutboundKeysForUserResponse { devices: transformed_devices, })) } async fn get_inbound_keys_for_user( &self, request: tonic::Request, ) -> Result, tonic::Status> { use identity::IdentityInfo; let message = request.into_inner(); let devices_map = self .db_client .get_keys_for_user_id(&message.user_id, false) .await .map_err(handle_db_error)? .ok_or_else(|| tonic::Status::not_found("user not found"))?; let transformed_devices = devices_map .into_iter() .filter_map(|(key, device_info)| { let device_info_with_auth = DeviceInfoWithAuth { device_info, auth_type: None, }; match InboundKeyInfo::try_from(device_info_with_auth) { Ok(key_info) => Some((key, key_info)), Err(_) => { error!("Failed to transform device info for key {}", key); None } } }) .collect::>(); let identifier = self .db_client .get_user_identifier(&message.user_id) .await .map_err(handle_db_error)?; let identity_info = IdentityInfo::try_from(identifier)?; Ok(tonic::Response::new(InboundKeysForUserResponse { devices: transformed_devices, identity: Some(Identity { identity_info: Some(identity_info), }), })) } async fn get_keyserver_keys( &self, request: Request, ) -> Result, Status> { use identity::IdentityInfo; let message = request.into_inner(); let keyserver_info = self .db_client .get_keyserver_keys_for_user(&message.user_id) .await .map_err(handle_db_error)? 
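Both key-fetching endpoints share the same shape: map each device entry through a fallible `TryFrom`, log and drop the failures, and collect the successes so one bad record does not fail the whole response. A self-contained sketch of the pattern (the `Raw`/`Parsed` types are stand-ins, not the real `DeviceInfoWithAuth`):

```rust
use std::collections::HashMap;

struct Raw(String);
struct Parsed(u32);

impl TryFrom<Raw> for Parsed {
    type Error = std::num::ParseIntError;
    fn try_from(raw: Raw) -> Result<Self, Self::Error> {
        raw.0.parse().map(Parsed)
    }
}

fn main() {
    let devices = HashMap::from([
        ("a".to_string(), Raw("1".into())),
        ("b".to_string(), Raw("oops".into())),
    ]);

    // Convert each entry, dropping (and, in the real service, logging)
    // the ones that fail instead of aborting the whole response.
    let transformed: HashMap<String, Parsed> = devices
        .into_iter()
        .filter_map(|(key, raw)| match Parsed::try_from(raw) {
            Ok(parsed) => Some((key, parsed)),
            Err(_) => {
                eprintln!("Failed to transform device info for key {key}");
                None
            }
        })
        .collect();

    assert_eq!(transformed.len(), 1);
}
```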
.map(|db_keys| OutboundKeyInfo { identity_info: Some(IdentityKeyInfo { payload: db_keys.key_payload, payload_signature: db_keys.key_payload_signature, social_proof: db_keys.social_proof, }), content_prekey: Some(Prekey { prekey: db_keys.content_prekey.prekey, prekey_signature: db_keys.content_prekey.prekey_signature, }), notif_prekey: Some(Prekey { prekey: db_keys.notif_prekey.prekey, prekey_signature: db_keys.notif_prekey.prekey_signature, }), one_time_content_prekey: db_keys.content_one_time_key, one_time_notif_prekey: db_keys.notif_one_time_key, }); let identifier = self .db_client .get_user_identifier(&message.user_id) .await .map_err(handle_db_error)?; let identity_info = IdentityInfo::try_from(identifier)?; let identity = Some(Identity { identity_info: Some(identity_info), }); let response = Response::new(KeyserverKeysResponse { keyserver_info, identity, }); return Ok(response); } async fn upload_one_time_keys( &self, request: tonic::Request, ) -> Result, tonic::Status> { let (user_id, device_id) = get_user_and_device_id(&request)?; let message = request.into_inner(); debug!("Attempting to update one time keys for user: {}", user_id); self .db_client .append_one_time_prekeys( device_id, message.content_one_time_prekeys, message.notif_one_time_prekeys, ) .await .map_err(handle_db_error)?; Ok(tonic::Response::new(Empty {})) } async fn find_user_id( &self, request: tonic::Request, ) -> Result, tonic::Status> { let message = request.into_inner(); use find_user_id_request::Identifier; let (user_ident, auth_type) = match message.identifier { None => { return Err(tonic::Status::invalid_argument("no identifier provided")) } Some(Identifier::Username(username)) => (username, AuthType::Password), Some(Identifier::WalletAddress(address)) => (address, AuthType::Wallet), }; let (is_reserved_result, user_id_result) = tokio::join!( self .db_client .username_in_reserved_usernames_table(&user_ident), self .db_client .get_user_id_from_user_info(user_ident.clone(), &auth_type), ); let is_reserved = is_reserved_result.map_err(handle_db_error)?; let user_id = user_id_result.map_err(handle_db_error)?; Ok(Response::new(FindUserIdResponse { user_id, is_reserved, })) } async fn update_user_password_start( &self, request: tonic::Request, ) -> Result, tonic::Status> { let (user_id, _) = get_user_and_device_id(&request)?; let message = request.into_inner(); let server_registration = comm_opaque2::server::Registration::new(); let server_message = server_registration .start( &CONFIG.server_setup, &message.opaque_registration_request, user_id.as_bytes(), ) .map_err(protocol_error_to_grpc_status)?; let update_state = UpdateState { user_id }; let session_id = self .cache .insert_with_uuid_key(WorkflowInProgress::Update(update_state)) .await; let response = UpdateUserPasswordStartResponse { session_id, opaque_registration_response: server_message, }; Ok(Response::new(response)) } async fn update_user_password_finish( &self, request: tonic::Request, ) -> Result, tonic::Status> { let message = request.into_inner(); let Some(WorkflowInProgress::Update(state)) = self.cache.get(&message.session_id) else { return Err(tonic::Status::not_found("session not found")); }; self.cache.invalidate(&message.session_id).await; let server_registration = comm_opaque2::server::Registration::new(); let password_file = server_registration .finish(&message.opaque_registration_upload) .map_err(protocol_error_to_grpc_status)?; self .db_client .update_user_password(state.user_id, password_file) .await .map_err(handle_db_error)?; let response 
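`find_user_id` issues its two independent DynamoDB lookups concurrently with `tokio::join!` rather than awaiting them in sequence, so the endpoint's latency is roughly the slower of the two queries, not their sum. A minimal sketch with hypothetical lookups:

```rust
use tokio::time::{sleep, Duration};

async fn is_reserved(_ident: &str) -> Result<bool, String> {
    sleep(Duration::from_millis(50)).await; // pretend DB latency
    Ok(false)
}

async fn lookup_user_id(_ident: &str) -> Result<Option<String>, String> {
    sleep(Duration::from_millis(50)).await; // pretend DB latency
    Ok(Some("user-1".into()))
}

#[tokio::main]
async fn main() -> Result<(), String> {
    // Both futures run concurrently; join! yields once both complete.
    let (reserved_result, user_id_result) =
        tokio::join!(is_reserved("alice"), lookup_user_id("alice"));
    let is_reserved = reserved_result?;
    let user_id = user_id_result?;
    println!("reserved={is_reserved}, user_id={user_id:?}");
    Ok(())
}
```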
= Empty {}; Ok(Response::new(response)) } async fn log_out_user( &self, request: tonic::Request, ) -> Result, tonic::Status> { let (user_id, device_id) = get_user_and_device_id(&request)?; self .db_client .remove_device_from_users_table(user_id.clone(), device_id.clone()) .await .map_err(handle_db_error)?; self .db_client .delete_access_token_data(user_id, device_id) .await .map_err(handle_db_error)?; let response = Empty {}; Ok(Response::new(response)) } async fn delete_user( &self, request: tonic::Request, ) -> Result, tonic::Status> { let (user_id, _) = get_user_and_device_id(&request)?; self .db_client .delete_user(user_id) .await .map_err(handle_db_error)?; let response = Empty {}; Ok(Response::new(response)) } async fn get_device_list_for_user( &self, request: tonic::Request, ) -> Result, tonic::Status> { let GetDeviceListRequest { user_id, since_timestamp, } = request.into_inner(); let since = since_timestamp .map(|timestamp| { DateTime::::from_utc_timestamp_millis(timestamp) .ok_or_else(|| tonic::Status::invalid_argument("Invalid timestamp")) }) .transpose()?; let mut db_result = self .db_client .get_device_list_history(user_id, since) .await .map_err(handle_db_error)?; // these should be sorted already, but just in case db_result.sort_by_key(|list| list.timestamp); let device_list_updates: Vec = db_result .into_iter() .map(RawDeviceList::from) .map(SignedDeviceList::try_from_raw) .collect::, _>>()?; let stringified_updates = device_list_updates .iter() .map(serde_json::to_string) .collect::, _>>() .map_err(|err| { error!("Failed to serialize device list updates: {}", err); tonic::Status::failed_precondition("unexpected error") })?; Ok(Response::new(GetDeviceListResponse { device_list_updates: stringified_updates, })) } } // raw device list that can be serialized to JSON (and then signed in the future) #[derive(serde::Serialize)] struct RawDeviceList { devices: Vec, timestamp: i64, } impl From for RawDeviceList { fn from(row: DeviceListRow) -> Self { Self { devices: row.device_ids, timestamp: row.timestamp.timestamp_millis(), } } } #[derive(serde::Serialize)] #[serde(rename_all = "camelCase")] struct SignedDeviceList { /// JSON-stringified [`RawDeviceList`] raw_device_list: String, } impl SignedDeviceList { /// Serialize (and sign in the future) a [`RawDeviceList`] fn try_from_raw(raw: RawDeviceList) -> Result { let stringified_list = serde_json::to_string(&raw).map_err(|err| { error!("Failed to serialize raw device list: {}", err); tonic::Status::failed_precondition("unexpected error") })?; Ok(Self { raw_device_list: stringified_list, }) } } #[cfg(test)] mod tests { use super::*; #[test] fn serialize_device_list_updates() { let raw_updates = vec![ RawDeviceList { devices: vec!["device1".into()], timestamp: 111111111, }, RawDeviceList { devices: vec!["device1".into(), "device2".into()], timestamp: 222222222, }, ]; let expected_raw_list1 = r#"{"devices":["device1"],"timestamp":111111111}"#; let expected_raw_list2 = r#"{"devices":["device1","device2"],"timestamp":222222222}"#; let signed_updates = raw_updates .into_iter() .map(SignedDeviceList::try_from_raw) .collect::, _>>() .expect("signing device list updates failed"); assert_eq!(signed_updates[0].raw_device_list, expected_raw_list1); assert_eq!(signed_updates[1].raw_device_list, expected_raw_list2); let stringified_updates = signed_updates .iter() .map(serde_json::to_string) .collect::, _>>() .expect("serialize signed device lists failed"); let expected_stringified_list1 = 
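`get_device_list_for_user` validates the optional `since_timestamp` with the `Option::map` + `transpose` idiom: mapping produces `Option<Result<_, _>>`, and `transpose` flips it to `Result<Option<_>, _>` so `?` can surface a bad timestamp while an absent one stays valid. The idiom in isolation, with plain string parsing standing in for the timestamp conversion:

```rust
fn parse_since(since: Option<&str>) -> Result<Option<i64>, String> {
    since
        .map(|s| s.parse::<i64>().map_err(|e| e.to_string()))
        // Option<Result<T, E>> -> Result<Option<T>, E>
        .transpose()
}

fn main() {
    assert_eq!(parse_since(None), Ok(None)); // absent is fine
    assert_eq!(parse_since(Some("42")), Ok(Some(42)));
    assert!(parse_since(Some("foo")).is_err()); // present but invalid
}
```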
r#"{"rawDeviceList":"{\"devices\":[\"device1\"],\"timestamp\":111111111}"}"#; let expected_stringified_list2 = r#"{"rawDeviceList":"{\"devices\":[\"device1\",\"device2\"],\"timestamp\":222222222}"}"#; assert_eq!(stringified_updates[0], expected_stringified_list1); assert_eq!(stringified_updates[1], expected_stringified_list2); } } diff --git a/services/reports/src/email/mod.rs b/services/reports/src/email/mod.rs index 999ae17b4..83c81f7d1 100644 --- a/services/reports/src/email/mod.rs +++ b/services/reports/src/email/mod.rs @@ -1,73 +1,78 @@ use postmark::{ api::email::{self, SendEmailBatchRequest}, reqwest::{PostmarkClient, PostmarkClientError}, Query, }; use tracing::{debug, trace, warn}; use crate::{ config::CONFIG, report_types::{ReportID, ReportInput, ReportType}, }; pub mod config; mod template; pub type EmailError = postmark::QueryError; pub struct ReportEmail { report_type: ReportType, rendered_message: String, subject: String, } pub fn prepare_email( report_input: &ReportInput, report_id: &ReportID, user_id: Option<&str>, ) -> ReportEmail { let message = template::render_email_for_report(report_input, report_id, user_id); let subject = template::subject_for_report(report_input, user_id); ReportEmail { report_type: report_input.report_type, rendered_message: message, subject, } } pub async fn send_emails( emails: impl IntoIterator, ) -> Result<(), EmailError> { let Some(email_config) = CONFIG.email_config() else { debug!("E-mail config unavailable. Skipping sending e-mails"); return Ok(()); }; // it's cheap to build this every time let client = PostmarkClient::builder() .token(&email_config.postmark_token) .build(); let requests: SendEmailBatchRequest = emails .into_iter() .filter_map(|item| { - let Some(recipient) = email_config.recipient_for_report_type(&item.report_type) else { - warn!("Recipient E-mail for {:?} not configured. Skipping", &item.report_type); + let Some(recipient) = + email_config.recipient_for_report_type(&item.report_type) + else { + warn!( + "Recipient E-mail for {:?} not configured. 
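This diff repeatedly reflows `let ... else` bindings into rustfmt's preferred shape, as in `send_emails` above. The construct itself is a guard clause: bind on the happy path, diverge otherwise. A minimal sketch mirroring the config check (types and values are stand-ins for `CONFIG.email_config()`):

```rust
struct EmailConfig {
    postmark_token: String,
}

fn email_config() -> Option<EmailConfig> {
    // Stand-in for CONFIG.email_config(); None means e-mails are disabled.
    None
}

fn send_emails() -> Result<(), String> {
    // `let ... else` must diverge (return, break, continue, panic) in the
    // else branch; the binding is available afterwards on the happy path.
    let Some(config) = email_config() else {
        println!("E-mail config unavailable. Skipping sending e-mails");
        return Ok(());
    };
    println!("sending with token {}", config.postmark_token);
    Ok(())
}

fn main() {
    send_emails().unwrap();
}
```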
Skipping", + &item.report_type + ); return None; }; let email = email::SendEmailRequest::builder() .from(&email_config.sender_email) .to(recipient) .body(email::Body::html(item.rendered_message)) .subject(item.subject) .build(); Some(email) }) .collect(); let responses = requests.execute(&client).await?; trace!(?responses, "E-mails sent successfully."); Ok(()) } diff --git a/services/reports/src/report_utils.rs b/services/reports/src/report_utils.rs index f4aaefbe9..129777a6a 100644 --- a/services/reports/src/report_utils.rs +++ b/services/reports/src/report_utils.rs @@ -1,79 +1,84 @@ use serde_json::Value; use std::collections::{HashMap, HashSet}; type ReportContent = HashMap; /// Returns a set of keys which differ for both objects pub fn find_inconsistent_object_keys( first: &serde_json::Map, second: &serde_json::Map, ) -> HashSet { let mut non_matching_ids = HashSet::new(); for (k, v) in first { if !second.get(k).is_some_and(|it| v == it) { non_matching_ids.insert(k.to_string()); } } for k in second.keys() { if !first.contains_key(k) { non_matching_ids.insert(k.to_string()); } } non_matching_ids } pub fn inconsistent_thread_ids(content: &ReportContent) -> HashSet { - let Some(push_result) = content - .get("pushResult") - .and_then(Value::as_object) else { return HashSet::new(); }; - let Some(before_action) = content - .get("beforeAction") - .and_then(Value::as_object) else { return HashSet::new(); }; + let Some(push_result) = content.get("pushResult").and_then(Value::as_object) + else { + return HashSet::new(); + }; + let Some(before_action) = + content.get("beforeAction").and_then(Value::as_object) + else { + return HashSet::new(); + }; find_inconsistent_object_keys(push_result, before_action) } pub fn inconsistent_user_ids(content: &ReportContent) -> HashSet { - let Some(before) = content - .get("beforeStateCheck") - .and_then(Value::as_object) else { return HashSet::new(); }; - let Some(after) = content - .get("afterStateCheck") - .and_then(Value::as_object) else { return HashSet::new(); }; + let Some(before) = content.get("beforeStateCheck").and_then(Value::as_object) + else { + return HashSet::new(); + }; + let Some(after) = content.get("afterStateCheck").and_then(Value::as_object) + else { + return HashSet::new(); + }; find_inconsistent_object_keys(before, after) } pub fn inconsistent_entry_ids(_content: &ReportContent) -> HashSet { HashSet::from(["--Unimplemented--".to_string()]) } #[cfg(test)] mod tests { use super::*; use serde_json::json; #[test] fn test_inconsistent_object_keys() { let obj_a = json!({ "foo": "bar", "a": 2, "b": "x", "c": false, }); let obj_b = json!({ "foo": "bar", "a": 2, "b": "y", "D": true, }); let expected_keys = HashSet::from(["b".to_string(), "c".to_string(), "D".to_string()]); let inconsistent_keys = find_inconsistent_object_keys( obj_a.as_object().unwrap(), obj_b.as_object().unwrap(), ); assert_eq!(&expected_keys, &inconsistent_keys); } } diff --git a/shared/backup_client/src/lib.rs b/shared/backup_client/src/lib.rs index 4ff90a6f5..b9f499abf 100644 --- a/shared/backup_client/src/lib.rs +++ b/shared/backup_client/src/lib.rs @@ -1,305 +1,305 @@ pub use comm_lib::auth::UserIdentity; pub use comm_lib::backup::{ DownloadLogsRequest, LatestBackupIDResponse, LogWSRequest, LogWSResponse, UploadLogRequest, }; pub use futures_util::{SinkExt, StreamExt, TryStreamExt}; use futures_util::{Sink, Stream}; use hex::ToHex; use reqwest::{ header::InvalidHeaderValue, multipart::{Form, Part}, Body, }; use sha2::{Digest, Sha256}; use 
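`find_inconsistent_object_keys` computes a value-aware symmetric difference: a key is reported if it is missing on either side, or present on both sides with different values. A usage sketch with a condensed copy of the helper:

```rust
use serde_json::{json, Map, Value};
use std::collections::HashSet;

/// Condensed copy of the helper from services/reports/src/report_utils.rs.
fn find_inconsistent_object_keys(
    first: &Map<String, Value>,
    second: &Map<String, Value>,
) -> HashSet<String> {
    let mut non_matching = HashSet::new();
    for (k, v) in first {
        // Reported when missing in `second` or present with another value.
        if !second.get(k).is_some_and(|it| v == it) {
            non_matching.insert(k.clone());
        }
    }
    for k in second.keys() {
        if !first.contains_key(k) {
            non_matching.insert(k.clone());
        }
    }
    non_matching
}

fn main() {
    let a = json!({ "foo": 1, "same": true });
    let b = json!({ "foo": 2, "same": true, "extra": null });
    let diff = find_inconsistent_object_keys(
        a.as_object().unwrap(),
        b.as_object().unwrap(),
    );
    // "foo" differs in value, "extra" differs in presence, "same" matches.
    assert_eq!(
        diff,
        HashSet::from(["foo".to_string(), "extra".to_string()])
    );
}
```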
tokio_tungstenite::tungstenite::Message; use tokio_tungstenite::{ connect_async, tungstenite::{ client::IntoClientRequest, http::{header, Request}, Error as TungsteniteError, Message::{Binary, Ping}, }, }; #[derive(Debug, Clone)] pub struct BackupClient { url: reqwest::Url, } impl BackupClient { pub fn new>(url: T) -> Result { Ok(BackupClient { url: url.try_into()?, }) } } /// Backup functions impl BackupClient { pub async fn upload_backup( &self, user_identity: &UserIdentity, backup_data: BackupData, ) -> Result<(), Error> { let BackupData { backup_id, user_keys, user_data, attachments, } = backup_data; let client = reqwest::Client::new(); let form = Form::new() .text("backup_id", backup_id) .text( "user_keys_hash", Sha256::digest(&user_keys).encode_hex::(), ) .part("user_keys", Part::stream(Body::from(user_keys))) .text( "user_data_hash", Sha256::digest(&user_data).encode_hex::(), ) .part("user_data", Part::stream(Body::from(user_data))) .text("attachments", attachments.join("\n")); let response = client .post(self.url.join("backups")?) .bearer_auth(user_identity.as_authorization_token()?) .multipart(form) .send() .await?; response.error_for_status()?; Ok(()) } pub async fn download_backup_data( &self, backup_descriptor: &BackupDescriptor, requested_data: RequestedData, ) -> Result, Error> { let client = reqwest::Client::new(); let url = self.url.join("backups/")?; let url = match backup_descriptor { BackupDescriptor::BackupID { backup_id, .. } => { url.join(&format!("{backup_id}/"))? } BackupDescriptor::Latest { username } => { url.join(&format!("latest/{username}/"))? } }; let url = match &requested_data { RequestedData::BackupID => url.join("backup_id")?, RequestedData::UserKeys => url.join("user_keys")?, RequestedData::UserData => url.join("user_data")?, }; let mut request = client.get(url); if let BackupDescriptor::BackupID { user_identity, .. } = backup_descriptor { request = request.bearer_auth(user_identity.as_authorization_token()?) } let response = request.send().await?; let result = response.error_for_status()?.bytes().await?.to_vec(); Ok(result) } } /// Log functions impl BackupClient { pub async fn upload_logs( &self, user_identity: &UserIdentity, backup_id: &str, ) -> Result< ( impl Sink, impl Stream>, ), Error, > { let request = self.create_ws_request(user_identity, backup_id)?; let (stream, response) = connect_async(request).await?; if response.status().is_client_error() { return Err(Error::WSInitError(TungsteniteError::Http(response))); } let (tx, rx) = stream.split(); let tx = tx.with(|request: UploadLogRequest| async { let request = LogWSRequest::UploadLog(request); let request = bincode::serialize(&request)?; Ok(Binary(request)) }); let rx = rx.filter_map(|msg| async { let response = match get_log_ws_response(msg) { Some(Ok(response)) => response, Some(Err(err)) => return Some(Err(err)), None => return None, }; match response { LogWSResponse::LogUploaded { log_id } => Some(Ok(log_id)), LogWSResponse::LogDownload { .. } | LogWSResponse::LogDownloadFinished { .. 
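`upload_backup` attaches a hex-encoded SHA-256 digest next to each uploaded payload (`user_keys_hash`, `user_data_hash`) so the server can verify integrity. The hashing step in isolation, using the same `sha2` + `hex` calls as the client:

```rust
use hex::ToHex;
use sha2::{Digest, Sha256};

fn main() {
    let user_keys: Vec<u8> = b"example user keys payload".to_vec();

    // Same expression the client uses for the `user_keys_hash` form field.
    let hash: String = Sha256::digest(&user_keys).encode_hex::<String>();

    // 32-byte digest -> 64 hex characters.
    assert_eq!(hash.len(), 64);
    println!("user_keys_hash = {hash}");
}
```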
} => { Some(Err(WSError::InvalidBackupMessage)) } LogWSResponse::ServerError => Some(Err(WSError::ServerError)), } }); Ok((tx, rx)) } pub async fn download_logs( &self, user_identity: &UserIdentity, backup_id: &str, ) -> Result< ( impl Sink, impl Stream>, ), Error, > { let request = self.create_ws_request(user_identity, backup_id)?; let (stream, response) = connect_async(request).await?; if response.status().is_client_error() { return Err(Error::WSInitError(TungsteniteError::Http(response))); } let (tx, rx) = stream.split(); let tx = tx.with(|request: DownloadLogsRequest| async { let request = LogWSRequest::DownloadLogs(request); let request = bincode::serialize(&request)?; Ok(Binary(request)) }); let rx = rx.filter_map(|msg| async { let response = match get_log_ws_response(msg) { Some(Ok(response)) => response, Some(Err(err)) => return Some(Err(err)), None => return None, }; match response { LogWSResponse::LogDownloadFinished { .. } | LogWSResponse::LogDownload { .. } => Some(Ok(response)), LogWSResponse::LogUploaded { .. } => { - return Some(Err(WSError::InvalidBackupMessage)) + Some(Err(WSError::InvalidBackupMessage)) } - LogWSResponse::ServerError => return Some(Err(WSError::ServerError)), + LogWSResponse::ServerError => Some(Err(WSError::ServerError)), } }); Ok((tx, rx)) } fn create_ws_request( &self, user_identity: &UserIdentity, backup_id: &str, ) -> Result, Error> { let mut url = self.url.clone(); match url.scheme() { "http" => url.set_scheme("ws")?, "https" => url.set_scheme("wss")?, _ => (), }; let url = url.join("logs/")?.join(backup_id)?; let mut request = url.into_client_request().unwrap(); let token = user_identity.as_authorization_token()?; request .headers_mut() .insert(header::AUTHORIZATION, format!("Bearer {token}").parse()?); Ok(request) } } #[derive(Debug, Clone)] pub struct BackupData { pub backup_id: String, pub user_keys: Vec, pub user_data: Vec, pub attachments: Vec, } #[derive(Debug, Clone)] pub enum BackupDescriptor { BackupID { backup_id: String, user_identity: UserIdentity, }, Latest { username: String, }, } #[derive(Debug, Clone, Copy)] pub enum RequestedData { BackupID, UserKeys, UserData, } #[derive( Debug, derive_more::Display, derive_more::Error, derive_more::From, )] pub enum Error { InvalidAuthorizationHeader, UrlError(url::ParseError), ReqwestError(reqwest::Error), WSInitError(TungsteniteError), JsonError(serde_json::Error), } impl From for Error { fn from(_: InvalidHeaderValue) -> Self { Self::InvalidAuthorizationHeader } } #[derive( Debug, derive_more::Display, derive_more::Error, derive_more::From, )] pub enum WSError { BincodeError(bincode::Error), TungsteniteError(TungsteniteError), InvalidWSMessage, InvalidBackupMessage, ServerError, } fn get_log_ws_response( msg: Result, ) -> Option> { let bytes = match msg { Ok(Binary(bytes)) => bytes, // Handled by tungstenite Ok(Ping(_)) => return None, Ok(_) => return Some(Err(WSError::InvalidWSMessage)), Err(err) => return Some(Err(err.into())), }; match bincode::deserialize(&bytes) { Ok(response) => Some(Ok(response)), Err(err) => Some(Err(err.into())), } } diff --git a/shared/comm-lib/src/blob/client.rs b/shared/comm-lib/src/blob/client.rs index 47ba266da..a972fc22e 100644 --- a/shared/comm-lib/src/blob/client.rs +++ b/shared/comm-lib/src/blob/client.rs @@ -1,407 +1,408 @@ use bytes::Bytes; use derive_more::{Display, Error, From}; use futures_core::Stream; use futures_util::StreamExt; use reqwest::{ multipart::{Form, Part}, Body, Method, RequestBuilder, }; use tracing::{debug, error, trace, warn}; // 
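Both `upload_logs` and `download_logs` wrap the raw WebSocket with `SinkExt::with` (serialize outgoing requests into binary frames) and `StreamExt::filter_map` (decode incoming frames, skipping pings), so callers get strongly typed endpoints. The adapter pattern in miniature over an in-memory channel; `Req` and the channel are stand-ins, with `bincode` used as in the client:

```rust
use futures::channel::mpsc;
use futures::{SinkExt, StreamExt};
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Req {
    log_id: u64,
}

#[tokio::main]
async fn main() {
    // In-memory stand-in for the WebSocket's binary frames.
    let (tx, rx) = mpsc::unbounded::<Vec<u8>>();

    // Typed sink: serialize each `Req` into a binary frame before sending,
    // mirroring the `tx.with(...)` adapter in upload_logs/download_logs.
    let mut typed_tx = tx.with(|req: Req| async move {
        Ok::<_, mpsc::SendError>(bincode::serialize(&req).unwrap())
    });

    // Typed stream: decode frames, dropping anything that fails to parse
    // (the real client also maps errors and skips Ping frames here).
    let mut typed_rx = rx.filter_map(|bytes| async move {
        bincode::deserialize::<Req>(&bytes).ok()
    });

    typed_tx.send(Req { log_id: 7 }).await.unwrap();
    drop(typed_tx); // close the channel so the stream terminates
    assert_eq!(typed_rx.next().await, Some(Req { log_id: 7 }));
}
```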
publicly re-export some reqwest types pub use reqwest::Error as ReqwestError; pub use reqwest::StatusCode; pub use reqwest::Url; use crate::auth::{AuthorizationCredential, UserIdentity}; #[derive(From, Error, Debug, Display)] pub enum BlobServiceError { /// HTTP Client errors, this includes: /// - connection failures /// - request errors (e.g. upload input stream) #[display(...)] ClientError(ReqwestError), /// Invalid Blob service URL provided #[display(...)] URLError(#[error(ignore)] String), /// Blob service returned HTTP 404 /// - blob or holder not found #[display(...)] NotFound, /// Blob service returned HTTP 409 /// - blob or holder already exists #[display(...)] AlreadyExists, /// Blob service returned HTTP 400 /// - invalid holder or blob_hash format #[display(...)] InvalidArguments, /// Blob service returned HTTP 50x #[display(...)] ServerError, #[display(...)] UnexpectedHttpStatus(#[error(ignore)] reqwest::StatusCode), #[display(...)] UnexpectedError, } /// A client interface to Blob service. // /// The `BlobServiceClient` holds a connection pool internally, so it is advised that /// you create one and **reuse** it, by **cloning**. /// /// A clone is recommended for each individual client identity, that has /// a different `UserIdentity`. /// /// # Example /// ```ignore /// // create client for user A /// let clientA = BlobServiceClient::new(blob_endpoint).with_user_identity(userA); /// /// // reuse the client connection with different credentials /// let clientB = clientA.with_user_identity(userB); /// /// // clientA still uses userA credentials /// ``` /// /// A clone is recommended when the client concurrently handles multiple identities - /// e.g. a HTTP service handling requests from different users. The `with_user_identity()` /// method should be used for this purpose. /// /// You should **not** wrap the `BlobServiceClient` in an `Rc` or `Arc` to **reuse** it, /// because it already uses an `Arc` internally. #[derive(Clone)] pub struct BlobServiceClient { http_client: reqwest::Client, blob_service_url: reqwest::Url, auth_credential: Option, } impl BlobServiceClient { /// Creates a new Blob Service client instance. It is unauthenticated by default /// so you need to call `with_user_identity()` afterwards: /// ```ignore /// let client = BlobServiceClient::new(blob_endpoint).with_user_identity(user); /// ``` /// /// **Note**: It is advised to create this client once and reuse by **cloning**. /// See [`BlobServiceClient`] docs for details pub fn new(blob_service_url: reqwest::Url) -> Self { debug!("Creating BlobServiceClient. URL: {}", blob_service_url); Self { http_client: reqwest::Client::new(), blob_service_url, auth_credential: None, } } /// Clones the client and sets the [`UserIdentity`] for the new instance. /// This allows the client to reuse the same connection pool for different users. /// /// This is the same as calling /// ```ignore /// client.with_authentication(AuthorizationCredential::UserToken(user_identity)) /// ```` pub fn with_user_identity(&self, user_identity: UserIdentity) -> Self { self.with_authentication(AuthorizationCredential::UserToken(user_identity)) } /// Clones the client and sets the [`AuthorizationCredential`] for the new instance. /// This allows the client to reuse the same connection pool for different users. 
pub fn with_authentication( &self, auth_credential: AuthorizationCredential, ) -> Self { trace!("Set auth_credential: {:?}", &auth_credential); let mut this = self.clone(); this.auth_credential = Some(auth_credential); this } /// Downloads blob with given [`blob_hash`]. /// /// @returns a stream of blob bytes /// /// # Errors thrown /// - [BlobServiceError::NotFound] if blob with given hash does not exist /// - [BlobServiceError::InvalidArguments] if blob hash has incorrect format /// /// # Example /// ```ignore /// let client = /// BlobServiceClient::new("http://localhost:50053".parse()?); /// /// let mut stream = client.get("hello").await?; /// while let Some(data) = stream.try_next().await? { /// println!("Got data: {:?}", data); /// } /// ``` pub async fn get( &self, blob_hash: &str, ) -> BlobResult>> { debug!(?blob_hash, "Get blob request"); let url = self.get_blob_url(Some(blob_hash))?; let response = self .request(Method::GET, url)? .send() .await .map_err(BlobServiceError::ClientError)?; debug!("Response status: {}", response.status()); if response.status().is_success() { let stream = response.bytes_stream().map(|result| match result { Ok(bytes) => Ok(bytes), Err(error) => { warn!("Error while streaming response: {}", error); Err(BlobServiceError::ClientError(error)) } }); return Ok(stream); } let error = handle_http_error(response.status()); if let Ok(message) = response.text().await { trace!("Error response message: {}", message); } Err(error) } /// Assigns a new holder to a blob represented by [`blob_hash`]. /// Returns `BlobServiceError::AlreadyExists` if blob already has /// a holder with given [`holder`] name. pub async fn assign_holder( &self, blob_hash: &str, holder: &str, ) -> BlobResult { debug!("Assign holder request"); let url = self.get_blob_url(None)?; let payload = AssignHolderRequest { holder: holder.to_string(), blob_hash: blob_hash.to_string(), }; debug!("Request payload: {:?}", payload); let response = self .request(Method::POST, url)? .json(&payload) .send() .await?; debug!("Response status: {}", response.status()); if response.status().is_success() { let AssignHolderResponse { data_exists } = response.json().await?; trace!("Data exists: {}", data_exists); return Ok(data_exists); } let error = handle_http_error(response.status()); if let Ok(message) = response.text().await { trace!("Error response message: {}", message); } Err(error) } /// Revokes given holder from a blob represented by [`blob_hash`]. /// Returns `BlobServiceError::NotFound` if blob with given hash does not exist /// or it does not have such holder pub async fn revoke_holder( &self, blob_hash: &str, holder: &str, ) -> BlobResult<()> { debug!("Revoke holder request"); let url = self.get_blob_url(None)?; let payload = RevokeHolderRequest { holder: holder.to_string(), blob_hash: blob_hash.to_string(), }; debug!("Request payload: {:?}", payload); let response = self .request(Method::DELETE, url)? .json(&payload) .send() .await?; debug!("Response status: {}", response.status()); if response.status().is_success() { trace!("Revoke holder request successful"); return Ok(()); } let error = handle_http_error(response.status()); if let Ok(message) = response.text().await { trace!("Error response message: {}", message); } Err(error) } /// Uploads a blob. Returns `BlobServiceError::AlreadyExists` if blob with given hash /// already exists. 
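`get` returns the blob body as a stream, mapping each chunk's transport error into `BlobServiceError` as it flows through rather than buffering the whole blob in memory. The map-over-stream shape, with an in-memory stream standing in for `response.bytes_stream()`:

```rust
use futures::stream::{self, StreamExt, TryStreamExt};

#[derive(Debug)]
enum BlobServiceError {
    ClientError(String),
}

#[tokio::main]
async fn main() {
    // Stand-in for response.bytes_stream(): chunks or transport errors.
    let raw = stream::iter(vec![
        Ok::<&[u8], String>(b"hello ".as_slice()),
        Ok(b"world".as_slice()),
    ]);

    // Same shape as BlobServiceClient::get: wrap errors, pass chunks on.
    let mut mapped =
        raw.map(|result| result.map_err(BlobServiceError::ClientError));

    let mut buf = Vec::new();
    while let Some(chunk) = mapped.try_next().await.unwrap() {
        buf.extend_from_slice(chunk);
    }
    assert_eq!(buf, b"hello world");
}
```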
/// /// # Example /// ```ignore /// use std::io::{Error, ErrorKind}; /// /// let client = /// BlobServiceClient::new("http://localhost:50053".parse()?); /// /// let stream = async_stream::stream! { /// yield Ok(vec![1, 2, 3]); /// yield Ok(vec![4, 5, 6]); /// yield Err(Error::new(ErrorKind::Other, "Oops")); /// }; /// client.upload_blob(&blob_hash, stream).await?; /// ``` pub async fn upload_blob( &self, blob_hash: H, data_stream: S, ) -> BlobResult<()> where H: Into, S: futures_core::stream::TryStream + Send + Sync + 'static, S::Error: Into>, Bytes: From, { debug!("Upload blob request"); let url = self.get_blob_url(None)?; let streaming_body = Body::wrap_stream(data_stream); let form = Form::new() .text("blob_hash", blob_hash.into()) .part("blob_data", Part::stream(streaming_body)); let response = self .request(Method::PUT, url)? .multipart(form) .send() .await?; debug!("Response status: {}", response.status()); if response.status().is_success() { trace!("Blob upload successful"); return Ok(()); } let error = handle_http_error(response.status()); if let Ok(message) = response.text().await { trace!("Error response message: {}", message); } Err(error) } /// A wrapper around [`BlobServiceClient::assign_holder`] and [`BlobServiceClient::upload_blob`]. /// /// Assigns a new holder to a blob represented by [`blob_hash`]. If the blob does not exist, /// uploads the data from [`data_stream`]. pub async fn simple_put( &self, blob_hash: &str, holder: &str, data_stream: S, ) -> BlobResult where S: futures_core::stream::TryStream + Send + Sync + 'static, S::Error: Into>, Bytes: From, { trace!("Begin simple put. Assigning holder..."); let data_exists = self.assign_holder(blob_hash, holder).await?; if data_exists { trace!("Blob data already exists. Skipping upload."); return Ok(false); } trace!("Uploading blob data..."); - let Err(upload_error) = self.upload_blob(blob_hash, data_stream).await else { + let Err(upload_error) = self.upload_blob(blob_hash, data_stream).await + else { return Ok(true); }; trace!(%blob_hash, %holder, "Revoking holder due to upload failure"); self.schedule_revoke_holder(blob_hash, holder); Err(upload_error) } /// Revokes holder in a separate task. Useful to clean up after /// upload failure without blocking the current task. 
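`simple_put` is a small compensation workflow: assign the holder first (cheap existence check), upload only when the data is new, and schedule the holder's revocation if the upload fails so no dangling reference survives. The control flow in isolation, with hypothetical stand-ins for the three service calls:

```rust
// Hypothetical stand-ins for assign_holder / upload_blob /
// schedule_revoke_holder from BlobServiceClient.
fn assign_holder() -> Result<bool, String> {
    Ok(false) // false: blob data does not exist yet
}
fn upload_blob() -> Result<(), String> {
    Err("network error".into())
}
fn schedule_revoke_holder() {
    println!("revoking holder in background");
}

fn simple_put() -> Result<bool, String> {
    let data_exists = assign_holder()?;
    if data_exists {
        // Nothing to upload; the holder now points at existing data.
        return Ok(false);
    }
    let Err(upload_error) = upload_blob() else {
        return Ok(true); // uploaded fresh data
    };
    // Compensate: don't leave a holder for data that never arrived.
    schedule_revoke_holder();
    Err(upload_error)
}

fn main() {
    assert!(simple_put().is_err());
}
```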
pub fn schedule_revoke_holder( &self, blob_hash: impl Into, holder: impl Into, ) { let this = self.clone(); let blob_hash: String = blob_hash.into(); let holder: String = holder.into(); tokio::spawn(async move { if let Err(err) = this.revoke_holder(&blob_hash, &holder).await { warn!("Failed to revoke holder: {0:?} - {0}", err); } }); } } // private helper methods impl BlobServiceClient { fn get_blob_url( &self, blob_hash: Option<&str>, ) -> Result { let path = match blob_hash { Some(hash) => format!("/blob/{}", hash), None => "/blob".to_string(), }; let url = self .blob_service_url .join(&path) .map_err(|err| BlobServiceError::URLError(err.to_string()))?; trace!("Constructed request URL: {}", url); Ok(url) } fn request( &self, http_method: Method, url: Url, ) -> BlobResult { let request = self.http_client.request(http_method, url); match &self.auth_credential { Some(credential) => { let token = credential.as_authorization_token().map_err(|e| { error!("Failed to parse authorization token: {}", e); BlobServiceError::UnexpectedError })?; Ok(request.bearer_auth(token)) } None => Ok(request), } } } fn handle_http_error(status_code: StatusCode) -> BlobServiceError { match status_code { StatusCode::BAD_REQUEST => BlobServiceError::InvalidArguments, StatusCode::NOT_FOUND => BlobServiceError::NotFound, StatusCode::CONFLICT => BlobServiceError::AlreadyExists, code if code.is_server_error() => BlobServiceError::ServerError, code => BlobServiceError::UnexpectedHttpStatus(code), } } type BlobResult = Result; #[derive(serde::Deserialize)] struct AssignHolderResponse { data_exists: bool, } #[derive(Debug, serde::Serialize)] struct AssignHolderRequest { blob_hash: String, holder: String, } // they have the same layout so we can simply alias type RevokeHolderRequest = AssignHolderRequest; diff --git a/shared/comm-lib/src/blob/types.rs b/shared/comm-lib/src/blob/types.rs index ac6fd28ae..3c4425353 100644 --- a/shared/comm-lib/src/blob/types.rs +++ b/shared/comm-lib/src/blob/types.rs @@ -1,69 +1,69 @@ use derive_more::Constructor; use hex::ToHex; use sha2::{Digest, Sha256}; /// Blob owning information - stores both blob_hash and holder #[derive(Clone, Debug, Constructor)] pub struct BlobInfo { pub blob_hash: String, pub holder: String, } impl BlobInfo { pub fn from_bytes(data: &[u8]) -> Self { Self { - blob_hash: Sha256::digest(&data).encode_hex(), + blob_hash: Sha256::digest(data).encode_hex(), holder: uuid::Uuid::new_v4().to_string(), } } } #[cfg(feature = "aws")] mod db_conversions { use super::*; use crate::database::{AttributeTryInto, DBItemError, TryFromAttribute}; use aws_sdk_dynamodb::types::AttributeValue; use std::collections::HashMap; const BLOB_HASH_DDB_MAP_KEY: &str = "blob_hash"; const HOLDER_DDB_MAP_KEY: &str = "holder"; impl From for AttributeValue { fn from(value: BlobInfo) -> Self { let map = HashMap::from([ ( BLOB_HASH_DDB_MAP_KEY.to_string(), AttributeValue::S(value.blob_hash), ), ( HOLDER_DDB_MAP_KEY.to_string(), AttributeValue::S(value.holder), ), ]); AttributeValue::M(map) } } impl From<&BlobInfo> for AttributeValue { fn from(value: &BlobInfo) -> Self { AttributeValue::from(value.to_owned()) } } impl TryFromAttribute for BlobInfo { fn try_from_attr( attribute_name: impl Into, attribute: Option, ) -> Result { let attr_name: String = attribute_name.into(); let mut inner_map: HashMap = attribute.attr_try_into(&attr_name)?; let blob_hash = inner_map .remove("blob_hash") .attr_try_into(format!("{attr_name}.blob_hash"))?; let holder = inner_map .remove("holder") 
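`handle_http_error` centralizes the status-to-error mapping so every endpoint reports `NotFound`, `AlreadyExists`, and so on uniformly. A condensed copy with a quick check:

```rust
use reqwest::StatusCode;

#[derive(Debug, PartialEq)]
enum BlobServiceError {
    InvalidArguments,
    NotFound,
    AlreadyExists,
    ServerError,
    UnexpectedHttpStatus(StatusCode),
}

// Same mapping as in shared/comm-lib/src/blob/client.rs.
fn handle_http_error(status_code: StatusCode) -> BlobServiceError {
    match status_code {
        StatusCode::BAD_REQUEST => BlobServiceError::InvalidArguments,
        StatusCode::NOT_FOUND => BlobServiceError::NotFound,
        StatusCode::CONFLICT => BlobServiceError::AlreadyExists,
        code if code.is_server_error() => BlobServiceError::ServerError,
        code => BlobServiceError::UnexpectedHttpStatus(code),
    }
}

fn main() {
    assert_eq!(
        handle_http_error(StatusCode::CONFLICT),
        BlobServiceError::AlreadyExists
    );
    assert_eq!(
        handle_http_error(StatusCode::BAD_GATEWAY),
        BlobServiceError::ServerError
    );
}
```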
.attr_try_into(format!("{attr_name}.holder"))?; Ok(BlobInfo { blob_hash, holder }) } } } diff --git a/shared/comm-lib/src/database.rs b/shared/comm-lib/src/database.rs index 600ce53d5..26b69340b 100644 --- a/shared/comm-lib/src/database.rs +++ b/shared/comm-lib/src/database.rs @@ -1,820 +1,820 @@ use aws_sdk_dynamodb::types::AttributeValue; pub use aws_sdk_dynamodb::Error as DynamoDBError; use chrono::{DateTime, Utc}; use std::collections::HashSet; use std::fmt::{Display, Formatter}; use std::num::ParseIntError; use std::str::FromStr; #[cfg(feature = "blob-client")] pub mod blob; // # Useful type aliases // Rust exports `pub type` only into the so-called "type namespace", but in // order to use them e.g. with the `TryFromAttribute` trait, they also need // to be exported into the "value namespace" which is what `pub use` does. // // To overcome that, a dummy module is created and aliases are re-exported // with `pub use` construct mod aliases { use aws_sdk_dynamodb::types::AttributeValue; use std::collections::HashMap; pub type AttributeMap = HashMap; } pub use self::aliases::AttributeMap; // # Error handling #[derive( Debug, derive_more::Display, derive_more::From, derive_more::Error, )] pub enum Error { #[display(...)] AwsSdk(DynamoDBError), #[display(...)] Attribute(DBItemError), #[display(fmt = "Maximum retries exceeded")] MaxRetriesExceeded, } #[derive(Debug, derive_more::From)] pub enum Value { AttributeValue(Option), String(String), } #[derive(Debug, derive_more::Error, derive_more::Constructor)] pub struct DBItemError { pub attribute_name: String, pub attribute_value: Value, pub attribute_error: DBItemAttributeError, } impl Display for DBItemError { fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { match &self.attribute_error { DBItemAttributeError::Missing => { write!(f, "Attribute {} is missing", self.attribute_name) } DBItemAttributeError::IncorrectType => write!( f, "Value for attribute {} has incorrect type: {:?}", self.attribute_name, self.attribute_value ), error => write!( f, "Error regarding attribute {} with value {:?}: {}", self.attribute_name, self.attribute_value, error ), } } } #[derive(Debug, derive_more::Display, derive_more::Error)] pub enum DBItemAttributeError { #[display(...)] Missing, #[display(...)] IncorrectType, #[display(...)] TimestampOutOfRange, #[display(...)] InvalidTimestamp(chrono::ParseError), #[display(...)] InvalidNumberFormat(ParseIntError), #[display(...)] ExpiredTimestamp, #[display(...)] InvalidValue, } /// Conversion trait for [`AttributeValue`] /// /// Types implementing this trait are able to do the following: /// ```ignore /// use comm_lib::database::{TryFromAttribute, AttributeTryInto}; /// /// let foo = SomeType::try_from_attr("MyAttribute", Some(attribute))?; /// /// // if `AttributeTryInto` is imported, also: /// let bar = Some(attribute).attr_try_into("MyAttribute")?; /// ``` pub trait TryFromAttribute: Sized { fn try_from_attr( attribute_name: impl Into, attribute: Option, ) -> Result; } /// Do NOT implement this trait directly. 
Implement [`TryFromAttribute`] instead pub trait AttributeTryInto { fn attr_try_into( self, attribute_name: impl Into, ) -> Result; } // Automatic attr_try_into() for all attribute values // that have TryFromAttribute implemented impl AttributeTryInto for Option { fn attr_try_into( self, attribute_name: impl Into, ) -> Result { T::try_from_attr(attribute_name, self) } } /// Helper trait for extracting attributes from a collection pub trait AttributeExtractor { /// Gets an attribute from the map and tries to convert it to the given type /// This method does not consume the raw attribute - it gets cloned /// See [`AttributeExtractor::take_attr`] for a non-cloning method fn get_attr( &self, attribute_name: &str, ) -> Result; /// Takes an attribute from the map and tries to convert it to the given type /// This method consumes the raw attribute - it gets removed from the map /// See [`AttributeExtractor::get_attr`] for a non-mutating method fn take_attr( &mut self, attribute_name: &str, ) -> Result; } impl AttributeExtractor for AttributeMap { fn get_attr( &self, attribute_name: &str, ) -> Result { T::try_from_attr(attribute_name, self.get(attribute_name).cloned()) } fn take_attr( &mut self, attribute_name: &str, ) -> Result { T::try_from_attr(attribute_name, self.remove(attribute_name)) } } impl TryFromAttribute for String { fn try_from_attr( attribute_name: impl Into, attribute_value: Option, ) -> Result { match attribute_value { Some(AttributeValue::S(value)) => Ok(value), Some(_) => Err(DBItemError::new( attribute_name.into(), Value::AttributeValue(attribute_value), DBItemAttributeError::IncorrectType, )), None => Err(DBItemError::new( attribute_name.into(), Value::AttributeValue(attribute_value), DBItemAttributeError::Missing, )), } } } impl TryFromAttribute for bool { fn try_from_attr( attribute_name: impl Into, attribute_value: Option, ) -> Result { match attribute_value { Some(AttributeValue::Bool(value)) => Ok(value), Some(_) => Err(DBItemError::new( attribute_name.into(), Value::AttributeValue(attribute_value), DBItemAttributeError::IncorrectType, )), None => Err(DBItemError::new( attribute_name.into(), Value::AttributeValue(attribute_value), DBItemAttributeError::Missing, )), } } } impl TryFromAttribute for DateTime { fn try_from_attr( attribute_name: impl Into, attribute: Option, ) -> Result { match &attribute { Some(AttributeValue::S(datetime)) => datetime.parse().map_err(|e| { DBItemError::new( attribute_name.into(), Value::AttributeValue(attribute), DBItemAttributeError::InvalidTimestamp(e), ) }), Some(_) => Err(DBItemError::new( attribute_name.into(), Value::AttributeValue(attribute), DBItemAttributeError::IncorrectType, )), None => Err(DBItemError::new( attribute_name.into(), Value::AttributeValue(attribute), DBItemAttributeError::Missing, )), } } } impl TryFromAttribute for AttributeMap { fn try_from_attr( attribute_name: impl Into, attribute_value: Option, ) -> Result { match attribute_value { Some(AttributeValue::M(map)) => Ok(map), Some(_) => Err(DBItemError::new( attribute_name.into(), Value::AttributeValue(attribute_value), DBItemAttributeError::IncorrectType, )), None => Err(DBItemError::new( attribute_name.into(), Value::AttributeValue(attribute_value), DBItemAttributeError::Missing, )), } } } impl TryFromAttribute for Vec { fn try_from_attr( attribute_name: impl Into, attribute_value: Option, ) -> Result { match attribute_value { Some(AttributeValue::B(data)) => Ok(data.into_inner()), Some(_) => Err(DBItemError::new( attribute_name.into(), 
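`AttributeExtractor` gives `AttributeMap` two retrieval flavors: `get_attr` clones the raw attribute and leaves the map intact, while `take_attr` removes it and avoids the clone. The trade-off sketched with a plain `HashMap` standing in for the DynamoDB item:

```rust
use std::collections::HashMap;

fn main() {
    let mut item: HashMap<String, String> =
        HashMap::from([("user_id".into(), "123".into())]);

    // get_attr-style: clone the value; the map still owns the original.
    let user_id: String = item.get("user_id").cloned().expect("missing");

    // take_attr-style: move the value out, no clone; the attribute is
    // consumed, so a second take would report it as Missing.
    let taken: String = item.remove("user_id").expect("missing");

    assert_eq!(user_id, taken);
    assert!(item.get("user_id").is_none());
}
```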
Value::AttributeValue(attribute_value), DBItemAttributeError::IncorrectType, )), None => Err(DBItemError::new( attribute_name.into(), Value::AttributeValue(attribute_value), DBItemAttributeError::Missing, )), } } } impl TryFromAttribute for HashSet { fn try_from_attr( attribute_name: impl Into, attribute_value: Option, ) -> Result { match attribute_value { Some(AttributeValue::Ss(set)) => Ok(set.into_iter().collect()), Some(_) => Err(DBItemError::new( attribute_name.into(), Value::AttributeValue(attribute_value), DBItemAttributeError::IncorrectType, )), None => Err(DBItemError::new( attribute_name.into(), Value::AttributeValue(attribute_value), DBItemAttributeError::Missing, )), } } } impl TryFromAttribute for Vec { fn try_from_attr( attribute_name: impl Into, attribute: Option, ) -> Result { let attribute_name = attribute_name.into(); match attribute { Some(AttributeValue::L(list)) => Ok( list .into_iter() .map(|attribute| { T::try_from_attr(format!("{attribute_name}[i]"), Some(attribute)) }) .collect::, _>>()?, ), Some(_) => Err(DBItemError::new( - attribute_name.into(), + attribute_name, Value::AttributeValue(attribute), DBItemAttributeError::IncorrectType, )), None => Err(DBItemError::new( - attribute_name.into(), + attribute_name, Value::AttributeValue(attribute), DBItemAttributeError::Missing, )), } } } #[deprecated = "Use `String::try_from_attr()` instead"] pub fn parse_string_attribute( attribute_name: impl Into, attribute_value: Option, ) -> Result { String::try_from_attr(attribute_name, attribute_value) } #[deprecated = "Use `bool::try_from_attr()` instead"] pub fn parse_bool_attribute( attribute_name: impl Into, attribute_value: Option, ) -> Result { bool::try_from_attr(attribute_name, attribute_value) } #[deprecated = "Use `DateTime::::try_from_attr()` instead"] pub fn parse_datetime_attribute( attribute_name: impl Into, attribute_value: Option, ) -> Result, DBItemError> { DateTime::::try_from_attr(attribute_name, attribute_value) } #[deprecated = "Use `AttributeMap::try_from_attr()` instead"] pub fn parse_map_attribute( attribute_name: impl Into, attribute_value: Option, ) -> Result { attribute_value.attr_try_into(attribute_name) } pub fn parse_int_attribute( attribute_name: impl Into, attribute_value: Option, ) -> Result where T: FromStr, { match &attribute_value { Some(AttributeValue::N(numeric_str)) => { parse_integer(attribute_name, numeric_str) } Some(_) => Err(DBItemError::new( attribute_name.into(), Value::AttributeValue(attribute_value), DBItemAttributeError::IncorrectType, )), None => Err(DBItemError::new( attribute_name.into(), Value::AttributeValue(attribute_value), DBItemAttributeError::Missing, )), } } /// Parses the UTC timestamp in milliseconds from a DynamoDB numeric attribute pub fn parse_timestamp_attribute( attribute_name: impl Into, attribute_value: Option, ) -> Result, DBItemError> { let attribute_name: String = attribute_name.into(); let timestamp = parse_int_attribute::( attribute_name.clone(), attribute_value.clone(), )?; let naive_datetime = chrono::NaiveDateTime::from_timestamp_millis(timestamp) .ok_or_else(|| { DBItemError::new( attribute_name, Value::AttributeValue(attribute_value), DBItemAttributeError::TimestampOutOfRange, ) })?; Ok(DateTime::from_naive_utc_and_offset(naive_datetime, Utc)) } pub fn parse_integer( attribute_name: impl Into, attribute_value: &str, ) -> Result where T: FromStr, { attribute_value.parse::().map_err(|e| { DBItemError::new( attribute_name.into(), Value::String(attribute_value.into()), 
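`parse_timestamp_attribute` composes two fallible steps: parse the DynamoDB `N` attribute as an `i64` (failing with `InvalidNumberFormat`), then range-check it into a `DateTime<Utc>` (failing with `TimestampOutOfRange`). The same composition outside DynamoDB, using the chrono calls from the original and plain string errors for brevity:

```rust
use chrono::{DateTime, NaiveDateTime, Utc};

fn parse_timestamp(raw: &str) -> Result<DateTime<Utc>, String> {
    // Step 1: numeric parse (-> DBItemAttributeError::InvalidNumberFormat).
    let millis: i64 = raw.parse().map_err(|e| format!("invalid number: {e}"))?;
    // Step 2: range check (-> DBItemAttributeError::TimestampOutOfRange).
    let naive = NaiveDateTime::from_timestamp_millis(millis)
        .ok_or_else(|| "timestamp out of range".to_string())?;
    Ok(DateTime::from_naive_utc_and_offset(naive, Utc))
}

fn main() {
    assert!(parse_timestamp("1700000000000").is_ok());
    assert!(parse_timestamp("foo").is_err());
    assert!(parse_timestamp(&i64::MAX.to_string()).is_err());
}
```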
DBItemAttributeError::InvalidNumberFormat(e), ) }) } pub mod batch_operations { use aws_sdk_dynamodb::{ error::SdkError, operation::batch_write_item::BatchWriteItemError, types::{KeysAndAttributes, WriteRequest}, Error as DynamoDBError, }; use rand::Rng; use std::time::Duration; use tracing::{debug, trace}; use super::AttributeMap; /// DynamoDB hard limit for single BatchWriteItem request const SINGLE_BATCH_WRITE_ITEM_LIMIT: usize = 25; const SINGLE_BATCH_GET_ITEM_LIMIT: usize = 100; /// Exponential backoff configuration for batch write operation #[derive(derive_more::Constructor, Debug)] pub struct ExponentialBackoffConfig { /// Maximum retry attempts before the function fails. /// Set this to 0 to disable exponential backoff. /// Defaults to **8**. pub max_attempts: u32, /// Base wait duration before retry. Defaults to **25ms**. /// It is doubled with each attempt: 25ms, 50, 100, 200... pub base_duration: Duration, /// Jitter factor for retry delay. Factor 0.5 for 100ms delay /// means that wait time will be between 50ms and 150ms. /// The value must be in range 0.0 - 1.0. It will be clamped /// if out of these bounds. Defaults to **0.3** pub jitter_factor: f32, /// Retry on [`ProvisionedThroughputExceededException`]. /// Defaults to **true**. /// /// [`ProvisionedThroughputExceededException`]: aws_sdk_dynamodb::Error::ProvisionedThroughputExceededException pub retry_on_provisioned_capacity_exceeded: bool, } impl Default for ExponentialBackoffConfig { fn default() -> Self { ExponentialBackoffConfig { max_attempts: 8, base_duration: Duration::from_millis(25), jitter_factor: 0.3, retry_on_provisioned_capacity_exceeded: true, } } } impl ExponentialBackoffConfig { fn new_counter(&self) -> ExponentialBackoffHelper { ExponentialBackoffHelper::new(self) } fn backoff_enabled(&self) -> bool { self.max_attempts > 0 } fn should_retry_on_capacity_exceeded(&self) -> bool { self.backoff_enabled() && self.retry_on_provisioned_capacity_exceeded } } #[tracing::instrument(name = "batch_get", skip(ddb, primary_keys, config))] pub async fn batch_get( ddb: &aws_sdk_dynamodb::Client, table_name: &str, primary_keys: K, projection_expression: Option, config: ExponentialBackoffConfig, ) -> Result, super::Error> where K: IntoIterator, K::Item: Into, { let mut primary_keys: Vec<_> = primary_keys.into_iter().map(Into::into).collect(); let mut results = Vec::with_capacity(primary_keys.len()); tracing::debug!( ?config, "Starting batch read operation of {} items...", primary_keys.len() ); let mut exponential_backoff = config.new_counter(); let mut backup = Vec::with_capacity(SINGLE_BATCH_GET_ITEM_LIMIT); loop { let items_to_drain = std::cmp::min(primary_keys.len(), SINGLE_BATCH_GET_ITEM_LIMIT); let chunk = primary_keys.drain(..items_to_drain).collect::>(); if chunk.is_empty() { // No more items tracing::trace!("No more items to process. 
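`ExponentialBackoffConfig` documents its defaults (8 attempts, 25ms base, 0.3 jitter, retry on throttling), and since it implements `Default`, callers can override fields selectively with struct-update syntax. A sketch with a condensed copy of the type:

```rust
use std::time::Duration;

// Condensed copy of the config from comm_lib::database::batch_operations.
#[derive(Debug)]
struct ExponentialBackoffConfig {
    max_attempts: u32,
    base_duration: Duration,
    jitter_factor: f32,
    retry_on_provisioned_capacity_exceeded: bool,
}

impl Default for ExponentialBackoffConfig {
    fn default() -> Self {
        Self {
            max_attempts: 8,
            base_duration: Duration::from_millis(25),
            jitter_factor: 0.3,
            retry_on_provisioned_capacity_exceeded: true,
        }
    }
}

fn main() {
    // Keep the defaults, but fail fast after 3 attempts and never retry
    // when provisioned capacity is exceeded.
    let config = ExponentialBackoffConfig {
        max_attempts: 3,
        retry_on_provisioned_capacity_exceeded: false,
        ..Default::default()
    };
    println!("{config:?}");
}
```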
Exiting"); break; } // we don't need the backup when we don't retry if config.should_retry_on_capacity_exceeded() { chunk.clone_into(&mut backup); } tracing::trace!("Attempting to get chunk of {} items...", chunk.len()); let result = ddb .batch_get_item() .request_items( table_name, KeysAndAttributes::builder() .set_keys(Some(chunk)) .consistent_read(true) .set_projection_expression(projection_expression.clone()) .build(), ) .send() .await; match result { Ok(output) => { if let Some(mut responses) = output.responses { if let Some(items) = responses.remove(table_name) { tracing::trace!("Successfully read {} items", items.len()); results.extend(items); } } else { tracing::warn!("Responses was None"); } if let Some(mut unprocessed) = output.unprocessed_keys { let keys_to_retry = match unprocessed.remove(table_name) { Some(KeysAndAttributes { keys: Some(keys), .. }) if !keys.is_empty() => keys, _ => { tracing::trace!("Chunk read successfully. Continuing."); exponential_backoff.reset(); continue; } }; exponential_backoff.sleep_and_retry().await?; tracing::debug!( "Some items failed. Retrying {} requests", keys_to_retry.len() ); primary_keys.extend(keys_to_retry); } else { tracing::trace!("Unprocessed items was None"); } } Err(error) => { let error: DynamoDBError = error.into(); if !matches!( error, DynamoDBError::ProvisionedThroughputExceededException(_) ) { tracing::error!("BatchGetItem failed: {0:?} - {0}", error); return Err(error.into()); } tracing::warn!("Provisioned capacity exceeded!"); if !config.retry_on_provisioned_capacity_exceeded { return Err(error.into()); } exponential_backoff.sleep_and_retry().await?; primary_keys.append(&mut backup); trace!("Retrying now..."); } }; } debug!("Batch read completed."); Ok(results) } /// Performs a single DynamoDB table batch write operation. If the batch /// contains more than 25 items, it is split into chunks. /// /// The function uses exponential backoff retries when AWS throttles /// the request or maximum provisioned capacity is exceeded #[tracing::instrument(name = "batch_write", skip(ddb, requests, config))] pub async fn batch_write( ddb: &aws_sdk_dynamodb::Client, table_name: &str, mut requests: Vec, config: ExponentialBackoffConfig, ) -> Result<(), super::Error> { tracing::debug!( ?config, "Starting batch write operation of {} items...", requests.len() ); let mut exponential_backoff = config.new_counter(); let mut backup = Vec::with_capacity(SINGLE_BATCH_WRITE_ITEM_LIMIT); loop { let items_to_drain = std::cmp::min(requests.len(), SINGLE_BATCH_WRITE_ITEM_LIMIT); let chunk = requests.drain(..items_to_drain).collect::>(); if chunk.is_empty() { // No more items tracing::trace!("No more items to process. Exiting"); break; } // we don't need the backup when we don't retry if config.should_retry_on_capacity_exceeded() { chunk.clone_into(&mut backup); } tracing::trace!("Attempting to write chunk of {} items...", chunk.len()); let result = ddb .batch_write_item() .request_items(table_name, chunk) .send() .await; match result { Ok(output) => { if let Some(mut items) = output.unprocessed_items { let requests_to_retry = items.remove(table_name).unwrap_or_default(); if requests_to_retry.is_empty() { tracing::trace!("Chunk written successfully. Continuing."); exponential_backoff.reset(); continue; } exponential_backoff.sleep_and_retry().await?; tracing::debug!( "Some items failed. 
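Both batch helpers share the same draining loop: take up to the DynamoDB limit from the front of the pending list, keep a backup copy in case the whole chunk must be retried, and re-queue any unprocessed items the response reports. The chunking skeleton by itself:

```rust
const LIMIT: usize = 25; // DynamoDB's BatchWriteItem hard limit

fn main() {
    let mut requests: Vec<u32> = (0..60).collect();
    loop {
        let n = std::cmp::min(requests.len(), LIMIT);
        let chunk: Vec<u32> = requests.drain(..n).collect();
        if chunk.is_empty() {
            break; // no more items to process
        }
        // In batch_write, unprocessed entries from the AWS response would
        // be pushed back onto `requests` here before the next iteration.
        println!("chunk of {} items", chunk.len());
    }
}
```

With 60 items this prints chunks of 25, 25, and 10 before exiting.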
Retrying {} requests", requests_to_retry.len() ); requests.extend(requests_to_retry); } else { tracing::trace!("Unprocessed items was None"); } } Err(error) => { if !is_provisioned_capacity_exceeded(&error) { tracing::error!("BatchWriteItem failed: {0:?} - {0}", error); return Err(super::Error::AwsSdk(error.into())); } tracing::warn!("Provisioned capacity exceeded!"); if !config.retry_on_provisioned_capacity_exceeded { return Err(super::Error::AwsSdk(error.into())); } exponential_backoff.sleep_and_retry().await?; requests.append(&mut backup); trace!("Retrying now..."); } }; } debug!("Batch write completed."); Ok(()) } /// internal helper struct struct ExponentialBackoffHelper<'cfg> { config: &'cfg ExponentialBackoffConfig, attempt: u32, } impl<'cfg> ExponentialBackoffHelper<'cfg> { fn new(config: &'cfg ExponentialBackoffConfig) -> Self { ExponentialBackoffHelper { config, attempt: 0 } } /// reset counter after successfull operation fn reset(&mut self) { self.attempt = 0; } /// increase counter and sleep in case of failure async fn sleep_and_retry(&mut self) -> Result<(), super::Error> { let jitter_factor = 1f32.min(0f32.max(self.config.jitter_factor)); let random_multiplier = 1.0 + rand::thread_rng().gen_range(-jitter_factor..=jitter_factor); let backoff_multiplier = 2u32.pow(self.attempt); let base_duration = self.config.base_duration * backoff_multiplier; let sleep_duration = base_duration.mul_f32(random_multiplier); self.attempt += 1; if self.attempt > self.config.max_attempts { tracing::warn!("Retry limit exceeded!"); return Err(super::Error::MaxRetriesExceeded); } tracing::debug!( attempt = self.attempt, "Batch failed. Sleeping for {}ms before retrying...", sleep_duration.as_millis() ); tokio::time::sleep(sleep_duration).await; Ok(()) } } /// Check if transaction failed due to /// `ProvisionedThroughputExceededException` exception fn is_provisioned_capacity_exceeded( err: &SdkError, ) -> bool { let SdkError::ServiceError(service_error) = err else { return false; }; matches!( service_error.err(), BatchWriteItemError::ProvisionedThroughputExceededException(_) ) } } #[derive(Debug, Clone, Copy, derive_more::Display, derive_more::Error)] pub struct UnknownAttributeTypeError; fn calculate_attr_value_size_in_db( value: &AttributeValue, ) -> Result { const ELEMENT_BYTE_OVERHEAD: usize = 1; const CONTAINER_BYTE_OVERHEAD: usize = 3; /// AWS doesn't provide an exact algorithm for calculating number size in bytes /// in case they change the internal representation. We know that number can use /// between 2 and 21 bytes so we use the maximum value as the byte size. const NUMBER_BYTE_SIZE: usize = 21; let result = match value { AttributeValue::B(blob) => blob.as_ref().len(), AttributeValue::L(list) => { CONTAINER_BYTE_OVERHEAD + list.len() * ELEMENT_BYTE_OVERHEAD + list .iter() .try_fold(0, |a, v| Ok(a + calculate_attr_value_size_in_db(v)?))? } AttributeValue::M(map) => { CONTAINER_BYTE_OVERHEAD + map.len() * ELEMENT_BYTE_OVERHEAD + calculate_size_in_db(map)? 
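`ExponentialBackoffHelper::sleep_and_retry` doubles the base delay on each attempt and multiplies by a random factor in `1 ± jitter`, with the jitter factor clamped to `[0, 1]` exactly as documented on the config. The delay computation extracted on its own:

```rust
use rand::Rng;
use std::time::Duration;

fn backoff_delay(base: Duration, attempt: u32, jitter_factor: f32) -> Duration {
    // Clamp jitter into [0.0, 1.0], as the helper does.
    let jitter = 1f32.min(0f32.max(jitter_factor));
    // Random multiplier in 1.0 +/- jitter, e.g. 0.7..=1.3 for the
    // default factor of 0.3.
    let random_multiplier =
        1.0 + rand::thread_rng().gen_range(-jitter..=jitter);
    // Exponential part: 25ms, 50ms, 100ms, ... for the default base.
    let exponential = base * 2u32.pow(attempt);
    exponential.mul_f32(random_multiplier)
}

fn main() {
    let base = Duration::from_millis(25);
    for attempt in 0..4 {
        println!("attempt {attempt}: {:?}", backoff_delay(base, attempt, 0.3));
    }
}
```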
} AttributeValue::Bool(_) | AttributeValue::Null(_) => 1, AttributeValue::Bs(set) => set.len(), AttributeValue::N(_) => NUMBER_BYTE_SIZE, AttributeValue::Ns(set) => set.len() * NUMBER_BYTE_SIZE, AttributeValue::S(string) => string.as_bytes().len(), AttributeValue::Ss(set) => { set.iter().map(|string| string.as_bytes().len()).sum() } _ => return Err(UnknownAttributeTypeError), }; Ok(result) } pub fn calculate_size_in_db( value: &AttributeMap, ) -> Result { value.iter().try_fold(0, |a, (attr, value)| { Ok(a + attr.as_bytes().len() + calculate_attr_value_size_in_db(value)?) }) } #[cfg(test)] mod tests { use super::*; #[test] fn test_parse_integer() { assert!(parse_integer::("some_attr", "123").is_ok()); assert!(parse_integer::("negative", "-123").is_ok()); assert!(parse_integer::("float", "3.14").is_err()); assert!(parse_integer::("NaN", "foo").is_err()); assert!(parse_integer::("negative_uint", "-123").is_err()); assert!(parse_integer::("too_large", "65536").is_err()); } #[test] fn test_parse_timestamp() { let timestamp = Utc::now().timestamp_millis(); let attr = AttributeValue::N(timestamp.to_string()); let parsed_timestamp = parse_timestamp_attribute("some_attr", Some(attr)); assert!(parsed_timestamp.is_ok()); assert_eq!(parsed_timestamp.unwrap().timestamp_millis(), timestamp); } #[test] fn test_parse_invalid_timestamp() { let attr = AttributeValue::N("foo".to_string()); let parsed_timestamp = parse_timestamp_attribute("some_attr", Some(attr)); assert!(parsed_timestamp.is_err()); } #[test] fn test_parse_timestamp_out_of_range() { let attr = AttributeValue::N(i64::MAX.to_string()); let parsed_timestamp = parse_timestamp_attribute("some_attr", Some(attr)); assert!(parsed_timestamp.is_err()); assert!(matches!( parsed_timestamp.unwrap_err().attribute_error, DBItemAttributeError::TimestampOutOfRange )); } } diff --git a/shared/comm-lib/src/database/blob.rs b/shared/comm-lib/src/database/blob.rs index 113a573a4..d6b46af32 100644 --- a/shared/comm-lib/src/database/blob.rs +++ b/shared/comm-lib/src/database/blob.rs @@ -1,97 +1,99 @@ use super::{AttributeExtractor, AttributeMap, DBItemError, TryFromAttribute}; use crate::blob::{ client::{BlobServiceClient, BlobServiceError}, types::BlobInfo, }; use aws_sdk_dynamodb::{primitives::Blob, types::AttributeValue}; use bytes::Bytes; use tokio_stream::StreamExt; /// Represents array of bytes that could be either stored inline in /// dynamodb or in blob, depending on the size. #[derive(Clone, Debug)] pub enum BlobOrDBContent { /// Shouldn't be created manually! First create [`BlobOrDBContent::Database`] /// either directly or using [`BlobOrDBContent::new`] and call /// [`BlobOrDBContent::move_to_blob`]. 
Blob(BlobInfo), Database(Vec), } impl BlobOrDBContent { /// Creates a new [`BlobOrDBContent::Database`] pub fn new(bytes: Vec) -> Self { Self::Database(bytes) } /// Returns a tuple of attribute name and value for this content pub fn into_attr_pair( self, in_blob_attr: impl Into, in_db_attr: impl Into, ) -> (String, AttributeValue) { match self { Self::Blob(blob_info) => (in_blob_attr.into(), blob_info.into()), Self::Database(data) => { (in_db_attr.into(), AttributeValue::B(Blob::new(data))) } } } pub fn parse_from_attrs( attrs: &mut AttributeMap, in_blob_attr: impl Into, in_db_attr: &str, ) -> Result { let in_blob_attr = in_blob_attr.into(); if let Some(blob_info_attr) = attrs.remove(&in_blob_attr) { let blob_info = BlobInfo::try_from_attr(in_blob_attr, Some(blob_info_attr))?; return Ok(Self::Blob(blob_info)); } - let content_data = attrs.take_attr(in_db_attr.into())?; + let content_data = attrs.take_attr(in_db_attr)?; Ok(Self::Database(content_data)) } /// Moves content to blob storage: /// - Switches `self` from [`BlobOrDBContent::Database`] to [`BlobOrDBContent::Blob`] /// - No-op for [`BlobOrDBContent::Blob`] pub async fn move_to_blob( &mut self, blob_client: &BlobServiceClient, ) -> Result<(), BlobServiceError> { - let Self::Database(ref mut contents) = self else { return Ok(()); }; + let Self::Database(ref mut contents) = self else { + return Ok(()); + }; let data = std::mem::take(contents); let new_blob_info = BlobInfo::from_bytes(&data); // NOTE: We send the data as a single chunk. This shouldn't be a problem // unless we start trying to send large byte payloads. In that case, we should // consider splitting the data into chunks and sending them as a stream. let data_stream = tokio_stream::once(Result::<_, std::io::Error>::Ok(data)); blob_client .simple_put(&new_blob_info.blob_hash, &new_blob_info.holder, data_stream) .await?; *self = Self::Blob(new_blob_info); Ok(()) } /// Fetches content bytes pub async fn fetch_bytes( self, blob_client: &BlobServiceClient, ) -> Result, BlobServiceError> { match self { Self::Database(data) => Ok(data), Self::Blob(BlobInfo { blob_hash, .. }) => { let stream = blob_client.get(&blob_hash).await?; let chunks: Vec = stream.collect::>().await?; let data = chunks.into_iter().flatten().collect(); Ok(data) } } } } diff --git a/shared/comm-lib/src/tools.rs b/shared/comm-lib/src/tools.rs index 3dc2560de..0f9bd646e 100644 --- a/shared/comm-lib/src/tools.rs +++ b/shared/comm-lib/src/tools.rs @@ -1,166 +1,166 @@ use rand::{distributions::DistString, CryptoRng, Rng}; // colon is valid because it is used as a separator // in some backup service identifiers -const VALID_IDENTIFIER_CHARS: &'static [char] = &['_', '-', '=', ':']; +const VALID_IDENTIFIER_CHARS: &[char] = &['_', '-', '=', ':']; /// Checks if the given string is a valid identifier for an entity /// (e.g. backup ID, blob hash, blob holder). /// /// Some popular identifier formats are considered valid, including UUID, /// nanoid, base64url. On the other hand, path or url-like identifiers /// are not supposed to be valid pub fn is_valid_identifier(identifier: &str) -> bool { if identifier.is_empty() { return false; } identifier .chars() .all(|c| c.is_ascii_alphanumeric() || VALID_IDENTIFIER_CHARS.contains(&c)) } pub type BoxedError = Box; /// Defers call of the provided function to when [Defer] goes out of scope. /// This can be used for cleanup code that must be run when e.g. the enclosing /// function exits either by return or try operator `?`. 
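`BlobOrDBContent` models the "inline vs. external" storage decision: payloads start inline in the DynamoDB item (`Database`), and `move_to_blob` swaps the bytes for a `BlobInfo` pointer after uploading. A self-contained sketch of that state transition, with a hypothetical hash in place of the real Sha256-plus-upload step:

```rust
// Condensed shape of comm_lib::database::blob::BlobOrDBContent.
enum Content {
    Blob { blob_hash: String },
    Database(Vec<u8>),
}

impl Content {
    fn new(bytes: Vec<u8>) -> Self {
        Content::Database(bytes)
    }

    // Mirrors move_to_blob: a no-op for Blob, otherwise swap the inline
    // bytes for a pointer (the real method uploads via BlobServiceClient).
    fn move_to_blob(&mut self) {
        let Content::Database(bytes) = self else {
            return;
        };
        // Take the bytes out without cloning, as the original does.
        let data = std::mem::take(bytes);
        // Hypothetical stand-in for the Sha256 + hex blob hash.
        let blob_hash = format!("sha256-of-{}-bytes", data.len());
        *self = Content::Blob { blob_hash };
    }
}

fn main() {
    let mut content = Content::new(vec![0u8; 4096]);
    content.move_to_blob();
    assert!(matches!(content, Content::Blob { .. }));
}
```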
/// /// # Example /// ```ignore /// fn f(){ /// let _ = Defer::new(|| println!("cleanup")) /// /// // Cleanup will run if function would exit here /// operation_that_can_fail()?; /// /// if should_exit_early { /// // Cleanup will run if function would exit here /// return; /// } /// } /// ``` pub struct Defer<'s>(Option>); impl<'s> Defer<'s> { pub fn new(f: impl FnOnce() + 's) -> Self { Self(Some(Box::new(f))) } /// Consumes the value, without calling the provided function /// /// # Example /// ```ignore /// // Start a "transaction" /// operation_that_should_be_reverted(); /// let revert = Defer::new(|| println!("revert")) /// operation_that_can_fail()?; /// operation_that_can_fail()?; /// operation_that_can_fail()?; /// // Now we can "commit" the changes /// revert.cancel(); /// ``` pub fn cancel(mut self) { self.0 = None; // Implicit drop } } impl Drop for Defer<'_> { fn drop(&mut self) { if let Some(f) = self.0.take() { f(); } } } pub fn generate_random_string( length: usize, rng: &mut (impl Rng + CryptoRng), ) -> String { rand::distributions::Alphanumeric.sample_string(rng, length) } #[cfg(test)] mod valid_identifier_tests { use super::*; #[test] fn alphanumeric_identifier() { assert!(is_valid_identifier("some_identifier_v123")); } #[test] fn alphanumeric_with_colon() { assert!(is_valid_identifier("some_identifier:with_colon")); } #[test] fn uuid_is_valid() { let example_uuid = "a2b9e4d4-8d1f-4c7f-9c3d-5f4e4e6b1d1d"; assert!(is_valid_identifier(example_uuid)); } #[test] fn base64url_is_valid() { let example_base64url = "VGhlIP3-aWNrIGJyb3duIGZveCBqciAxMyBsYXp5IGRvZ_7_"; assert!(is_valid_identifier(example_base64url)) } #[test] fn standard_base64_is_invalid() { let example_base64 = "VGhlIP3+aWNrIGJyb3duIGZveCBqdW1wcyBvdmVyIDEzIGxhenkgZG9n/v8="; assert!(!is_valid_identifier(example_base64)); } #[test] fn path_is_invalid() { assert!(!is_valid_identifier("some/path")); } #[test] fn url_is_invalid() { assert!(!is_valid_identifier("https://example.com")); } #[test] fn empty_is_invalid() { assert!(!is_valid_identifier("")); } #[test] fn defer_runs() { fn f(a: &mut bool) { let _ = Defer::new(|| *a = true); } let mut v = false; f(&mut v); assert!(v) } #[test] fn consumed_defer_doesnt_run() { fn f(a: &mut bool) { let defer = Defer::new(|| *a = true); defer.cancel(); } let mut v = false; f(&mut v); assert!(!v) } #[test] fn defer_runs_on_try() { fn f(a: &mut bool) -> Result<(), ()> { let _ = Defer::new(|| *a = true); Err(()) } let mut v = false; let _ = f(&mut v); assert!(v) } }
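One subtlety worth noting with the `Defer` pattern: `let _ = Defer::new(...)`, as written in the doc examples and tests, drops the guard at the end of that statement (`_` is not a binding), so the cleanup runs immediately rather than at scope exit; binding to a named variable such as `let _guard` is what actually defers it. A compilable sketch of the scope-guard shape under that correction:

```rust
// Minimal scope guard in the spirit of comm_lib::tools::Defer.
struct Defer<F: FnOnce()>(Option<F>);

impl<F: FnOnce()> Defer<F> {
    fn new(f: F) -> Self {
        Self(Some(f))
    }
}

impl<F: FnOnce()> Drop for Defer<F> {
    fn drop(&mut self) {
        if let Some(f) = self.0.take() {
            f();
        }
    }
}

fn main() {
    {
        // Named binding: the guard lives until the end of this block.
        let _guard = Defer::new(|| println!("cleanup at scope exit"));
        println!("doing work");
    } // <- "cleanup at scope exit" prints here, after "doing work"
}
```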