D13876.diff
diff --git a/services/backup/src/http/handlers/backup.rs b/services/backup/src/http/handlers/backup.rs
--- a/services/backup/src/http/handlers/backup.rs
+++ b/services/backup/src/http/handlers/backup.rs
@@ -85,6 +85,105 @@
Ok(HttpResponse::Ok().finish())
}
+#[instrument(skip_all, fields(backup_id))]
+pub async fn upload_user_keys(
+ user: UserIdentity,
+ blob_client: Authenticated<BlobServiceClient>,
+ db_client: web::Data<DatabaseClient>,
+ mut multipart: actix_multipart::Multipart,
+) -> actix_web::Result<HttpResponse> {
+ let backup_id = get_named_text_field("backup_id", &mut multipart).await?;
+ let blob_client = blob_client.with_user_identity(user.clone());
+ tracing::Span::current().record("backup_id", &backup_id);
+
+ info!("Backup User Keys upload started");
+
+ let (user_keys_blob_info, user_keys_revoke) = forward_field_to_blob(
+ &mut multipart,
+ &blob_client,
+ "user_keys_hash",
+ "user_keys",
+ )
+ .await?;
+
+ let siwe_backup_msg = get_siwe_backup_msg(&mut multipart).await?;
+
+ let ordered_backup_item = db_client
+ .find_last_backup_item(&user.user_id)
+ .await
+ .map_err(BackupError::from)?;
+
+ let old_backup_item = match ordered_backup_item {
+ None => None,
+ Some(item) => db_client
+ .find_backup_item(&user.user_id, &item.backup_id)
+ .await
+ .map_err(BackupError::from)?,
+ };
+
+ let mut revokes = Vec::new();
+
+ let (user_data, attachments) = match old_backup_item.clone() {
+ None => (None, Vec::new()),
+    // If attachments and user_data exist, we need to create new holders.
+    // Otherwise, cleanup could remove this data.
+ Some(item) => {
+ let attachments_hashes: Vec<String> = item
+ .attachments
+ .iter()
+ .map(|attachment| attachment.blob_hash.clone())
+ .collect();
+
+ let (attachments, attachments_revokes) =
+ process_blob_hashes(attachments_hashes, &blob_client).await?;
+
+ revokes.extend(attachments_revokes);
+
+ match item.user_data {
+ None => (None, attachments),
+ Some(data) => {
+ let (blob_infos, defers) =
+ process_blob_hashes(vec![data.blob_hash], &blob_client).await?;
+
+ let blob_info = blob_infos
+ .into_iter()
+ .next()
+ .ok_or(BackupError::BadRequest)?;
+ revokes.extend(defers);
+
+ (Some(blob_info), attachments)
+ }
+ }
+ }
+ };
+
+ let item = BackupItem::new(
+ user.user_id.clone(),
+ backup_id,
+ user_keys_blob_info,
+ user_data,
+ attachments,
+ siwe_backup_msg,
+ );
+
+ db_client
+ .put_backup_item(item)
+ .await
+ .map_err(BackupError::from)?;
+
+ user_keys_revoke.cancel();
+ for attachment_revoke in revokes {
+ attachment_revoke.cancel();
+ }
+
+ db_client
+ .remove_old_backups(&user.user_id, &blob_client)
+ .await
+ .map_err(BackupError::from)?;
+
+ Ok(HttpResponse::Ok().finish())
+}
+
#[instrument(skip_all, fields(hash_field_name, data_field_name))]
async fn forward_field_to_blob<'revoke, 'blob: 'revoke>(
multipart: &mut actix_multipart::Multipart,
@@ -155,34 +254,49 @@
}
#[instrument(skip_all)]
-async fn create_attachment_holder<'revoke, 'blob: 'revoke>(
- attachment: &str,
+async fn create_holder<'revoke, 'blob: 'revoke>(
+ hash: &str,
blob_client: &'blob BlobServiceClient,
) -> Result<(BlobInfo, Defer<'revoke>), BackupError> {
let holder = uuid::Uuid::new_v4().to_string();
if !blob_client
- .assign_holder(attachment, &holder)
+ .assign_holder(hash, &holder)
.await
.map_err(BackupError::from)?
{
- warn!("Blob attachment with hash {attachment:?} doesn't exist");
+ warn!("Blob with hash {hash:?} doesn't exist");
}
- let revoke_hash = attachment.to_string();
+ let revoke_hash = hash.to_string();
let revoke_holder = holder.clone();
let revoke_holder = Defer::new(|| {
blob_client.schedule_revoke_holder(revoke_hash, revoke_holder)
});
let blob_info = BlobInfo {
- blob_hash: attachment.to_string(),
+ blob_hash: hash.to_string(),
holder,
};
Ok((blob_info, revoke_holder))
}
+async fn process_blob_hashes<'revoke, 'blob: 'revoke>(
+ hashes: Vec<String>,
+ blob_client: &'blob BlobServiceClient,
+) -> Result<(Vec<BlobInfo>, Vec<Defer<'revoke>>), BackupError> {
+ let mut blob_infos = Vec::new();
+ let mut revokes = Vec::new();
+ for hash in hashes {
+ let (blob_info, revoke) = create_holder(&hash, blob_client).await?;
+ blob_infos.push(blob_info);
+ revokes.push(revoke);
+ }
+
+ Ok((blob_infos, revokes))
+}
+
#[instrument(skip_all)]
async fn process_attachments<'revoke, 'blob: 'revoke>(
multipart: &mut actix_multipart::Multipart,
@@ -204,16 +318,7 @@
Err(_) => return Err(BackupError::BadRequest),
};
- let mut attachments = Vec::new();
- let mut attachments_revokes = Vec::new();
- for attachment_hash in attachments_hashes {
- let (attachment, revoke) =
- create_attachment_holder(&attachment_hash, blob_client).await?;
- attachments.push(attachment);
- attachments_revokes.push(revoke);
- }
-
- Ok((attachments, attachments_revokes))
+ process_blob_hashes(attachments_hashes, blob_client).await
}
#[instrument(skip_all)]
diff --git a/services/backup/src/http/mod.rs b/services/backup/src/http/mod.rs
--- a/services/backup/src/http/mod.rs
+++ b/services/backup/src/http/mod.rs
@@ -60,6 +60,14 @@
.service(
web::resource("").route(web::post().to(handlers::backup::upload)),
)
+    // Uploads User Keys and creates a new backup.
+    // User Keys are mandatory for this operation.
+    // User Data is not required; the User Data and Attachments
+    // from the previous backup are reused for the new backup.
+ .service(
+ web::resource("/user_keys")
+ .route(web::post().to(handlers::backup::upload_user_keys)),
+ )
.service(
web::resource("{backup_id}/user_keys")
.route(web::get().to(handlers::backup::download_user_keys)),
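
For context, this diff is attached to D13876 ([backup-service] implement endpoint to upload User Keys). It adds a POST route at the new "/user_keys" resource whose handler reads a multipart form: a "backup_id" text field, a "user_keys_hash" / "user_keys" pair forwarded to the Blob service, and a SIWE backup message. The sketch below shows how a client might call it; it is an assumed reqwest-based illustration, and the base URL, the full mount path of the resource, the hash format, the "siwe_backup_msg" field name, and the authentication headers are all assumptions not confirmed by this diff.

// Hypothetical client-side sketch (not part of D13876): upload User Keys to
// create a new backup. Field names follow the handler's read order; anything
// marked "assumed" is not visible in the diff.
use reqwest::multipart::{Form, Part};

async fn upload_user_keys_sketch() -> Result<(), reqwest::Error> {
  let user_keys: Vec<u8> = b"...encrypted User Keys payload...".to_vec();

  let form = Form::new()
    // The handler reads "backup_id" first via get_named_text_field.
    .text("backup_id", "3a1f2c0e-d6b7-4f1a-9c2d-0b8e7a6f5d41")
    // forward_field_to_blob consumes the hash field, then the data field.
    .text("user_keys_hash", "<blob hash of the payload>") // hash format assumed
    .part("user_keys", Part::bytes(user_keys))
    // Field name assumed from get_siwe_backup_msg; may be optional.
    .text("siwe_backup_msg", "<SIWE backup message>");

  let response = reqwest::Client::new()
    .post("https://backup.example.com/backups/user_keys") // base path assumed
    // Whatever headers the UserIdentity extractor requires are omitted here.
    .multipart(form)
    .send()
    .await?;

  assert!(response.status().is_success());
  Ok(())
}

On success the handler responds with an empty 200, mirroring the existing upload handler.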
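The rename from create_attachment_holder to create_holder, plus the new process_blob_hashes helper, centers on the Defer value returned with each BlobInfo: a holder is assigned eagerly, a revoke is scheduled as a deferred action, and once put_backup_item succeeds the handler calls cancel() on user_keys_revoke and on every entry in revokes so the holders are kept. Below is a minimal, self-contained sketch of that idea; it assumes Defer behaves roughly like a cancellable drop guard, which may differ from the service's actual type.

// Rough approximation (assumed, not the service's real Defer) of a cancellable
// drop guard: the cleanup closure runs on drop unless cancel() disarms it.
struct RevokeGuard<F: FnOnce()> {
  cleanup: Option<F>,
}

impl<F: FnOnce()> RevokeGuard<F> {
  fn new(cleanup: F) -> Self {
    Self { cleanup: Some(cleanup) }
  }

  // Call after the backup item has been persisted; the revoke is skipped.
  fn cancel(mut self) {
    self.cleanup = None;
  }
}

impl<F: FnOnce()> Drop for RevokeGuard<F> {
  fn drop(&mut self) {
    // Runs on early return or error, i.e. whenever cancel() was never reached.
    if let Some(cleanup) = self.cleanup.take() {
      cleanup();
    }
  }
}

fn guard_example() {
  let guard = RevokeGuard::new(|| println!("revoking blob holder"));
  // ... fallible work; returning early here would trigger the revoke ...
  guard.cancel(); // success path: keep the holder
}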