diff --git a/native/cpp/CommonCpp/DatabaseManagers/SQLiteQueryExecutor.cpp b/native/cpp/CommonCpp/DatabaseManagers/SQLiteQueryExecutor.cpp index 6adf74071..189bcd346 100644 --- a/native/cpp/CommonCpp/DatabaseManagers/SQLiteQueryExecutor.cpp +++ b/native/cpp/CommonCpp/DatabaseManagers/SQLiteQueryExecutor.cpp @@ -1,2360 +1,2377 @@ #include "SQLiteQueryExecutor.h" #include "Logger.h" #include "entities/CommunityInfo.h" #include "entities/EntityQueryHelpers.h" #include "entities/IntegrityThreadHash.h" #include "entities/KeyserverInfo.h" #include "entities/Metadata.h" #include "entities/SyncedMetadataEntry.h" #include "entities/UserInfo.h" #include #include #include #ifndef EMSCRIPTEN #include "../CryptoTools/CryptoModule.h" #include "../Notifications/BackgroundDataStorage/NotificationsCryptoModule.h" #include "CommSecureStore.h" #include "PlatformSpecificTools.h" #include "StaffUtils.h" #endif const int CONTENT_ACCOUNT_ID = 1; const int NOTIFS_ACCOUNT_ID = 2; namespace comm { std::string SQLiteQueryExecutor::sqliteFilePath; std::string SQLiteQueryExecutor::encryptionKey; std::once_flag SQLiteQueryExecutor::initialized; int SQLiteQueryExecutor::sqlcipherEncryptionKeySize = 64; // Should match constant defined in `native_rust_library/src/constants.rs` std::string SQLiteQueryExecutor::secureStoreEncryptionKeyID = "comm.encryptionKey"; int SQLiteQueryExecutor::backupLogsEncryptionKeySize = 32; std::string SQLiteQueryExecutor::secureStoreBackupLogsEncryptionKeyID = "comm.backupLogsEncryptionKey"; std::string SQLiteQueryExecutor::backupLogsEncryptionKey; #ifndef EMSCRIPTEN NativeSQLiteConnectionManager SQLiteQueryExecutor::connectionManager; std::unordered_set SQLiteQueryExecutor::backedUpTablesBlocklist = { "olm_persist_account", "olm_persist_sessions", "metadata", "messages_to_device", "integrity_store", "persist_storage", + "keyservers", }; #else SQLiteConnectionManager SQLiteQueryExecutor::connectionManager; #endif bool create_table(sqlite3 *db, std::string query, std::string tableName) { char *error; sqlite3_exec(db, query.c_str(), nullptr, nullptr, &error); if (!error) { return true; } std::ostringstream stringStream; stringStream << "Error creating '" << tableName << "' table: " << error; Logger::log(stringStream.str()); sqlite3_free(error); return false; } bool create_drafts_table(sqlite3 *db) { std::string query = "CREATE TABLE IF NOT EXISTS drafts (threadID TEXT UNIQUE PRIMARY KEY, " "text TEXT);"; return create_table(db, query, "drafts"); } bool rename_threadID_to_key(sqlite3 *db) { sqlite3_stmt *key_column_stmt; sqlite3_prepare_v2( db, "SELECT name AS col_name FROM pragma_table_xinfo ('drafts') WHERE " "col_name='key';", -1, &key_column_stmt, nullptr); sqlite3_step(key_column_stmt); auto num_bytes = sqlite3_column_bytes(key_column_stmt, 0); sqlite3_finalize(key_column_stmt); if (num_bytes) { return true; } char *error; sqlite3_exec( db, "ALTER TABLE drafts RENAME COLUMN `threadID` TO `key`;", nullptr, nullptr, &error); if (error) { std::ostringstream stringStream; stringStream << "Error occurred renaming threadID column in drafts table " << "to key: " << error; Logger::log(stringStream.str()); sqlite3_free(error); return false; } return true; } bool create_persist_account_table(sqlite3 *db) { std::string query = "CREATE TABLE IF NOT EXISTS olm_persist_account(" "id INTEGER UNIQUE PRIMARY KEY NOT NULL, " "account_data TEXT NOT NULL);"; return create_table(db, query, "olm_persist_account"); } bool create_persist_sessions_table(sqlite3 *db) { std::string query = "CREATE TABLE IF NOT EXISTS 
olm_persist_sessions(" "target_user_id TEXT UNIQUE PRIMARY KEY NOT NULL, " "session_data TEXT NOT NULL);"; return create_table(db, query, "olm_persist_sessions"); } bool drop_messages_table(sqlite3 *db) { char *error; sqlite3_exec(db, "DROP TABLE IF EXISTS messages;", nullptr, nullptr, &error); if (!error) { return true; } std::ostringstream stringStream; stringStream << "Error dropping 'messages' table: " << error; Logger::log(stringStream.str()); sqlite3_free(error); return false; } bool recreate_messages_table(sqlite3 *db) { std::string query = "CREATE TABLE IF NOT EXISTS messages ( " "id TEXT UNIQUE PRIMARY KEY NOT NULL, " "local_id TEXT, " "thread TEXT NOT NULL, " "user TEXT NOT NULL, " "type INTEGER NOT NULL, " "future_type INTEGER, " "content TEXT, " "time INTEGER NOT NULL);"; return create_table(db, query, "messages"); } bool create_messages_idx_thread_time(sqlite3 *db) { char *error; sqlite3_exec( db, "CREATE INDEX IF NOT EXISTS messages_idx_thread_time " "ON messages (thread, time);", nullptr, nullptr, &error); if (!error) { return true; } std::ostringstream stringStream; stringStream << "Error creating (thread, time) index on messages table: " << error; Logger::log(stringStream.str()); sqlite3_free(error); return false; } bool create_media_table(sqlite3 *db) { std::string query = "CREATE TABLE IF NOT EXISTS media ( " "id TEXT UNIQUE PRIMARY KEY NOT NULL, " "container TEXT NOT NULL, " "thread TEXT NOT NULL, " "uri TEXT NOT NULL, " "type TEXT NOT NULL, " "extras TEXT NOT NULL);"; return create_table(db, query, "media"); } bool create_media_idx_container(sqlite3 *db) { char *error; sqlite3_exec( db, "CREATE INDEX IF NOT EXISTS media_idx_container " "ON media (container);", nullptr, nullptr, &error); if (!error) { return true; } std::ostringstream stringStream; stringStream << "Error creating (container) index on media table: " << error; Logger::log(stringStream.str()); sqlite3_free(error); return false; } bool create_threads_table(sqlite3 *db) { std::string query = "CREATE TABLE IF NOT EXISTS threads ( " "id TEXT UNIQUE PRIMARY KEY NOT NULL, " "type INTEGER NOT NULL, " "name TEXT, " "description TEXT, " "color TEXT NOT NULL, " "creation_time BIGINT NOT NULL, " "parent_thread_id TEXT, " "containing_thread_id TEXT, " "community TEXT, " "members TEXT NOT NULL, " "roles TEXT NOT NULL, " "current_user TEXT NOT NULL, " "source_message_id TEXT, " "replies_count INTEGER NOT NULL);"; return create_table(db, query, "threads"); } bool update_threadID_for_pending_threads_in_drafts(sqlite3 *db) { char *error; sqlite3_exec( db, "UPDATE drafts SET key = " "REPLACE(REPLACE(REPLACE(REPLACE(key, 'type4/', '')," "'type5/', ''),'type6/', ''),'type7/', '')" "WHERE key LIKE 'pending/%'", nullptr, nullptr, &error); if (!error) { return true; } std::ostringstream stringStream; stringStream << "Error update pending threadIDs on drafts table: " << error; Logger::log(stringStream.str()); sqlite3_free(error); return false; } bool enable_write_ahead_logging_mode(sqlite3 *db) { char *error; sqlite3_exec(db, "PRAGMA journal_mode=wal;", nullptr, nullptr, &error); if (!error) { return true; } std::ostringstream stringStream; stringStream << "Error enabling write-ahead logging mode: " << error; Logger::log(stringStream.str()); sqlite3_free(error); return false; } bool create_metadata_table(sqlite3 *db) { std::string query = "CREATE TABLE IF NOT EXISTS metadata ( " "name TEXT UNIQUE PRIMARY KEY NOT NULL, " "data TEXT);"; return create_table(db, query, "metadata"); } bool add_not_null_constraint_to_drafts(sqlite3 
*db) { char *error; sqlite3_exec( db, "CREATE TABLE IF NOT EXISTS temporary_drafts (" "key TEXT UNIQUE PRIMARY KEY NOT NULL, " "text TEXT NOT NULL);" "INSERT INTO temporary_drafts SELECT * FROM drafts " "WHERE key IS NOT NULL AND text IS NOT NULL;" "DROP TABLE drafts;" "ALTER TABLE temporary_drafts RENAME TO drafts;", nullptr, nullptr, &error); if (!error) { return true; } std::ostringstream stringStream; stringStream << "Error adding NOT NULL constraint to drafts table: " << error; Logger::log(stringStream.str()); sqlite3_free(error); return false; } bool add_not_null_constraint_to_metadata(sqlite3 *db) { char *error; sqlite3_exec( db, "CREATE TABLE IF NOT EXISTS temporary_metadata (" "name TEXT UNIQUE PRIMARY KEY NOT NULL, " "data TEXT NOT NULL);" "INSERT INTO temporary_metadata SELECT * FROM metadata " "WHERE data IS NOT NULL;" "DROP TABLE metadata;" "ALTER TABLE temporary_metadata RENAME TO metadata;", nullptr, nullptr, &error); if (!error) { return true; } std::ostringstream stringStream; stringStream << "Error adding NOT NULL constraint to metadata table: " << error; Logger::log(stringStream.str()); sqlite3_free(error); return false; } bool add_avatar_column_to_threads_table(sqlite3 *db) { char *error; sqlite3_exec( db, "ALTER TABLE threads ADD COLUMN avatar TEXT;", nullptr, nullptr, &error); if (!error) { return true; } std::ostringstream stringStream; stringStream << "Error adding avatar column to threads table: " << error; Logger::log(stringStream.str()); sqlite3_free(error); return false; } bool add_pinned_count_column_to_threads(sqlite3 *db) { sqlite3_stmt *pinned_column_stmt; sqlite3_prepare_v2( db, "SELECT name AS col_name FROM pragma_table_xinfo ('threads') WHERE " "col_name='pinned_count';", -1, &pinned_column_stmt, nullptr); sqlite3_step(pinned_column_stmt); auto num_bytes = sqlite3_column_bytes(pinned_column_stmt, 0); sqlite3_finalize(pinned_column_stmt); if (num_bytes) { return true; } char *error; sqlite3_exec( db, "ALTER TABLE threads ADD COLUMN pinned_count INTEGER NOT NULL DEFAULT 0;", nullptr, nullptr, &error); if (!error) { return true; } std::ostringstream stringStream; stringStream << "Error adding pinned_count column to threads table: " << error; Logger::log(stringStream.str()); sqlite3_free(error); return false; } bool create_message_store_threads_table(sqlite3 *db) { std::string query = "CREATE TABLE IF NOT EXISTS message_store_threads (" " id TEXT UNIQUE PRIMARY KEY NOT NULL," " start_reached INTEGER NOT NULL," " last_navigated_to BIGINT NOT NULL," " last_pruned BIGINT NOT NULL" ");"; return create_table(db, query, "message_store_threads"); } bool create_reports_table(sqlite3 *db) { std::string query = "CREATE TABLE IF NOT EXISTS reports (" " id TEXT UNIQUE PRIMARY KEY NOT NULL," " report TEXT NOT NULL" ");"; return create_table(db, query, "reports"); } bool create_persist_storage_table(sqlite3 *db) { std::string query = "CREATE TABLE IF NOT EXISTS persist_storage (" " key TEXT UNIQUE PRIMARY KEY NOT NULL," " item TEXT NOT NULL" ");"; return create_table(db, query, "persist_storage"); } bool recreate_message_store_threads_table(sqlite3 *db) { char *errMsg = 0; // 1. Create table without `last_navigated_to` or `last_pruned`. 
std::string create_new_table_query = "CREATE TABLE IF NOT EXISTS temp_message_store_threads (" " id TEXT UNIQUE PRIMARY KEY NOT NULL," " start_reached INTEGER NOT NULL" ");"; if (sqlite3_exec(db, create_new_table_query.c_str(), NULL, NULL, &errMsg) != SQLITE_OK) { Logger::log( "Error creating temp_message_store_threads: " + std::string{errMsg}); sqlite3_free(errMsg); return false; } // 2. Dump data from existing `message_store_threads` table into temp table. std::string copy_data_query = "INSERT INTO temp_message_store_threads (id, start_reached)" "SELECT id, start_reached FROM message_store_threads;"; if (sqlite3_exec(db, copy_data_query.c_str(), NULL, NULL, &errMsg) != SQLITE_OK) { Logger::log( "Error dumping data from existing message_store_threads to " "temp_message_store_threads: " + std::string{errMsg}); sqlite3_free(errMsg); return false; } // 3. Drop the existing `message_store_threads` table. std::string drop_old_table_query = "DROP TABLE message_store_threads;"; if (sqlite3_exec(db, drop_old_table_query.c_str(), NULL, NULL, &errMsg) != SQLITE_OK) { Logger::log( "Error dropping message_store_threads table: " + std::string{errMsg}); sqlite3_free(errMsg); return false; } // 4. Rename the temp table back to `message_store_threads`. std::string rename_table_query = "ALTER TABLE temp_message_store_threads RENAME TO message_store_threads;"; if (sqlite3_exec(db, rename_table_query.c_str(), NULL, NULL, &errMsg) != SQLITE_OK) { Logger::log( "Error renaming temp_message_store_threads to message_store_threads: " + std::string{errMsg}); sqlite3_free(errMsg); return false; } return true; } bool create_users_table(sqlite3 *db) { std::string query = "CREATE TABLE IF NOT EXISTS users (" " id TEXT UNIQUE PRIMARY KEY NOT NULL," " user_info TEXT NOT NULL" ");"; return create_table(db, query, "users"); } bool create_keyservers_table(sqlite3 *db) { std::string query = "CREATE TABLE IF NOT EXISTS keyservers (" " id TEXT UNIQUE PRIMARY KEY NOT NULL," " keyserver_info TEXT NOT NULL" ");"; return create_table(db, query, "keyservers"); } bool enable_rollback_journal_mode(sqlite3 *db) { char *error; sqlite3_exec(db, "PRAGMA journal_mode=DELETE;", nullptr, nullptr, &error); if (!error) { return true; } std::stringstream error_message; error_message << "Error disabling write-ahead logging mode: " << error; Logger::log(error_message.str()); sqlite3_free(error); return false; } bool create_communities_table(sqlite3 *db) { std::string query = "CREATE TABLE IF NOT EXISTS communities (" " id TEXT UNIQUE PRIMARY KEY NOT NULL," " community_info TEXT NOT NULL" ");"; return create_table(db, query, "communities"); } bool create_messages_to_device_table(sqlite3 *db) { std::string query = "CREATE TABLE IF NOT EXISTS messages_to_device (" " message_id TEXT NOT NULL," " device_id TEXT NOT NULL," " user_id TEXT NOT NULL," " timestamp BIGINT NOT NULL," " plaintext TEXT NOT NULL," " ciphertext TEXT NOT NULL," " PRIMARY KEY (message_id, device_id)" ");" "CREATE INDEX IF NOT EXISTS messages_to_device_idx_id_timestamp" " ON messages_to_device (device_id, timestamp);"; return create_table(db, query, "messages_to_device"); } bool create_integrity_table(sqlite3 *db) { std::string query = "CREATE TABLE IF NOT EXISTS integrity_store (" " id TEXT UNIQUE PRIMARY KEY NOT NULL," " thread_hash TEXT NOT NULL" ");"; return create_table(db, query, "integrity_store"); } bool migrate_notifs_crypto_account(sqlite3 *db) { #ifndef EMSCRIPTEN std::string legacyCryptoAccountDataKey = "cryptoAccountDataKey"; folly::Optional secretKey = 
CommSecureStore::get(legacyCryptoAccountDataKey); if (!secretKey.hasValue()) { return false; } std::unique_ptr legacyNotifsAccount = NotificationsCryptoModule::migrateLegacyNotificationsCryptoModule(); if (!legacyNotifsAccount) { return true; } std::string insert_notifs_account_query = "REPLACE INTO olm_persist_account (id, account_data) " "VALUES (?, ?);"; crypto::Persist legacyNotifsPersist = legacyNotifsAccount->storeAsB64(secretKey.value()); std::string notifsAccountData = std::string( legacyNotifsPersist.account.begin(), legacyNotifsPersist.account.end()); replaceEntity( db, insert_notifs_account_query, {NOTIFS_ACCOUNT_ID, notifsAccountData}); return true; #else return true; #endif } bool create_synced_metadata_table(sqlite3 *db) { std::string query = "CREATE TABLE IF NOT EXISTS synced_metadata (" " name TEXT UNIQUE PRIMARY KEY NOT NULL," " data TEXT NOT NULL" ");"; return create_table(db, query, "synced_metadata"); } bool create_keyservers_synced(sqlite3 *db) { std::string query = "CREATE TABLE IF NOT EXISTS keyservers_synced (" " id TEXT UNIQUE PRIMARY KEY NOT NULL," " keyserver_info TEXT NOT NULL" ");"; - return create_table(db, query, "keyservers_synced"); + bool success = create_table(db, query, "keyservers_synced"); + if (!success) { + return false; + } + + std::string copyData = + "INSERT INTO keyservers_synced (id, keyserver_info)" + "SELECT id, keyserver_info " + "FROM keyservers;"; + + char *error; + sqlite3_exec(db, copyData.c_str(), nullptr, nullptr, &error); + if (error) { + return false; + } + + return true; } bool create_schema(sqlite3 *db) { char *error; sqlite3_exec( db, "CREATE TABLE IF NOT EXISTS drafts (" " key TEXT UNIQUE PRIMARY KEY NOT NULL," " text TEXT NOT NULL" ");" "CREATE TABLE IF NOT EXISTS messages (" " id TEXT UNIQUE PRIMARY KEY NOT NULL," " local_id TEXT," " thread TEXT NOT NULL," " user TEXT NOT NULL," " type INTEGER NOT NULL," " future_type INTEGER," " content TEXT," " time INTEGER NOT NULL" ");" "CREATE TABLE IF NOT EXISTS olm_persist_account (" " id INTEGER UNIQUE PRIMARY KEY NOT NULL," " account_data TEXT NOT NULL" ");" "CREATE TABLE IF NOT EXISTS olm_persist_sessions (" " target_user_id TEXT UNIQUE PRIMARY KEY NOT NULL," " session_data TEXT NOT NULL" ");" "CREATE TABLE IF NOT EXISTS media (" " id TEXT UNIQUE PRIMARY KEY NOT NULL," " container TEXT NOT NULL," " thread TEXT NOT NULL," " uri TEXT NOT NULL," " type TEXT NOT NULL," " extras TEXT NOT NULL" ");" "CREATE TABLE IF NOT EXISTS threads (" " id TEXT UNIQUE PRIMARY KEY NOT NULL," " type INTEGER NOT NULL," " name TEXT," " description TEXT," " color TEXT NOT NULL," " creation_time BIGINT NOT NULL," " parent_thread_id TEXT," " containing_thread_id TEXT," " community TEXT," " members TEXT NOT NULL," " roles TEXT NOT NULL," " current_user TEXT NOT NULL," " source_message_id TEXT," " replies_count INTEGER NOT NULL," " avatar TEXT," " pinned_count INTEGER NOT NULL DEFAULT 0" ");" "CREATE TABLE IF NOT EXISTS metadata (" " name TEXT UNIQUE PRIMARY KEY NOT NULL," " data TEXT NOT NULL" ");" "CREATE TABLE IF NOT EXISTS message_store_threads (" " id TEXT UNIQUE PRIMARY KEY NOT NULL," " start_reached INTEGER NOT NULL" ");" "CREATE TABLE IF NOT EXISTS reports (" " id TEXT UNIQUE PRIMARY KEY NOT NULL," " report TEXT NOT NULL" ");" "CREATE TABLE IF NOT EXISTS persist_storage (" " key TEXT UNIQUE PRIMARY KEY NOT NULL," " item TEXT NOT NULL" ");" "CREATE TABLE IF NOT EXISTS users (" " id TEXT UNIQUE PRIMARY KEY NOT NULL," " user_info TEXT NOT NULL" ");" "CREATE TABLE IF NOT EXISTS keyservers (" " id TEXT UNIQUE 
PRIMARY KEY NOT NULL," " keyserver_info TEXT NOT NULL" ");" "CREATE TABLE IF NOT EXISTS keyservers_synced (" " id TEXT UNIQUE PRIMARY KEY NOT NULL," " keyserver_info TEXT NOT NULL" ");" "CREATE TABLE IF NOT EXISTS communities (" " id TEXT UNIQUE PRIMARY KEY NOT NULL," " community_info TEXT NOT NULL" ");" "CREATE TABLE IF NOT EXISTS messages_to_device (" " message_id TEXT NOT NULL," " device_id TEXT NOT NULL," " user_id TEXT NOT NULL," " timestamp BIGINT NOT NULL," " plaintext TEXT NOT NULL," " ciphertext TEXT NOT NULL," " PRIMARY KEY (message_id, device_id)" ");" "CREATE TABLE IF NOT EXISTS integrity_store (" " id TEXT UNIQUE PRIMARY KEY NOT NULL," " thread_hash TEXT NOT NULL" ");" "CREATE TABLE IF NOT EXISTS synced_metadata (" " name TEXT UNIQUE PRIMARY KEY NOT NULL," " data TEXT NOT NULL" ");" "CREATE INDEX IF NOT EXISTS media_idx_container" " ON media (container);" "CREATE INDEX IF NOT EXISTS messages_idx_thread_time" " ON messages (thread, time);" "CREATE INDEX IF NOT EXISTS messages_to_device_idx_id_timestamp" " ON messages_to_device (device_id, timestamp);", nullptr, nullptr, &error); if (!error) { return true; } std::ostringstream stringStream; stringStream << "Error creating tables: " << error; Logger::log(stringStream.str()); sqlite3_free(error); return false; } void set_encryption_key( sqlite3 *db, const std::string &encryptionKey = SQLiteQueryExecutor::encryptionKey) { std::string set_encryption_key_query = "PRAGMA key = \"x'" + encryptionKey + "'\";"; char *error_set_key; sqlite3_exec( db, set_encryption_key_query.c_str(), nullptr, nullptr, &error_set_key); if (error_set_key) { std::ostringstream error_message; error_message << "Failed to set encryption key: " << error_set_key; throw std::system_error( ECANCELED, std::generic_category(), error_message.str()); } } int get_database_version(sqlite3 *db) { sqlite3_stmt *user_version_stmt; sqlite3_prepare_v2( db, "PRAGMA user_version;", -1, &user_version_stmt, nullptr); sqlite3_step(user_version_stmt); int current_user_version = sqlite3_column_int(user_version_stmt, 0); sqlite3_finalize(user_version_stmt); return current_user_version; } bool set_database_version(sqlite3 *db, int db_version) { std::stringstream update_version; update_version << "PRAGMA user_version=" << db_version << ";"; auto update_version_str = update_version.str(); char *error; sqlite3_exec(db, update_version_str.c_str(), nullptr, nullptr, &error); if (!error) { return true; } std::ostringstream errorStream; errorStream << "Error setting database version to " << db_version << ": " << error; Logger::log(errorStream.str()); sqlite3_free(error); return false; } // We don't want to run `PRAGMA key = ...;` // on main web database. The context is here: // https://linear.app/comm/issue/ENG-6398/issues-with-sqlcipher-on-web void default_on_db_open_callback(sqlite3 *db) { #ifndef EMSCRIPTEN set_encryption_key(db); #endif } // This is a temporary solution. In future we want to keep // a separate table for blob hashes. 
Tracked on Linear: // https://linear.app/comm/issue/ENG-6261/introduce-blob-hash-table std::string blob_hash_from_blob_service_uri(const std::string &media_uri) { static const std::string blob_service_prefix = "comm-blob-service://"; return media_uri.substr(blob_service_prefix.size()); } bool file_exists(const std::string &file_path) { std::ifstream file(file_path.c_str()); return file.good(); } void attempt_delete_file( const std::string &file_path, const char *error_message) { if (std::remove(file_path.c_str())) { throw std::system_error(errno, std::generic_category(), error_message); } } void attempt_rename_file( const std::string &old_path, const std::string &new_path, const char *error_message) { if (std::rename(old_path.c_str(), new_path.c_str())) { throw std::system_error(errno, std::generic_category(), error_message); } } bool is_database_queryable( sqlite3 *db, bool use_encryption_key, const std::string &path = SQLiteQueryExecutor::sqliteFilePath, const std::string &encryptionKey = SQLiteQueryExecutor::encryptionKey) { char *err_msg; sqlite3_open(path.c_str(), &db); // According to SQLCipher documentation running some SELECT is the only way to // check for key validity if (use_encryption_key) { set_encryption_key(db, encryptionKey); } sqlite3_exec( db, "SELECT COUNT(*) FROM sqlite_master;", nullptr, nullptr, &err_msg); sqlite3_close(db); return !err_msg; } void validate_encryption() { std::string temp_encrypted_db_path = SQLiteQueryExecutor::sqliteFilePath + "_temp_encrypted"; bool temp_encrypted_exists = file_exists(temp_encrypted_db_path); bool default_location_exists = file_exists(SQLiteQueryExecutor::sqliteFilePath); if (temp_encrypted_exists && default_location_exists) { Logger::log( "Previous encryption attempt failed. Repeating encryption process from " "the beginning."); attempt_delete_file( temp_encrypted_db_path, "Failed to delete corrupted encrypted database."); } else if (temp_encrypted_exists && !default_location_exists) { Logger::log( "Moving temporary encrypted database to default location failed in " "previous encryption attempt. Repeating rename step."); attempt_rename_file( temp_encrypted_db_path, SQLiteQueryExecutor::sqliteFilePath, "Failed to move encrypted database to default location."); return; } else if (!default_location_exists) { Logger::log( "Database not present yet. It will be created encrypted under default " "path."); return; } sqlite3 *db; if (is_database_queryable(db, true)) { Logger::log( "Database exists under default path and it is correctly encrypted."); return; } if (!is_database_queryable(db, false)) { Logger::log( "Database exists but it is encrypted with key that was lost. " "Attempting database deletion. New encrypted one will be created."); attempt_delete_file( SQLiteQueryExecutor::sqliteFilePath.c_str(), "Failed to delete database encrypted with lost key."); return; } else { Logger::log( "Database exists but it is not encrypted. 
Attempting encryption " "process."); } sqlite3_open(SQLiteQueryExecutor::sqliteFilePath.c_str(), &db); std::string createEncryptedCopySQL = "ATTACH DATABASE '" + temp_encrypted_db_path + "' AS encrypted_comm " "KEY \"x'" + SQLiteQueryExecutor::encryptionKey + "'\";" "SELECT sqlcipher_export('encrypted_comm');" "DETACH DATABASE encrypted_comm;"; char *encryption_error; sqlite3_exec( db, createEncryptedCopySQL.c_str(), nullptr, nullptr, &encryption_error); if (encryption_error) { throw std::system_error( ECANCELED, std::generic_category(), "Failed to create encrypted copy of the original database."); } sqlite3_close(db); attempt_delete_file( SQLiteQueryExecutor::sqliteFilePath, "Failed to delete unencrypted database."); attempt_rename_file( temp_encrypted_db_path, SQLiteQueryExecutor::sqliteFilePath, "Failed to move encrypted database to default location."); Logger::log("Encryption completed successfully."); } typedef bool ShouldBeInTransaction; typedef std::function MigrateFunction; typedef std::pair SQLiteMigration; std::vector> migrations{ {{1, {create_drafts_table, true}}, {2, {rename_threadID_to_key, true}}, {4, {create_persist_account_table, true}}, {5, {create_persist_sessions_table, true}}, {15, {create_media_table, true}}, {16, {drop_messages_table, true}}, {17, {recreate_messages_table, true}}, {18, {create_messages_idx_thread_time, true}}, {19, {create_media_idx_container, true}}, {20, {create_threads_table, true}}, {21, {update_threadID_for_pending_threads_in_drafts, true}}, {22, {enable_write_ahead_logging_mode, false}}, {23, {create_metadata_table, true}}, {24, {add_not_null_constraint_to_drafts, true}}, {25, {add_not_null_constraint_to_metadata, true}}, {26, {add_avatar_column_to_threads_table, true}}, {27, {add_pinned_count_column_to_threads, true}}, {28, {create_message_store_threads_table, true}}, {29, {create_reports_table, true}}, {30, {create_persist_storage_table, true}}, {31, {recreate_message_store_threads_table, true}}, {32, {create_users_table, true}}, {33, {create_keyservers_table, true}}, {34, {enable_rollback_journal_mode, false}}, {35, {create_communities_table, true}}, {36, {create_messages_to_device_table, true}}, {37, {create_integrity_table, true}}, {38, {migrate_notifs_crypto_account, true}}, {39, {create_synced_metadata_table, true}}, {40, {create_keyservers_synced, true}}}}; enum class MigrationResult { SUCCESS, FAILURE, NOT_APPLIED }; MigrationResult applyMigrationWithTransaction( sqlite3 *db, const MigrateFunction &migrate, int index) { sqlite3_exec(db, "BEGIN TRANSACTION;", nullptr, nullptr, nullptr); auto db_version = get_database_version(db); if (index <= db_version) { sqlite3_exec(db, "ROLLBACK;", nullptr, nullptr, nullptr); return MigrationResult::NOT_APPLIED; } auto rc = migrate(db); if (!rc) { sqlite3_exec(db, "ROLLBACK;", nullptr, nullptr, nullptr); return MigrationResult::FAILURE; } auto database_version_set = set_database_version(db, index); if (!database_version_set) { sqlite3_exec(db, "ROLLBACK;", nullptr, nullptr, nullptr); return MigrationResult::FAILURE; } sqlite3_exec(db, "END TRANSACTION;", nullptr, nullptr, nullptr); return MigrationResult::SUCCESS; } MigrationResult applyMigrationWithoutTransaction( sqlite3 *db, const MigrateFunction &migrate, int index) { auto db_version = get_database_version(db); if (index <= db_version) { return MigrationResult::NOT_APPLIED; } auto rc = migrate(db); if (!rc) { return MigrationResult::FAILURE; } sqlite3_exec(db, "BEGIN TRANSACTION;", nullptr, nullptr, nullptr); auto inner_db_version = 
get_database_version(db); if (index <= inner_db_version) { sqlite3_exec(db, "ROLLBACK;", nullptr, nullptr, nullptr); return MigrationResult::NOT_APPLIED; } auto database_version_set = set_database_version(db, index); if (!database_version_set) { sqlite3_exec(db, "ROLLBACK;", nullptr, nullptr, nullptr); return MigrationResult::FAILURE; } sqlite3_exec(db, "END TRANSACTION;", nullptr, nullptr, nullptr); return MigrationResult::SUCCESS; } bool set_up_database(sqlite3 *db) { sqlite3_exec(db, "BEGIN TRANSACTION;", nullptr, nullptr, nullptr); auto db_version = get_database_version(db); auto latest_version = migrations.back().first; if (db_version == latest_version) { sqlite3_exec(db, "ROLLBACK;", nullptr, nullptr, nullptr); return true; } if (db_version != 0 || !create_schema(db) || !set_database_version(db, latest_version)) { sqlite3_exec(db, "ROLLBACK;", nullptr, nullptr, nullptr); return false; } sqlite3_exec(db, "END TRANSACTION;", nullptr, nullptr, nullptr); return true; } void SQLiteQueryExecutor::migrate() { // We don't want to run `PRAGMA key = ...;` // on main web database. The context is here: // https://linear.app/comm/issue/ENG-6398/issues-with-sqlcipher-on-web #ifndef EMSCRIPTEN validate_encryption(); #endif sqlite3 *db; sqlite3_open(SQLiteQueryExecutor::sqliteFilePath.c_str(), &db); default_on_db_open_callback(db); std::stringstream db_path; db_path << "db path: " << SQLiteQueryExecutor::sqliteFilePath.c_str() << std::endl; Logger::log(db_path.str()); auto db_version = get_database_version(db); std::stringstream version_msg; version_msg << "db version: " << db_version << std::endl; Logger::log(version_msg.str()); if (db_version == 0) { auto db_created = set_up_database(db); if (!db_created) { sqlite3_close(db); Logger::log("Database structure creation error."); throw std::runtime_error("Database structure creation error"); } Logger::log("Database structure created."); sqlite3_close(db); return; } for (const auto &[idx, migration] : migrations) { const auto &[applyMigration, shouldBeInTransaction] = migration; MigrationResult migrationResult; if (shouldBeInTransaction) { migrationResult = applyMigrationWithTransaction(db, applyMigration, idx); } else { migrationResult = applyMigrationWithoutTransaction(db, applyMigration, idx); } if (migrationResult == MigrationResult::NOT_APPLIED) { continue; } std::stringstream migration_msg; if (migrationResult == MigrationResult::FAILURE) { migration_msg << "migration " << idx << " failed." << std::endl; Logger::log(migration_msg.str()); sqlite3_close(db); throw std::runtime_error(migration_msg.str()); } if (migrationResult == MigrationResult::SUCCESS) { migration_msg << "migration " << idx << " succeeded." 
<< std::endl; Logger::log(migration_msg.str()); } } sqlite3_close(db); } SQLiteQueryExecutor::SQLiteQueryExecutor() { SQLiteQueryExecutor::migrate(); #ifndef EMSCRIPTEN SQLiteQueryExecutor::initializeTablesForLogMonitoring(); std::string currentBackupID = this->getMetadata("backupID"); if (!StaffUtils::isStaffRelease() || !currentBackupID.size()) { return; } SQLiteQueryExecutor::connectionManager.setLogsMonitoring(true); #endif } SQLiteQueryExecutor::SQLiteQueryExecutor(std::string sqliteFilePath) { SQLiteQueryExecutor::sqliteFilePath = sqliteFilePath; SQLiteQueryExecutor::migrate(); } sqlite3 *SQLiteQueryExecutor::getConnection() { if (SQLiteQueryExecutor::connectionManager.getConnection()) { return SQLiteQueryExecutor::connectionManager.getConnection(); } SQLiteQueryExecutor::connectionManager.initializeConnection( SQLiteQueryExecutor::sqliteFilePath, default_on_db_open_callback); return SQLiteQueryExecutor::connectionManager.getConnection(); } void SQLiteQueryExecutor::closeConnection() { SQLiteQueryExecutor::connectionManager.closeConnection(); } SQLiteQueryExecutor::~SQLiteQueryExecutor() { SQLiteQueryExecutor::closeConnection(); } std::string SQLiteQueryExecutor::getDraft(std::string key) const { static std::string getDraftByPrimaryKeySQL = "SELECT * " "FROM drafts " "WHERE key = ?;"; std::unique_ptr draft = getEntityByPrimaryKey( SQLiteQueryExecutor::getConnection(), getDraftByPrimaryKeySQL, key); return (draft == nullptr) ? "" : draft->text; } std::unique_ptr SQLiteQueryExecutor::getThread(std::string threadID) const { static std::string getThreadByPrimaryKeySQL = "SELECT * " "FROM threads " "WHERE id = ?;"; return getEntityByPrimaryKey( SQLiteQueryExecutor::getConnection(), getThreadByPrimaryKeySQL, threadID); } void SQLiteQueryExecutor::updateDraft(std::string key, std::string text) const { static std::string replaceDraftSQL = "REPLACE INTO drafts (key, text) " "VALUES (?, ?);"; Draft draft = {key, text}; replaceEntity( SQLiteQueryExecutor::getConnection(), replaceDraftSQL, draft); } bool SQLiteQueryExecutor::moveDraft(std::string oldKey, std::string newKey) const { std::string draftText = this->getDraft(oldKey); if (!draftText.size()) { return false; } static std::string rekeyDraftSQL = "UPDATE OR REPLACE drafts " "SET key = ? 
" "WHERE key = ?;"; rekeyAllEntities( SQLiteQueryExecutor::getConnection(), rekeyDraftSQL, oldKey, newKey); return true; } std::vector SQLiteQueryExecutor::getAllDrafts() const { static std::string getAllDraftsSQL = "SELECT * " "FROM drafts;"; return getAllEntities( SQLiteQueryExecutor::getConnection(), getAllDraftsSQL); } void SQLiteQueryExecutor::removeAllDrafts() const { static std::string removeAllDraftsSQL = "DELETE FROM drafts;"; removeAllEntities(SQLiteQueryExecutor::getConnection(), removeAllDraftsSQL); } void SQLiteQueryExecutor::removeDrafts( const std::vector &ids) const { if (!ids.size()) { return; } std::stringstream removeDraftsByKeysSQLStream; removeDraftsByKeysSQLStream << "DELETE FROM drafts " "WHERE key IN " << getSQLStatementArray(ids.size()) << ";"; removeEntitiesByKeys( SQLiteQueryExecutor::getConnection(), removeDraftsByKeysSQLStream.str(), ids); } void SQLiteQueryExecutor::removeAllMessages() const { static std::string removeAllMessagesSQL = "DELETE FROM messages;"; removeAllEntities(SQLiteQueryExecutor::getConnection(), removeAllMessagesSQL); } std::vector>> SQLiteQueryExecutor::getAllMessages() const { static std::string getAllMessagesSQL = "SELECT * " "FROM messages " "LEFT JOIN media " " ON messages.id = media.container " "ORDER BY messages.id;"; SQLiteStatementWrapper preparedSQL( SQLiteQueryExecutor::getConnection(), getAllMessagesSQL, "Failed to retrieve all messages."); std::string prevMsgIdx{}; std::vector>> allMessages; for (int stepResult = sqlite3_step(preparedSQL); stepResult == SQLITE_ROW; stepResult = sqlite3_step(preparedSQL)) { Message message = Message::fromSQLResult(preparedSQL, 0); if (message.id == prevMsgIdx) { allMessages.back().second.push_back(Media::fromSQLResult(preparedSQL, 8)); } else { prevMsgIdx = message.id; std::vector mediaForMsg; if (sqlite3_column_type(preparedSQL, 8) != SQLITE_NULL) { mediaForMsg.push_back(Media::fromSQLResult(preparedSQL, 8)); } allMessages.push_back(std::make_pair(std::move(message), mediaForMsg)); } } return allMessages; } void SQLiteQueryExecutor::removeMessages( const std::vector &ids) const { if (!ids.size()) { return; } std::stringstream removeMessagesByKeysSQLStream; removeMessagesByKeysSQLStream << "DELETE FROM messages " "WHERE id IN " << getSQLStatementArray(ids.size()) << ";"; removeEntitiesByKeys( SQLiteQueryExecutor::getConnection(), removeMessagesByKeysSQLStream.str(), ids); } void SQLiteQueryExecutor::removeMessagesForThreads( const std::vector &threadIDs) const { if (!threadIDs.size()) { return; } std::stringstream removeMessagesByKeysSQLStream; removeMessagesByKeysSQLStream << "DELETE FROM messages " "WHERE thread IN " << getSQLStatementArray(threadIDs.size()) << ";"; removeEntitiesByKeys( SQLiteQueryExecutor::getConnection(), removeMessagesByKeysSQLStream.str(), threadIDs); } void SQLiteQueryExecutor::replaceMessage(const Message &message) const { static std::string replaceMessageSQL = "REPLACE INTO messages " "(id, local_id, thread, user, type, future_type, content, time) " "VALUES (?, ?, ?, ?, ?, ?, ?, ?);"; replaceEntity( SQLiteQueryExecutor::getConnection(), replaceMessageSQL, message); } void SQLiteQueryExecutor::rekeyMessage(std::string from, std::string to) const { static std::string rekeyMessageSQL = "UPDATE OR REPLACE messages " "SET id = ? 
" "WHERE id = ?"; rekeyAllEntities( SQLiteQueryExecutor::getConnection(), rekeyMessageSQL, from, to); } void SQLiteQueryExecutor::removeAllMedia() const { static std::string removeAllMediaSQL = "DELETE FROM media;"; removeAllEntities(SQLiteQueryExecutor::getConnection(), removeAllMediaSQL); } void SQLiteQueryExecutor::removeMediaForMessages( const std::vector &msg_ids) const { if (!msg_ids.size()) { return; } std::stringstream removeMediaByKeysSQLStream; removeMediaByKeysSQLStream << "DELETE FROM media " "WHERE container IN " << getSQLStatementArray(msg_ids.size()) << ";"; removeEntitiesByKeys( SQLiteQueryExecutor::getConnection(), removeMediaByKeysSQLStream.str(), msg_ids); } void SQLiteQueryExecutor::removeMediaForMessage(std::string msg_id) const { static std::string removeMediaByKeySQL = "DELETE FROM media " "WHERE container IN (?);"; std::vector keys = {msg_id}; removeEntitiesByKeys( SQLiteQueryExecutor::getConnection(), removeMediaByKeySQL, keys); } void SQLiteQueryExecutor::removeMediaForThreads( const std::vector &thread_ids) const { if (!thread_ids.size()) { return; } std::stringstream removeMediaByKeysSQLStream; removeMediaByKeysSQLStream << "DELETE FROM media " "WHERE thread IN " << getSQLStatementArray(thread_ids.size()) << ";"; removeEntitiesByKeys( SQLiteQueryExecutor::getConnection(), removeMediaByKeysSQLStream.str(), thread_ids); } void SQLiteQueryExecutor::replaceMedia(const Media &media) const { static std::string replaceMediaSQL = "REPLACE INTO media " "(id, container, thread, uri, type, extras) " "VALUES (?, ?, ?, ?, ?, ?)"; replaceEntity( SQLiteQueryExecutor::getConnection(), replaceMediaSQL, media); } void SQLiteQueryExecutor::rekeyMediaContainers(std::string from, std::string to) const { static std::string rekeyMediaContainersSQL = "UPDATE media SET container = ? 
WHERE container = ?;"; rekeyAllEntities( SQLiteQueryExecutor::getConnection(), rekeyMediaContainersSQL, from, to); } void SQLiteQueryExecutor::replaceMessageStoreThreads( const std::vector &threads) const { static std::string replaceMessageStoreThreadSQL = "REPLACE INTO message_store_threads " "(id, start_reached) " "VALUES (?, ?);"; for (auto &thread : threads) { replaceEntity( SQLiteQueryExecutor::getConnection(), replaceMessageStoreThreadSQL, thread); } } void SQLiteQueryExecutor::removeAllMessageStoreThreads() const { static std::string removeAllMessageStoreThreadsSQL = "DELETE FROM message_store_threads;"; removeAllEntities( SQLiteQueryExecutor::getConnection(), removeAllMessageStoreThreadsSQL); } void SQLiteQueryExecutor::removeMessageStoreThreads( const std::vector &ids) const { if (!ids.size()) { return; } std::stringstream removeMessageStoreThreadsByKeysSQLStream; removeMessageStoreThreadsByKeysSQLStream << "DELETE FROM message_store_threads " "WHERE id IN " << getSQLStatementArray(ids.size()) << ";"; removeEntitiesByKeys( SQLiteQueryExecutor::getConnection(), removeMessageStoreThreadsByKeysSQLStream.str(), ids); } std::vector SQLiteQueryExecutor::getAllMessageStoreThreads() const { static std::string getAllMessageStoreThreadsSQL = "SELECT * " "FROM message_store_threads;"; return getAllEntities( SQLiteQueryExecutor::getConnection(), getAllMessageStoreThreadsSQL); } std::vector SQLiteQueryExecutor::getAllThreads() const { static std::string getAllThreadsSQL = "SELECT * " "FROM threads;"; return getAllEntities( SQLiteQueryExecutor::getConnection(), getAllThreadsSQL); }; void SQLiteQueryExecutor::removeThreads(std::vector ids) const { if (!ids.size()) { return; } std::stringstream removeThreadsByKeysSQLStream; removeThreadsByKeysSQLStream << "DELETE FROM threads " "WHERE id IN " << getSQLStatementArray(ids.size()) << ";"; removeEntitiesByKeys( SQLiteQueryExecutor::getConnection(), removeThreadsByKeysSQLStream.str(), ids); }; void SQLiteQueryExecutor::replaceThread(const Thread &thread) const { static std::string replaceThreadSQL = "REPLACE INTO threads (" " id, type, name, description, color, creation_time, parent_thread_id," " containing_thread_id, community, members, roles, current_user," " source_message_id, replies_count, avatar, pinned_count) " "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);"; replaceEntity( SQLiteQueryExecutor::getConnection(), replaceThreadSQL, thread); }; void SQLiteQueryExecutor::removeAllThreads() const { static std::string removeAllThreadsSQL = "DELETE FROM threads;"; removeAllEntities(SQLiteQueryExecutor::getConnection(), removeAllThreadsSQL); }; void SQLiteQueryExecutor::replaceReport(const Report &report) const { static std::string replaceReportSQL = "REPLACE INTO reports (id, report) " "VALUES (?, ?);"; replaceEntity( SQLiteQueryExecutor::getConnection(), replaceReportSQL, report); } void SQLiteQueryExecutor::removeAllReports() const { static std::string removeAllReportsSQL = "DELETE FROM reports;"; removeAllEntities(SQLiteQueryExecutor::getConnection(), removeAllReportsSQL); } void SQLiteQueryExecutor::removeReports( const std::vector &ids) const { if (!ids.size()) { return; } std::stringstream removeReportsByKeysSQLStream; removeReportsByKeysSQLStream << "DELETE FROM reports " "WHERE id IN " << getSQLStatementArray(ids.size()) << ";"; removeEntitiesByKeys( SQLiteQueryExecutor::getConnection(), removeReportsByKeysSQLStream.str(), ids); } std::vector SQLiteQueryExecutor::getAllReports() const { static std::string getAllReportsSQL = "SELECT * 
" "FROM reports;"; return getAllEntities( SQLiteQueryExecutor::getConnection(), getAllReportsSQL); } void SQLiteQueryExecutor::setPersistStorageItem( std::string key, std::string item) const { static std::string replacePersistStorageItemSQL = "REPLACE INTO persist_storage (key, item) " "VALUES (?, ?);"; PersistItem entry{ key, item, }; replaceEntity( SQLiteQueryExecutor::getConnection(), replacePersistStorageItemSQL, entry); } void SQLiteQueryExecutor::removePersistStorageItem(std::string key) const { static std::string removePersistStorageItemByKeySQL = "DELETE FROM persist_storage " "WHERE key IN (?);"; std::vector keys = {key}; removeEntitiesByKeys( SQLiteQueryExecutor::getConnection(), removePersistStorageItemByKeySQL, keys); } std::string SQLiteQueryExecutor::getPersistStorageItem(std::string key) const { static std::string getPersistStorageItemByPrimaryKeySQL = "SELECT * " "FROM persist_storage " "WHERE key = ?;"; std::unique_ptr entry = getEntityByPrimaryKey( SQLiteQueryExecutor::getConnection(), getPersistStorageItemByPrimaryKeySQL, key); return (entry == nullptr) ? "" : entry->item; } void SQLiteQueryExecutor::replaceUser(const UserInfo &user_info) const { static std::string replaceUserSQL = "REPLACE INTO users (id, user_info) " "VALUES (?, ?);"; replaceEntity( SQLiteQueryExecutor::getConnection(), replaceUserSQL, user_info); } void SQLiteQueryExecutor::removeAllUsers() const { static std::string removeAllUsersSQL = "DELETE FROM users;"; removeAllEntities(SQLiteQueryExecutor::getConnection(), removeAllUsersSQL); } void SQLiteQueryExecutor::removeUsers( const std::vector &ids) const { if (!ids.size()) { return; } std::stringstream removeUsersByKeysSQLStream; removeUsersByKeysSQLStream << "DELETE FROM users " "WHERE id IN " << getSQLStatementArray(ids.size()) << ";"; removeEntitiesByKeys( SQLiteQueryExecutor::getConnection(), removeUsersByKeysSQLStream.str(), ids); } void SQLiteQueryExecutor::replaceKeyserver( const KeyserverInfo &keyserver_info) const { static std::string replaceKeyserverSQL = "REPLACE INTO keyservers (id, keyserver_info) " "VALUES (:id, :keyserver_info);"; replaceEntity( SQLiteQueryExecutor::getConnection(), replaceKeyserverSQL, keyserver_info); static std::string replaceKeyserverSyncedSQL = "REPLACE INTO keyservers_synced (id, keyserver_info) " "VALUES (:id, :synced_keyserver_info);"; replaceEntity( SQLiteQueryExecutor::getConnection(), replaceKeyserverSyncedSQL, keyserver_info); } void SQLiteQueryExecutor::removeAllKeyservers() const { static std::string removeAllKeyserversSQL = "DELETE FROM keyservers;"; removeAllEntities( SQLiteQueryExecutor::getConnection(), removeAllKeyserversSQL); static std::string removeAllKeyserversSyncedSQL = "DELETE FROM keyservers_synced;"; removeAllEntities( SQLiteQueryExecutor::getConnection(), removeAllKeyserversSyncedSQL); } void SQLiteQueryExecutor::removeKeyservers( const std::vector &ids) const { if (!ids.size()) { return; } auto idArray = getSQLStatementArray(ids.size()); std::stringstream removeKeyserversByKeysSQLStream; removeKeyserversByKeysSQLStream << "DELETE FROM keyservers " "WHERE id IN " << idArray << ";"; removeEntitiesByKeys( SQLiteQueryExecutor::getConnection(), removeKeyserversByKeysSQLStream.str(), ids); std::stringstream removeKeyserversSyncedByKeysSQLStream; removeKeyserversSyncedByKeysSQLStream << "DELETE FROM keyservers_synced " "WHERE id IN " << idArray << ";"; removeEntitiesByKeys( SQLiteQueryExecutor::getConnection(), removeKeyserversSyncedByKeysSQLStream.str(), ids); } std::vector 
SQLiteQueryExecutor::getAllKeyservers() const { static std::string getAllKeyserversSQL = "SELECT " " synced.id, " " COALESCE(keyservers.keyserver_info, ''), " " synced.keyserver_info " "FROM keyservers_synced synced " "LEFT JOIN keyservers " " ON synced.id = keyservers.id;"; return getAllEntities( SQLiteQueryExecutor::getConnection(), getAllKeyserversSQL); } std::vector SQLiteQueryExecutor::getAllUsers() const { static std::string getAllUsersSQL = "SELECT * " "FROM users;"; return getAllEntities( SQLiteQueryExecutor::getConnection(), getAllUsersSQL); } void SQLiteQueryExecutor::replaceCommunity( const CommunityInfo &community_info) const { static std::string replaceCommunitySQL = "REPLACE INTO communities (id, community_info) " "VALUES (?, ?);"; replaceEntity( SQLiteQueryExecutor::getConnection(), replaceCommunitySQL, community_info); } void SQLiteQueryExecutor::removeAllCommunities() const { static std::string removeAllCommunitiesSQL = "DELETE FROM communities;"; removeAllEntities( SQLiteQueryExecutor::getConnection(), removeAllCommunitiesSQL); } void SQLiteQueryExecutor::removeCommunities( const std::vector &ids) const { if (!ids.size()) { return; } std::stringstream removeCommunitiesByKeysSQLStream; removeCommunitiesByKeysSQLStream << "DELETE FROM communities " "WHERE id IN " << getSQLStatementArray(ids.size()) << ";"; removeEntitiesByKeys( SQLiteQueryExecutor::getConnection(), removeCommunitiesByKeysSQLStream.str(), ids); } std::vector SQLiteQueryExecutor::getAllCommunities() const { static std::string getAllCommunitiesSQL = "SELECT * " "FROM communities;"; return getAllEntities( SQLiteQueryExecutor::getConnection(), getAllCommunitiesSQL); } void SQLiteQueryExecutor::replaceIntegrityThreadHashes( const std::vector &threadHashes) const { static std::string replaceIntegrityThreadHashSQL = "REPLACE INTO integrity_store (id, thread_hash) " "VALUES (?, ?);"; for (const IntegrityThreadHash &integrityThreadHash : threadHashes) { replaceEntity( SQLiteQueryExecutor::getConnection(), replaceIntegrityThreadHashSQL, integrityThreadHash); } } void SQLiteQueryExecutor::removeAllIntegrityThreadHashes() const { static std::string removeAllIntegrityThreadHashesSQL = "DELETE FROM integrity_store;"; removeAllEntities( SQLiteQueryExecutor::getConnection(), removeAllIntegrityThreadHashesSQL); } void SQLiteQueryExecutor::removeIntegrityThreadHashes( const std::vector &ids) const { if (!ids.size()) { return; } std::stringstream removeIntegrityThreadHashesByKeysSQLStream; removeIntegrityThreadHashesByKeysSQLStream << "DELETE FROM integrity_store " "WHERE id IN " << getSQLStatementArray(ids.size()) << ";"; removeEntitiesByKeys( SQLiteQueryExecutor::getConnection(), removeIntegrityThreadHashesByKeysSQLStream.str(), ids); } std::vector SQLiteQueryExecutor::getAllIntegrityThreadHashes() const { static std::string getAllIntegrityThreadHashesSQL = "SELECT * " "FROM integrity_store;"; return getAllEntities( SQLiteQueryExecutor::getConnection(), getAllIntegrityThreadHashesSQL); } void SQLiteQueryExecutor::replaceSyncedMetadataEntry( const SyncedMetadataEntry &synced_metadata_entry) const { static std::string replaceSyncedMetadataEntrySQL = "REPLACE INTO synced_metadata (name, data) " "VALUES (?, ?);"; replaceEntity( SQLiteQueryExecutor::getConnection(), replaceSyncedMetadataEntrySQL, synced_metadata_entry); } void SQLiteQueryExecutor::removeAllSyncedMetadata() const { static std::string removeAllSyncedMetadataSQL = "DELETE FROM synced_metadata;"; removeAllEntities( SQLiteQueryExecutor::getConnection(), 
removeAllSyncedMetadataSQL); } void SQLiteQueryExecutor::removeSyncedMetadata( const std::vector &names) const { if (!names.size()) { return; } std::stringstream removeSyncedMetadataByNamesSQLStream; removeSyncedMetadataByNamesSQLStream << "DELETE FROM synced_metadata " "WHERE name IN " << getSQLStatementArray(names.size()) << ";"; removeEntitiesByKeys( SQLiteQueryExecutor::getConnection(), removeSyncedMetadataByNamesSQLStream.str(), names); } std::vector SQLiteQueryExecutor::getAllSyncedMetadata() const { static std::string getAllSyncedMetadataSQL = "SELECT * " "FROM synced_metadata;"; return getAllEntities( SQLiteQueryExecutor::getConnection(), getAllSyncedMetadataSQL); } void SQLiteQueryExecutor::beginTransaction() const { executeQuery(SQLiteQueryExecutor::getConnection(), "BEGIN TRANSACTION;"); } void SQLiteQueryExecutor::commitTransaction() const { executeQuery(SQLiteQueryExecutor::getConnection(), "COMMIT;"); } void SQLiteQueryExecutor::rollbackTransaction() const { executeQuery(SQLiteQueryExecutor::getConnection(), "ROLLBACK;"); } int SQLiteQueryExecutor::getContentAccountID() const { return CONTENT_ACCOUNT_ID; } int SQLiteQueryExecutor::getNotifsAccountID() const { return NOTIFS_ACCOUNT_ID; } std::vector SQLiteQueryExecutor::getOlmPersistSessionsData() const { static std::string getAllOlmPersistSessionsSQL = "SELECT * " "FROM olm_persist_sessions;"; return getAllEntities( SQLiteQueryExecutor::getConnection(), getAllOlmPersistSessionsSQL); } std::optional SQLiteQueryExecutor::getOlmPersistAccountData(int accountID) const { static std::string getOlmPersistAccountSQL = "SELECT * " "FROM olm_persist_account " "WHERE id = ?;"; std::unique_ptr result = getEntityByIntegerPrimaryKey( SQLiteQueryExecutor::getConnection(), getOlmPersistAccountSQL, accountID); if (result == nullptr) { return std::nullopt; } return result->account_data; } void SQLiteQueryExecutor::storeOlmPersistAccount( int accountID, const std::string &accountData) const { static std::string replaceOlmPersistAccountSQL = "REPLACE INTO olm_persist_account (id, account_data) " "VALUES (?, ?);"; OlmPersistAccount persistAccount = {accountID, accountData}; replaceEntity( SQLiteQueryExecutor::getConnection(), replaceOlmPersistAccountSQL, persistAccount); } void SQLiteQueryExecutor::storeOlmPersistSession( const OlmPersistSession &session) const { static std::string replaceOlmPersistSessionSQL = "REPLACE INTO olm_persist_sessions (target_user_id, session_data) " "VALUES (?, ?);"; replaceEntity( SQLiteQueryExecutor::getConnection(), replaceOlmPersistSessionSQL, session); } void SQLiteQueryExecutor::storeOlmPersistData( int accountID, crypto::Persist persist) const { if (accountID != CONTENT_ACCOUNT_ID && persist.sessions.size() > 0) { throw std::runtime_error( "Attempt to store notifications sessions in SQLite. 
Notifications " "sessions must be stored in storage shared with NSE."); } std::string accountData = std::string(persist.account.begin(), persist.account.end()); this->storeOlmPersistAccount(accountID, accountData); for (auto it = persist.sessions.begin(); it != persist.sessions.end(); it++) { OlmPersistSession persistSession = { it->first, std::string(it->second.begin(), it->second.end())}; this->storeOlmPersistSession(persistSession); } } void SQLiteQueryExecutor::setNotifyToken(std::string token) const { this->setMetadata("notify_token", token); } void SQLiteQueryExecutor::clearNotifyToken() const { this->clearMetadata("notify_token"); } void SQLiteQueryExecutor::setCurrentUserID(std::string userID) const { this->setMetadata("current_user_id", userID); } std::string SQLiteQueryExecutor::getCurrentUserID() const { return this->getMetadata("current_user_id"); } void SQLiteQueryExecutor::setMetadata(std::string entry_name, std::string data) const { std::string replaceMetadataSQL = "REPLACE INTO metadata (name, data) " "VALUES (?, ?);"; Metadata entry{ entry_name, data, }; replaceEntity( SQLiteQueryExecutor::getConnection(), replaceMetadataSQL, entry); } void SQLiteQueryExecutor::clearMetadata(std::string entry_name) const { static std::string removeMetadataByKeySQL = "DELETE FROM metadata " "WHERE name IN (?);"; std::vector keys = {entry_name}; removeEntitiesByKeys( SQLiteQueryExecutor::getConnection(), removeMetadataByKeySQL, keys); } std::string SQLiteQueryExecutor::getMetadata(std::string entry_name) const { std::string getMetadataByPrimaryKeySQL = "SELECT * " "FROM metadata " "WHERE name = ?;"; std::unique_ptr entry = getEntityByPrimaryKey( SQLiteQueryExecutor::getConnection(), getMetadataByPrimaryKeySQL, entry_name); return (entry == nullptr) ? "" : entry->data; } void SQLiteQueryExecutor::addMessagesToDevice( const std::vector &messages) const { static std::string addMessageToDevice = "REPLACE INTO messages_to_device (" " message_id, device_id, user_id, timestamp, plaintext, ciphertext) " "VALUES (?, ?, ?, ?, ?, ?);"; for (const ClientMessageToDevice &clientMessage : messages) { MessageToDevice message = clientMessage.toMessageToDevice(); replaceEntity( SQLiteQueryExecutor::getConnection(), addMessageToDevice, message); } } std::vector SQLiteQueryExecutor::getAllMessagesToDevice(const std::string &deviceID) const { std::string query = "SELECT * FROM messages_to_device " "WHERE device_id = ? " "ORDER BY timestamp;"; SQLiteStatementWrapper preparedSQL( SQLiteQueryExecutor::getConnection(), query, "Failed to get all messages to device"); sqlite3_bind_text(preparedSQL, 1, deviceID.c_str(), -1, SQLITE_TRANSIENT); std::vector messages; for (int stepResult = sqlite3_step(preparedSQL); stepResult == SQLITE_ROW; stepResult = sqlite3_step(preparedSQL)) { messages.emplace_back( ClientMessageToDevice(MessageToDevice::fromSQLResult(preparedSQL, 0))); } return messages; } void SQLiteQueryExecutor::removeMessagesToDeviceOlderThan( const ClientMessageToDevice &lastConfirmedMessageClient) const { static std::string query = "DELETE FROM messages_to_device " "WHERE timestamp <= ? 
AND device_id IN (?);"; MessageToDevice lastConfirmedMessage = lastConfirmedMessageClient.toMessageToDevice(); comm::SQLiteStatementWrapper preparedSQL( SQLiteQueryExecutor::getConnection(), query, "Failed to remove messages to device"); sqlite3_bind_int64(preparedSQL, 1, lastConfirmedMessage.timestamp); sqlite3_bind_text( preparedSQL, 2, lastConfirmedMessage.device_id.c_str(), -1, SQLITE_TRANSIENT); int result = sqlite3_step(preparedSQL); if (result != SQLITE_DONE) { throw std::runtime_error( "Failed to execute removeMessagesToDeviceOlderThan statement: " + std::string(sqlite3_errmsg(SQLiteQueryExecutor::getConnection()))); } } void SQLiteQueryExecutor::removeAllMessagesForDevice( const std::string &deviceID) const { static std::string removeMessagesSQL = "DELETE FROM messages_to_device " "WHERE device_id IN (?);"; std::vector keys = {deviceID}; removeEntitiesByKeys( SQLiteQueryExecutor::getConnection(), removeMessagesSQL, keys); } #ifdef EMSCRIPTEN std::vector SQLiteQueryExecutor::getAllThreadsWeb() const { auto threads = this->getAllThreads(); std::vector webThreads; webThreads.reserve(threads.size()); for (const auto &thread : threads) { webThreads.emplace_back(thread); } return webThreads; }; void SQLiteQueryExecutor::replaceThreadWeb(const WebThread &thread) const { this->replaceThread(thread.toThread()); }; std::vector SQLiteQueryExecutor::getAllMessagesWeb() const { auto allMessages = this->getAllMessages(); std::vector allMessageWithMedias; for (auto &messageWitMedia : allMessages) { allMessageWithMedias.push_back( {std::move(messageWitMedia.first), messageWitMedia.second}); } return allMessageWithMedias; } void SQLiteQueryExecutor::replaceMessageWeb(const WebMessage &message) const { this->replaceMessage(message.toMessage()); }; NullableString SQLiteQueryExecutor::getOlmPersistAccountDataWeb(int accountID) const { std::optional accountData = this->getOlmPersistAccountData(accountID); if (!accountData.has_value()) { return NullableString(); } return std::make_unique(accountData.value()); } #else void SQLiteQueryExecutor::clearSensitiveData() { SQLiteQueryExecutor::closeConnection(); if (file_exists(SQLiteQueryExecutor::sqliteFilePath) && std::remove(SQLiteQueryExecutor::sqliteFilePath.c_str())) { std::ostringstream errorStream; errorStream << "Failed to delete database file. 
Details: " << strerror(errno); throw std::system_error(errno, std::generic_category(), errorStream.str()); } SQLiteQueryExecutor::generateFreshEncryptionKey(); SQLiteQueryExecutor::migrate(); } void SQLiteQueryExecutor::initialize(std::string &databasePath) { std::call_once(SQLiteQueryExecutor::initialized, [&databasePath]() { SQLiteQueryExecutor::sqliteFilePath = databasePath; folly::Optional maybeEncryptionKey = CommSecureStore::get(SQLiteQueryExecutor::secureStoreEncryptionKeyID); folly::Optional maybeBackupLogsEncryptionKey = CommSecureStore::get( SQLiteQueryExecutor::secureStoreBackupLogsEncryptionKeyID); if (file_exists(databasePath) && maybeEncryptionKey && maybeBackupLogsEncryptionKey) { SQLiteQueryExecutor::encryptionKey = maybeEncryptionKey.value(); SQLiteQueryExecutor::backupLogsEncryptionKey = maybeBackupLogsEncryptionKey.value(); return; } else if (file_exists(databasePath) && maybeEncryptionKey) { SQLiteQueryExecutor::encryptionKey = maybeEncryptionKey.value(); SQLiteQueryExecutor::generateFreshBackupLogsEncryptionKey(); return; } SQLiteQueryExecutor::generateFreshEncryptionKey(); }); } void SQLiteQueryExecutor::initializeTablesForLogMonitoring() { sqlite3 *db; sqlite3_open(SQLiteQueryExecutor::sqliteFilePath.c_str(), &db); default_on_db_open_callback(db); std::vector tablesToMonitor; { SQLiteStatementWrapper preparedSQL( db, "SELECT name FROM sqlite_master WHERE type='table';", "Failed to get all database tables"); for (int stepResult = sqlite3_step(preparedSQL); stepResult == SQLITE_ROW; stepResult = sqlite3_step(preparedSQL)) { std::string table_name = reinterpret_cast(sqlite3_column_text(preparedSQL, 0)); if (SQLiteQueryExecutor::backedUpTablesBlocklist.find(table_name) == SQLiteQueryExecutor::backedUpTablesBlocklist.end()) { tablesToMonitor.emplace_back(table_name); } } // Runs preparedSQL destructor which finalizes the sqlite statement } sqlite3_close(db); SQLiteQueryExecutor::connectionManager.tablesToMonitor = tablesToMonitor; } void SQLiteQueryExecutor::createMainCompaction(std::string backupID) const { std::string finalBackupPath = PlatformSpecificTools::getBackupFilePath(backupID, false); std::string finalAttachmentsPath = PlatformSpecificTools::getBackupFilePath(backupID, true); std::string tempBackupPath = finalBackupPath + "_tmp"; std::string tempAttachmentsPath = finalAttachmentsPath + "_tmp"; if (file_exists(tempBackupPath)) { Logger::log( "Attempting to delete temporary backup file from previous backup " "attempt."); attempt_delete_file( tempBackupPath, "Failed to delete temporary backup file from previous backup attempt."); } if (file_exists(tempAttachmentsPath)) { Logger::log( "Attempting to delete temporary attachments file from previous backup " "attempt."); attempt_delete_file( tempAttachmentsPath, "Failed to delete temporary attachments file from previous backup " "attempt."); } sqlite3 *backupDB; sqlite3_open(tempBackupPath.c_str(), &backupDB); set_encryption_key(backupDB); sqlite3_backup *backupObj = sqlite3_backup_init( backupDB, "main", SQLiteQueryExecutor::getConnection(), "main"); if (!backupObj) { std::stringstream error_message; error_message << "Failed to init backup for main compaction. Details: " << sqlite3_errmsg(backupDB) << std::endl; sqlite3_close(backupDB); throw std::runtime_error(error_message.str()); } int backupResult = sqlite3_backup_step(backupObj, -1); sqlite3_backup_finish(backupObj); if (backupResult == SQLITE_BUSY || backupResult == SQLITE_LOCKED) { sqlite3_close(backupDB); throw std::runtime_error( "Programmer error. 
Database in transaction during backup attempt."); } else if (backupResult != SQLITE_DONE) { sqlite3_close(backupDB); std::stringstream error_message; error_message << "Failed to create database backup. Details: " << sqlite3_errstr(backupResult); throw std::runtime_error(error_message.str()); } if (!SQLiteQueryExecutor::backedUpTablesBlocklist.empty()) { std::string removeDeviceSpecificDataSQL = ""; for (const auto &table_name : SQLiteQueryExecutor::backedUpTablesBlocklist) { removeDeviceSpecificDataSQL.append("DELETE FROM " + table_name + ";\n"); } executeQuery(backupDB, removeDeviceSpecificDataSQL); } executeQuery(backupDB, "VACUUM;"); sqlite3_close(backupDB); attempt_rename_file( tempBackupPath, finalBackupPath, "Failed to rename complete temporary backup file to final backup file."); std::ofstream tempAttachmentsFile(tempAttachmentsPath); if (!tempAttachmentsFile.is_open()) { throw std::runtime_error( "Unable to create attachments file for backup id: " + backupID); } std::string getAllBlobServiceMediaSQL = "SELECT * FROM media WHERE uri LIKE 'comm-blob-service://%';"; std::vector blobServiceMedia = getAllEntities( SQLiteQueryExecutor::getConnection(), getAllBlobServiceMediaSQL); for (const auto &media : blobServiceMedia) { std::string blobServiceURI = media.uri; std::string blobHash = blob_hash_from_blob_service_uri(blobServiceURI); tempAttachmentsFile << blobHash << "\n"; } tempAttachmentsFile.close(); attempt_rename_file( tempAttachmentsPath, finalAttachmentsPath, "Failed to rename complete temporary attachments file to final " "attachments file."); this->setMetadata("backupID", backupID); this->clearMetadata("logID"); if (StaffUtils::isStaffRelease()) { SQLiteQueryExecutor::connectionManager.setLogsMonitoring(true); } } void SQLiteQueryExecutor::generateFreshEncryptionKey() { std::string encryptionKey = comm::crypto::Tools::generateRandomHexString( SQLiteQueryExecutor::sqlcipherEncryptionKeySize); CommSecureStore::set( SQLiteQueryExecutor::secureStoreEncryptionKeyID, encryptionKey); SQLiteQueryExecutor::encryptionKey = encryptionKey; SQLiteQueryExecutor::generateFreshBackupLogsEncryptionKey(); } void SQLiteQueryExecutor::generateFreshBackupLogsEncryptionKey() { std::string backupLogsEncryptionKey = comm::crypto::Tools::generateRandomHexString( SQLiteQueryExecutor::backupLogsEncryptionKeySize); CommSecureStore::set( SQLiteQueryExecutor::secureStoreBackupLogsEncryptionKeyID, backupLogsEncryptionKey); SQLiteQueryExecutor::backupLogsEncryptionKey = backupLogsEncryptionKey; } void SQLiteQueryExecutor::captureBackupLogs() const { std::string backupID = this->getMetadata("backupID"); if (!backupID.size()) { return; } std::string logID = this->getMetadata("logID"); if (!logID.size()) { logID = "1"; } bool newLogCreated = SQLiteQueryExecutor::connectionManager.captureLogs( backupID, logID, SQLiteQueryExecutor::backupLogsEncryptionKey); if (!newLogCreated) { return; } this->setMetadata("logID", std::to_string(std::stoi(logID) + 1)); } #endif void SQLiteQueryExecutor::restoreFromMainCompaction( std::string mainCompactionPath, std::string mainCompactionEncryptionKey) const { if (!file_exists(mainCompactionPath)) { throw std::runtime_error("Restore attempt but backup file does not exist."); } sqlite3 *backupDB; if (!is_database_queryable( backupDB, true, mainCompactionPath, mainCompactionEncryptionKey)) { throw std::runtime_error("Backup file or encryption key corrupted."); } // We don't want to run `PRAGMA key = ...;` // on main web database. 
The context is here: // https://linear.app/comm/issue/ENG-6398/issues-with-sqlcipher-on-web #ifdef EMSCRIPTEN std::string plaintextBackupPath = mainCompactionPath + "_plaintext"; if (file_exists(plaintextBackupPath)) { attempt_delete_file( plaintextBackupPath, "Failed to delete plaintext backup file from previous backup attempt."); } std::string plaintextMigrationDBQuery = "PRAGMA key = \"x'" + mainCompactionEncryptionKey + "'\";" "ATTACH DATABASE '" + plaintextBackupPath + "' AS plaintext KEY '';" "SELECT sqlcipher_export('plaintext');" "DETACH DATABASE plaintext;"; sqlite3_open(mainCompactionPath.c_str(), &backupDB); char *plaintextMigrationErr; sqlite3_exec( backupDB, plaintextMigrationDBQuery.c_str(), nullptr, nullptr, &plaintextMigrationErr); sqlite3_close(backupDB); if (plaintextMigrationErr) { std::stringstream error_message; error_message << "Failed to migrate backup SQLCipher file to plaintext " "SQLite file. Details" << plaintextMigrationErr << std::endl; std::string error_message_str = error_message.str(); sqlite3_free(plaintextMigrationErr); throw std::runtime_error(error_message_str); } sqlite3_open(plaintextBackupPath.c_str(), &backupDB); #else sqlite3_open(mainCompactionPath.c_str(), &backupDB); set_encryption_key(backupDB, mainCompactionEncryptionKey); #endif sqlite3_backup *backupObj = sqlite3_backup_init( SQLiteQueryExecutor::getConnection(), "main", backupDB, "main"); if (!backupObj) { std::stringstream error_message; error_message << "Failed to init backup for main compaction. Details: " << sqlite3_errmsg(SQLiteQueryExecutor::getConnection()) << std::endl; sqlite3_close(backupDB); throw std::runtime_error(error_message.str()); } int backupResult = sqlite3_backup_step(backupObj, -1); sqlite3_backup_finish(backupObj); sqlite3_close(backupDB); if (backupResult == SQLITE_BUSY || backupResult == SQLITE_LOCKED) { throw std::runtime_error( "Programmer error. Database in transaction during restore attempt."); } else if (backupResult != SQLITE_DONE) { std::stringstream error_message; error_message << "Failed to restore database from backup. 
Details: " << sqlite3_errstr(backupResult); throw std::runtime_error(error_message.str()); } #ifdef EMSCRIPTEN attempt_delete_file( plaintextBackupPath, "Failed to delete plaintext compaction file after successful restore."); #endif attempt_delete_file( mainCompactionPath, "Failed to delete main compaction file after successful restore."); } void SQLiteQueryExecutor::restoreFromBackupLog( const std::vector &backupLog) const { SQLiteQueryExecutor::connectionManager.restoreFromBackupLog(backupLog); } } // namespace comm diff --git a/native/redux/persist.js b/native/redux/persist.js index f995414ef..04b83548d 100644 --- a/native/redux/persist.js +++ b/native/redux/persist.js @@ -1,1254 +1,1293 @@ // @flow import AsyncStorage from '@react-native-async-storage/async-storage'; import invariant from 'invariant'; import { Platform } from 'react-native'; import Orientation from 'react-native-orientation-locker'; import { createTransform } from 'redux-persist'; import type { Transform } from 'redux-persist/es/types.js'; import { convertEntryStoreToNewIDSchema, convertInviteLinksStoreToNewIDSchema, convertMessageStoreToNewIDSchema, convertRawMessageInfoToNewIDSchema, convertCalendarFilterToNewIDSchema, convertConnectionInfoToNewIDSchema, } from 'lib/_generated/migration-utils.js'; import { extractKeyserverIDFromID } from 'lib/keyserver-conn/keyserver-call-utils.js'; import { type ClientDBIntegrityStoreOperation, integrityStoreOpsHandlers, type ReplaceIntegrityThreadHashesOperation, } from 'lib/ops/integrity-store-ops.js'; import { type ClientDBKeyserverStoreOperation, keyserverStoreOpsHandlers, type ReplaceKeyserverOperation, } from 'lib/ops/keyserver-store-ops.js'; import { type ClientDBMessageStoreOperation, messageStoreOpsHandlers, } from 'lib/ops/message-store-ops.js'; import { type ReportStoreOperation, type ClientDBReportStoreOperation, convertReportsToReplaceReportOps, reportStoreOpsHandlers, } from 'lib/ops/report-store-ops.js'; import type { ClientDBThreadStoreOperation } from 'lib/ops/thread-store-ops.js'; import { threadStoreOpsHandlers } from 'lib/ops/thread-store-ops.js'; import { type ClientDBUserStoreOperation, type UserStoreOperation, convertUserInfosToReplaceUserOps, userStoreOpsHandlers, } from 'lib/ops/user-store-ops.js'; import { filterThreadIDsInFilterList } from 'lib/reducers/calendar-filters-reducer.js'; import { highestLocalIDSelector } from 'lib/selectors/local-id-selectors.js'; import { createAsyncMigrate } from 'lib/shared/create-async-migrate.js'; import { inconsistencyResponsesToReports } from 'lib/shared/report-utils.js'; import { getContainingThreadID, getCommunity, assertAllThreadInfosAreLegacy, } from 'lib/shared/thread-utils.js'; import { keyserverStoreTransform } from 'lib/shared/transforms/keyserver-store-transform.js'; import { DEPRECATED_unshimMessageStore, unshimFunc, } from 'lib/shared/unshim-utils.js'; import { defaultEnabledApps } from 'lib/types/enabled-apps.js'; import { defaultCalendarQuery } from 'lib/types/entry-types.js'; import { defaultCalendarFilters } from 'lib/types/filter-types.js'; import type { KeyserverInfo } from 'lib/types/keyserver-types.js'; import { messageTypes, type MessageType, } from 'lib/types/message-types-enum.js'; import { type LocalMessageInfo, type MessageStore, type MessageStoreThreads, type RawMessageInfo, } from 'lib/types/message-types.js'; import { minimallyEncodeRawThreadInfo } from 'lib/types/minimally-encoded-thread-permissions-types.js'; import type { RawThreadInfo } from 
'lib/types/minimally-encoded-thread-permissions-types.js'; import type { ReportStore, ClientReportCreationRequest, } from 'lib/types/report-types.js'; import { defaultConnectionInfo } from 'lib/types/socket-types.js'; import { defaultGlobalThemeInfo } from 'lib/types/theme-types.js'; import type { ClientDBThreadInfo, LegacyRawThreadInfo, MixedRawThreadInfos, } from 'lib/types/thread-types.js'; import { wipeKeyserverStore } from 'lib/utils/keyserver-store-utils.js'; import { translateClientDBMessageInfoToRawMessageInfo, translateRawMessageInfoToClientDBMessageInfo, } from 'lib/utils/message-ops-utils.js'; import { generateIDSchemaMigrationOpsForDrafts, convertMessageStoreThreadsToNewIDSchema, convertThreadStoreThreadInfosToNewIDSchema, } from 'lib/utils/migration-utils.js'; import { entries } from 'lib/utils/objects.js'; import { defaultNotifPermissionAlertInfo } from 'lib/utils/push-alerts.js'; import { resetUserSpecificState } from 'lib/utils/reducers-utils.js'; import { deprecatedConvertClientDBThreadInfoToRawThreadInfo, convertRawThreadInfoToClientDBThreadInfo, } from 'lib/utils/thread-ops-utils.js'; import { getUUID } from 'lib/utils/uuid.js'; import { createUpdateDBOpsForMessageStoreMessages, createUpdateDBOpsForMessageStoreThreads, } from './client-db-utils.js'; import { defaultState } from './default-state.js'; import { deprecatedCreateUpdateDBOpsForThreadStoreThreadInfos, deprecatedUpdateClientDBThreadStoreThreadInfos, } from './deprecated-client-db-utils.js'; import { migrateThreadStoreForEditThreadPermissions } from './edit-thread-permission-migration.js'; import { legacyUpdateRolesAndPermissions } from './legacy-update-roles-and-permissions.js'; import { persistMigrationForManagePinsThreadPermission } from './manage-pins-permission-migration.js'; import { persistMigrationToRemoveSelectRolePermissions } from './remove-select-role-permissions.js'; import type { AppState } from './state-types.js'; import { nonUserSpecificFieldsNative } from './state-types.js'; import { unshimClientDB } from './unshim-utils.js'; import { authoritativeKeyserverID } from '../authoritative-keyserver.js'; import { commCoreModule } from '../native-modules.js'; import { defaultDeviceCameraInfo } from '../types/camera.js'; import { isTaskCancelledError } from '../utils/error-handling.js'; import { defaultURLPrefix } from '../utils/url-utils.js'; const persistBlacklist = [ 'loadingStatuses', 'lifecycleState', 'dimensions', 'draftStore', 'connectivity', 'deviceOrientation', 'frozen', 'threadStore', 'storeLoaded', 'dbOpsStore', 'syncedMetadataStore', ]; function handleReduxMigrationFailure(oldState: AppState): AppState { const persistedNonUserSpecificFields = nonUserSpecificFieldsNative.filter( field => !persistBlacklist.includes(field) || field === '_persist', ); const stateAfterReset = resetUserSpecificState( oldState, defaultState, persistedNonUserSpecificFields, ); return { ...stateAfterReset, keyserverStore: wipeKeyserverStore(stateAfterReset.keyserverStore), }; } const migrations = { [1]: (state: AppState) => ({ ...state, notifPermissionAlertInfo: defaultNotifPermissionAlertInfo, }), [2]: (state: AppState) => ({ ...state, messageSentFromRoute: [], }), [3]: (state: any) => ({ currentUserInfo: state.currentUserInfo, entryStore: state.entryStore, threadInfos: state.threadInfos, userInfos: state.userInfos, messageStore: { ...state.messageStore, currentAsOf: state.currentAsOf, }, updatesCurrentAsOf: state.currentAsOf, cookie: state.cookie, deviceToken: state.deviceToken, urlPrefix: state.urlPrefix, 
customServer: state.customServer, notifPermissionAlertInfo: state.notifPermissionAlertInfo, messageSentFromRoute: state.messageSentFromRoute, _persist: state._persist, }), [4]: (state: AppState) => ({ ...state, pingTimestamps: undefined, activeServerRequests: undefined, }), [5]: (state: AppState) => ({ ...state, calendarFilters: defaultCalendarFilters, }), [6]: (state: any) => ({ ...state, threadInfos: undefined, threadStore: { threadInfos: state.threadInfos, inconsistencyResponses: [], }, }), [7]: (state: AppState) => ({ ...state, lastUserInteraction: undefined, sessionID: undefined, entryStore: { ...state.entryStore, inconsistencyResponses: [], }, }), [8]: (state: AppState) => ({ ...state, pingTimestamps: undefined, activeServerRequests: undefined, connection: { ...defaultConnectionInfo, actualizedCalendarQuery: defaultCalendarQuery(Platform.OS), }, watchedThreadIDs: [], entryStore: { ...state.entryStore, actualizedCalendarQuery: undefined, }, }), [9]: (state: any) => ({ ...state, connection: { ...state.connection, lateResponses: [], }, }), [10]: (state: any) => ({ ...state, nextLocalID: highestLocalIDSelector(state) + 1, connection: { ...state.connection, showDisconnectedBar: false, }, messageStore: { ...state.messageStore, local: {}, }, }), [11]: (state: AppState) => ({ ...state, messageStore: DEPRECATED_unshimMessageStore(state.messageStore, [ messageTypes.IMAGES, ]), }), [12]: (state: AppState) => ({ ...state, globalThemeInfo: defaultGlobalThemeInfo, }), [13]: (state: AppState) => ({ ...state, deviceCameraInfo: defaultDeviceCameraInfo, deviceOrientation: Orientation.getInitialOrientation(), }), [14]: (state: AppState) => state, [15]: (state: any) => ({ ...state, threadStore: { ...state.threadStore, inconsistencyReports: inconsistencyResponsesToReports( state.threadStore.inconsistencyResponses, ), inconsistencyResponses: undefined, }, entryStore: { ...state.entryStore, inconsistencyReports: inconsistencyResponsesToReports( state.entryStore.inconsistencyResponses, ), inconsistencyResponses: undefined, }, queuedReports: [], }), [16]: (state: any) => { const result = { ...state, messageSentFromRoute: undefined, dataLoaded: !!state.currentUserInfo && !state.currentUserInfo.anonymous, }; if (state.navInfo) { result.navInfo = { ...state.navInfo, navigationState: undefined, }; } return result; }, [17]: (state: any) => ({ ...state, userInfos: undefined, userStore: { userInfos: state.userInfos, inconsistencyResponses: [], }, }), [18]: (state: AppState) => ({ ...state, userStore: { userInfos: state.userStore.userInfos, inconsistencyReports: [], }, }), [19]: (state: any) => { const threadInfos: { [string]: LegacyRawThreadInfo } = {}; for (const threadID in state.threadStore.threadInfos) { const threadInfo = state.threadStore.threadInfos[threadID]; const { visibilityRules, ...rest } = threadInfo; threadInfos[threadID] = rest; } return { ...state, threadStore: { ...state.threadStore, threadInfos, }, }; }, [20]: (state: AppState) => ({ ...state, messageStore: DEPRECATED_unshimMessageStore(state.messageStore, [ messageTypes.LEGACY_UPDATE_RELATIONSHIP, ]), }), [21]: (state: AppState) => ({ ...state, messageStore: DEPRECATED_unshimMessageStore(state.messageStore, [ messageTypes.CREATE_SIDEBAR, messageTypes.SIDEBAR_SOURCE, ]), }), [22]: (state: any) => { for (const key in state.drafts) { const value = state.drafts[key]; try { void commCoreModule.updateDraft(key, value); } catch (e) { if (!isTaskCancelledError(e)) { throw e; } } } return { ...state, drafts: undefined, }; }, [23]: (state: AppState) => ({ 
...state, globalThemeInfo: defaultGlobalThemeInfo, }), [24]: (state: AppState) => ({ ...state, enabledApps: defaultEnabledApps, }), [25]: (state: AppState) => ({ ...state, crashReportsEnabled: __DEV__, }), [26]: (state: any) => { const { currentUserInfo } = state; if (currentUserInfo.anonymous) { return state; } return { ...state, crashReportsEnabled: undefined, currentUserInfo: { id: currentUserInfo.id, username: currentUserInfo.username, }, enabledReports: { crashReports: __DEV__, inconsistencyReports: __DEV__, mediaReports: __DEV__, }, }; }, [27]: (state: any) => ({ ...state, queuedReports: undefined, enabledReports: undefined, threadStore: { ...state.threadStore, inconsistencyReports: undefined, }, entryStore: { ...state.entryStore, inconsistencyReports: undefined, }, reportStore: { enabledReports: { crashReports: __DEV__, inconsistencyReports: __DEV__, mediaReports: __DEV__, }, queuedReports: [ ...state.entryStore.inconsistencyReports, ...state.threadStore.inconsistencyReports, ...state.queuedReports, ], }, }), [28]: (state: AppState) => { const threadParentToChildren: { [string]: string[] } = {}; for (const threadID in state.threadStore.threadInfos) { const threadInfo = state.threadStore.threadInfos[threadID]; const parentThreadInfo = threadInfo.parentThreadID ? state.threadStore.threadInfos[threadInfo.parentThreadID] : null; const parentIndex = parentThreadInfo ? parentThreadInfo.id : '-1'; if (!threadParentToChildren[parentIndex]) { threadParentToChildren[parentIndex] = []; } threadParentToChildren[parentIndex].push(threadID); } const rootIDs = threadParentToChildren['-1']; if (!rootIDs) { // This should never happen, but if it somehow does we'll let the state // check mechanism resolve it... return state; } const threadInfos: { [string]: LegacyRawThreadInfo | RawThreadInfo, } = {}; const stack = [...rootIDs]; while (stack.length > 0) { const threadID = stack.shift(); const threadInfo = state.threadStore.threadInfos[threadID]; const parentThreadInfo = threadInfo.parentThreadID ? 
threadInfos[threadInfo.parentThreadID] : null; threadInfos[threadID] = { ...threadInfo, containingThreadID: getContainingThreadID( parentThreadInfo, threadInfo.type, ), community: getCommunity(parentThreadInfo), }; const children = threadParentToChildren[threadID]; if (children) { stack.push(...children); } } return { ...state, threadStore: { ...state.threadStore, threadInfos } }; }, [29]: (state: AppState) => { const legacyRawThreadInfos: { +[id: string]: LegacyRawThreadInfo, } = assertAllThreadInfosAreLegacy(state.threadStore.threadInfos); const updatedThreadInfos = migrateThreadStoreForEditThreadPermissions(legacyRawThreadInfos); return { ...state, threadStore: { ...state.threadStore, threadInfos: updatedThreadInfos, }, }; }, [30]: (state: AppState) => { const threadInfos = state.threadStore.threadInfos; const operations = [ { type: 'remove_all', }, ...Object.keys(threadInfos).map((id: string) => ({ type: 'replace', payload: { id, threadInfo: threadInfos[id] }, })), ]; try { commCoreModule.processThreadStoreOperationsSync( threadStoreOpsHandlers.convertOpsToClientDBOps(operations), ); } catch (exception) { console.log(exception); if (isTaskCancelledError(exception)) { return state; } return { ...state, cookie: null }; } return state; }, [31]: (state: AppState) => { const messages = state.messageStore.messages; const operations: $ReadOnlyArray = [ { type: 'remove_all', }, ...Object.keys(messages).map((id: string) => ({ type: 'replace', payload: translateRawMessageInfoToClientDBMessageInfo(messages[id]), })), ]; try { commCoreModule.processMessageStoreOperationsSync(operations); } catch (exception) { console.log(exception); if (isTaskCancelledError(exception)) { return state; } return { ...state, cookie: null }; } return state; }, [32]: (state: AppState) => unshimClientDB(state, [messageTypes.MULTIMEDIA]), [33]: (state: AppState) => unshimClientDB(state, [messageTypes.REACTION]), [34]: (state: any) => { const { threadIDsToNotifIDs, ...stateSansThreadIDsToNotifIDs } = state; return stateSansThreadIDsToNotifIDs; }, [35]: (state: AppState) => unshimClientDB(state, [messageTypes.MULTIMEDIA]), [36]: (state: AppState) => { // 1. Get threads and messages from SQLite `threads` and `messages` tables. const clientDBThreadInfos = commCoreModule.getAllThreadsSync(); const clientDBMessageInfos = commCoreModule.getAllMessagesSync(); // 2. Translate `ClientDBThreadInfo`s to `RawThreadInfo`s and // `ClientDBMessageInfo`s to `RawMessageInfo`s. const rawThreadInfos = clientDBThreadInfos.map( deprecatedConvertClientDBThreadInfoToRawThreadInfo, ); const rawMessageInfos = clientDBMessageInfos.map( translateClientDBMessageInfoToRawMessageInfo, ); // 3. Unshim translated `RawMessageInfos` to get the TOGGLE_PIN messages const unshimmedRawMessageInfos = rawMessageInfos.map(messageInfo => unshimFunc(messageInfo, new Set([messageTypes.TOGGLE_PIN])), ); // 4. Filter out non-TOGGLE_PIN messages const filteredRawMessageInfos = unshimmedRawMessageInfos .map(messageInfo => messageInfo.type === messageTypes.TOGGLE_PIN ? messageInfo : null, ) .filter(Boolean); // 5. We want only the last TOGGLE_PIN message for each message ID, // so 'pin', 'unpin', 'pin' don't count as 3 pins, but only 1. const lastMessageIDToRawMessageInfoMap = new Map(); for (const messageInfo of filteredRawMessageInfos) { const { targetMessageID } = messageInfo; lastMessageIDToRawMessageInfoMap.set(targetMessageID, messageInfo); } const lastMessageIDToRawMessageInfos = Array.from( lastMessageIDToRawMessageInfoMap.values(), ); // 6. 
Create a Map of threadIDs to pinnedCount const threadIDsToPinnedCount = new Map(); for (const messageInfo of lastMessageIDToRawMessageInfos) { const { threadID, type } = messageInfo; if (type === messageTypes.TOGGLE_PIN) { const pinnedCount = threadIDsToPinnedCount.get(threadID) || 0; threadIDsToPinnedCount.set(threadID, pinnedCount + 1); } } // 7. Include a pinnedCount for each rawThreadInfo const rawThreadInfosWithPinnedCount = rawThreadInfos.map(threadInfo => ({ ...threadInfo, pinnedCount: threadIDsToPinnedCount.get(threadInfo.id) || 0, })); // 8. Convert rawThreadInfos to a map of threadID to threadInfo const threadIDToThreadInfo = rawThreadInfosWithPinnedCount.reduce( ( acc: { [string]: LegacyRawThreadInfo }, threadInfo: LegacyRawThreadInfo, ) => { acc[threadInfo.id] = threadInfo; return acc; }, {}, ); // 9. Add threadPermission to each threadInfo const rawThreadInfosWithThreadPermission = persistMigrationForManagePinsThreadPermission(threadIDToThreadInfo); // 10. Convert the new threadInfos back into an array const rawThreadInfosWithCountAndPermission = Object.keys( rawThreadInfosWithThreadPermission, ).map(id => rawThreadInfosWithThreadPermission[id]); // 11. Translate `RawThreadInfo`s to `ClientDBThreadInfo`s. const convertedClientDBThreadInfos = rawThreadInfosWithCountAndPermission.map( convertRawThreadInfoToClientDBThreadInfo, ); // 12. Construct `ClientDBThreadStoreOperation`s to clear SQLite `threads` // table and repopulate with `ClientDBThreadInfo`s. const operations: $ReadOnlyArray = [ { type: 'remove_all', }, ...convertedClientDBThreadInfos.map((thread: ClientDBThreadInfo) => ({ type: 'replace', payload: thread, })), ]; // 13. Try processing `ClientDBThreadStoreOperation`s and log out if // `processThreadStoreOperationsSync(...)` throws an exception. 
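  // Illustrative sketch, not part of this patch: this migration and
  // migrations 38, 40, 59, and 60 below all share the same
  // read -> transform -> write-back cycle against the SQLite `threads` table.
  // Assuming a hypothetical per-migration transform `transformThreadInfo`,
  // the shared shape is roughly:
  function sketchThreadStoreMigration(state, transformThreadInfo) {
    // Read every thread row out of SQLite and lift it to a RawThreadInfo.
    const rawThreadInfos = commCoreModule
      .getAllThreadsSync()
      .map(deprecatedConvertClientDBThreadInfoToRawThreadInfo);
    // Apply the migration-specific transformation in memory, then convert
    // back to the ClientDB representation.
    const convertedClientDBThreadInfos = rawThreadInfos
      .map(transformThreadInfo)
      .map(convertRawThreadInfoToClientDBThreadInfo);
    // Rebuild the table with a single remove_all followed by one replace per
    // thread, mirroring the operations arrays used elsewhere in this file.
    const operations = [
      { type: 'remove_all' },
      ...convertedClientDBThreadInfos.map(thread => ({
        type: 'replace',
        payload: thread,
      })),
    ];
    try {
      commCoreModule.processThreadStoreOperationsSync(operations);
    } catch (exception) {
      console.log(exception);
      // Older migrations return { ...state, cookie: null } here instead.
      return handleReduxMigrationFailure(state);
    }
    return state;
  }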
try { commCoreModule.processThreadStoreOperationsSync(operations); } catch (exception) { console.log(exception); return { ...state, cookie: null }; } return state; }, [37]: (state: AppState) => { const operations = messageStoreOpsHandlers.convertOpsToClientDBOps([ { type: 'remove_all_threads', }, { type: 'replace_threads', payload: { threads: state.messageStore.threads }, }, ]); try { commCoreModule.processMessageStoreOperationsSync(operations); } catch (exception) { console.error(exception); if (isTaskCancelledError(exception)) { return state; } return { ...state, cookie: null }; } return state; }, [38]: (state: AppState) => deprecatedUpdateClientDBThreadStoreThreadInfos( state, legacyUpdateRolesAndPermissions, ), [39]: (state: AppState) => unshimClientDB(state, [messageTypes.EDIT_MESSAGE]), [40]: (state: AppState) => deprecatedUpdateClientDBThreadStoreThreadInfos( state, legacyUpdateRolesAndPermissions, ), [41]: (state: AppState) => { const queuedReports = state.reportStore.queuedReports.map(report => ({ ...report, id: getUUID(), })); return { ...state, reportStore: { ...state.reportStore, queuedReports }, }; }, [42]: (state: AppState) => { const reportStoreOperations: $ReadOnlyArray = [ { type: 'remove_all_reports' }, ...convertReportsToReplaceReportOps(state.reportStore.queuedReports), ]; const dbOperations: $ReadOnlyArray = reportStoreOpsHandlers.convertOpsToClientDBOps(reportStoreOperations); try { commCoreModule.processReportStoreOperationsSync(dbOperations); } catch (exception) { if (isTaskCancelledError(exception)) { return state; } return { ...state, cookie: null }; } return state; }, [43]: async (state: any) => { const { messages, drafts, threads, messageStoreThreads } = await commCoreModule.getClientDBStore(); const messageStoreThreadsOperations = createUpdateDBOpsForMessageStoreThreads( messageStoreThreads, convertMessageStoreThreadsToNewIDSchema, ); const messageStoreMessagesOperations = createUpdateDBOpsForMessageStoreMessages(messages, messageInfos => messageInfos.map(convertRawMessageInfoToNewIDSchema), ); const threadOperations = deprecatedCreateUpdateDBOpsForThreadStoreThreadInfos( threads, convertThreadStoreThreadInfosToNewIDSchema, ); const draftOperations = generateIDSchemaMigrationOpsForDrafts(drafts); try { await Promise.all([ commCoreModule.processMessageStoreOperations([ ...messageStoreMessagesOperations, ...messageStoreThreadsOperations, ]), commCoreModule.processThreadStoreOperations(threadOperations), commCoreModule.processDraftStoreOperations(draftOperations), ]); } catch (exception) { console.log(exception); return { ...state, cookie: null }; } const inviteLinksStore = state.inviteLinksStore ?? 
defaultState.inviteLinksStore; return { ...state, entryStore: convertEntryStoreToNewIDSchema(state.entryStore), messageStore: convertMessageStoreToNewIDSchema(state.messageStore), calendarFilters: state.calendarFilters.map( convertCalendarFilterToNewIDSchema, ), connection: convertConnectionInfoToNewIDSchema(state.connection), watchedThreadIDs: state.watchedThreadIDs.map( id => `${authoritativeKeyserverID}|${id}`, ), inviteLinksStore: convertInviteLinksStoreToNewIDSchema(inviteLinksStore), }; }, [44]: async (state: any) => { const { cookie, ...rest } = state; return { ...rest, keyserverStore: { keyserverInfos: { [authoritativeKeyserverID]: { cookie } }, }, }; }, [45]: async (state: any) => { const { updatesCurrentAsOf, keyserverStore, ...rest } = state; return { ...rest, keyserverStore: { ...keyserverStore, keyserverInfos: { ...keyserverStore.keyserverInfos, [authoritativeKeyserverID]: { ...keyserverStore.keyserverInfos[authoritativeKeyserverID], updatesCurrentAsOf, }, }, }, }; }, [46]: async (state: AppState) => { const { currentAsOf } = state.messageStore; return { ...state, messageStore: { ...state.messageStore, currentAsOf: { [authoritativeKeyserverID]: currentAsOf }, }, }; }, [47]: async (state: any) => { const { urlPrefix, keyserverStore, ...rest } = state; return { ...rest, keyserverStore: { ...keyserverStore, keyserverInfos: { ...keyserverStore.keyserverInfos, [authoritativeKeyserverID]: { ...keyserverStore.keyserverInfos[authoritativeKeyserverID], urlPrefix, }, }, }, }; }, [48]: async (state: any) => { const { connection, keyserverStore, ...rest } = state; return { ...rest, keyserverStore: { ...keyserverStore, keyserverInfos: { ...keyserverStore.keyserverInfos, [authoritativeKeyserverID]: { ...keyserverStore.keyserverInfos[authoritativeKeyserverID], connection, }, }, }, }; }, [49]: async (state: AppState) => { const { keyserverStore, ...rest } = state; const { connection, ...keyserverRest } = keyserverStore.keyserverInfos[authoritativeKeyserverID]; return { ...rest, keyserverStore: { ...keyserverStore, keyserverInfos: { ...keyserverStore.keyserverInfos, [authoritativeKeyserverID]: { ...keyserverRest, }, }, }, connection, }; }, [50]: async (state: any) => { const { connection, ...rest } = state; const { actualizedCalendarQuery, ...connectionRest } = connection; return { ...rest, connection: connectionRest, actualizedCalendarQuery, }; }, [51]: async (state: any) => { const { lastCommunicatedPlatformDetails, keyserverStore, ...rest } = state; return { ...rest, keyserverStore: { ...keyserverStore, keyserverInfos: { ...keyserverStore.keyserverInfos, [authoritativeKeyserverID]: { ...keyserverStore.keyserverInfos[authoritativeKeyserverID], lastCommunicatedPlatformDetails, }, }, }, }; }, [52]: async (state: AppState) => ({ ...state, integrityStore: { threadHashes: {}, threadHashingStatus: 'data_not_loaded', }, }), [53]: (state: any) => { if (!state.userStore.inconsistencyReports) { return state; } const reportStoreOperations = convertReportsToReplaceReportOps( state.userStore.inconsistencyReports, ); const dbOperations: $ReadOnlyArray = reportStoreOpsHandlers.convertOpsToClientDBOps(reportStoreOperations); try { commCoreModule.processReportStoreOperationsSync(dbOperations); } catch (exception) { if (isTaskCancelledError(exception)) { return state; } return handleReduxMigrationFailure(state); } const { inconsistencyReports, ...newUserStore } = state.userStore; const queuedReports = reportStoreOpsHandlers.processStoreOperations( state.reportStore.queuedReports, reportStoreOperations, ); 
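    // Illustrative sketch, not part of this patch: the catch blocks in these
    // migrations follow one convention. A cancelled task is treated as benign
    // and the untouched state is returned, while any other failure falls back
    // to clearing user-specific state. Factored into a hypothetical helper
    // `runMigrationDBOps`, that convention looks roughly like this:
    async function runMigrationDBOps(state, processOps) {
      try {
        await processOps();
        return state;
      } catch (exception) {
        console.log(exception);
        if (isTaskCancelledError(exception)) {
          // Treated as benign: keep the persisted state as-is.
          return state;
        }
        // Anything else resets user-specific state; older migrations return
        // { ...state, cookie: null } instead of handleReduxMigrationFailure.
        return handleReduxMigrationFailure(state);
      }
    }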
return { ...state, userStore: newUserStore, reportStore: { ...state.reportStore, queuedReports, }, }; }, [54]: (state: any) => { let updatedMessageStoreThreads: MessageStoreThreads = {}; for (const threadID: string in state.messageStore.threads) { const { lastNavigatedTo, lastPruned, ...rest } = state.messageStore.threads[threadID]; updatedMessageStoreThreads = { ...updatedMessageStoreThreads, [threadID]: rest, }; } return { ...state, messageStore: { ...state.messageStore, threads: updatedMessageStoreThreads, }, }; }, [55]: async (state: AppState) => __DEV__ ? { ...state, keyserverStore: { ...state.keyserverStore, keyserverInfos: { ...state.keyserverStore.keyserverInfos, [authoritativeKeyserverID]: { ...state.keyserverStore.keyserverInfos[ authoritativeKeyserverID ], urlPrefix: defaultURLPrefix, }, }, }, } : state, [56]: (state: any) => { const { deviceToken, keyserverStore, ...rest } = state; return { ...rest, keyserverStore: { ...keyserverStore, keyserverInfos: { ...keyserverStore.keyserverInfos, [authoritativeKeyserverID]: { ...keyserverStore.keyserverInfos[authoritativeKeyserverID], deviceToken, }, }, }, }; }, [57]: async (state: any) => { const { connection, keyserverStore: { keyserverInfos }, ...rest } = state; const newKeyserverInfos: { [string]: KeyserverInfo } = {}; for (const key in keyserverInfos) { newKeyserverInfos[key] = { ...keyserverInfos[key], connection: { ...defaultConnectionInfo }, }; } return { ...rest, keyserverStore: { ...state.keyserverStore, keyserverInfos: newKeyserverInfos, }, }; }, [58]: async (state: AppState) => { const userStoreOperations: $ReadOnlyArray = [ { type: 'remove_all_users' }, ...convertUserInfosToReplaceUserOps(state.userStore.userInfos), ]; const dbOperations: $ReadOnlyArray = userStoreOpsHandlers.convertOpsToClientDBOps(userStoreOperations); try { await commCoreModule.processUserStoreOperations(dbOperations); } catch (exception) { if (isTaskCancelledError(exception)) { return state; } return handleReduxMigrationFailure(state); } return state; }, [59]: (state: AppState) => { const clientDBThreadInfos = commCoreModule.getAllThreadsSync(); const rawThreadInfos = clientDBThreadInfos.map( deprecatedConvertClientDBThreadInfoToRawThreadInfo, ); const rawThreadInfosObject = rawThreadInfos.reduce( ( acc: { [string]: LegacyRawThreadInfo }, threadInfo: LegacyRawThreadInfo, ) => { acc[threadInfo.id] = threadInfo; return acc; }, {}, ); const migratedRawThreadInfos = persistMigrationToRemoveSelectRolePermissions(rawThreadInfosObject); const migratedThreadInfosArray = Object.keys(migratedRawThreadInfos).map( id => migratedRawThreadInfos[id], ); const convertedClientDBThreadInfos = migratedThreadInfosArray.map( convertRawThreadInfoToClientDBThreadInfo, ); const operations: $ReadOnlyArray = [ { type: 'remove_all', }, ...convertedClientDBThreadInfos.map((thread: ClientDBThreadInfo) => ({ type: 'replace', payload: thread, })), ]; try { commCoreModule.processThreadStoreOperationsSync(operations); } catch (exception) { console.log(exception); return handleReduxMigrationFailure(state); } return state; }, [60]: (state: AppState) => deprecatedUpdateClientDBThreadStoreThreadInfos( state, legacyUpdateRolesAndPermissions, handleReduxMigrationFailure, ), [61]: (state: AppState) => { const minimallyEncodeThreadInfosFunc = ( threadStoreInfos: MixedRawThreadInfos, ): MixedRawThreadInfos => Object.keys(threadStoreInfos).reduce( ( acc: { [string]: LegacyRawThreadInfo | RawThreadInfo, }, key: string, ) => { const threadInfo = threadStoreInfos[key]; acc[threadInfo.id] = 
threadInfo.minimallyEncoded ? threadInfo : minimallyEncodeRawThreadInfo(threadInfo); return acc; }, {}, ); return deprecatedUpdateClientDBThreadStoreThreadInfos( state, minimallyEncodeThreadInfosFunc, handleReduxMigrationFailure, ); }, [62]: async (state: AppState) => { const replaceOps: $ReadOnlyArray = entries( state.keyserverStore.keyserverInfos, ).map(([id, keyserverInfo]) => ({ type: 'replace_keyserver', payload: { id, keyserverInfo, }, })); const dbOperations: $ReadOnlyArray = keyserverStoreOpsHandlers.convertOpsToClientDBOps([ { type: 'remove_all_keyservers' }, ...replaceOps, ]); try { await commCoreModule.processKeyserverStoreOperations(dbOperations); } catch (exception) { if (isTaskCancelledError(exception)) { return state; } return handleReduxMigrationFailure(state); } return state; }, [63]: async (state: any) => { const { actualizedCalendarQuery, ...rest } = state; const operations: $ReadOnlyArray = entries( state.keyserverStore.keyserverInfos, ).map(([id, keyserverInfo]) => ({ type: 'replace_keyserver', payload: { id, keyserverInfo: { ...keyserverInfo, actualizedCalendarQuery: { ...actualizedCalendarQuery, filters: filterThreadIDsInFilterList( actualizedCalendarQuery.filters, (threadID: string) => extractKeyserverIDFromID(threadID) === id, ), }, }, }, })); const dbOperations: $ReadOnlyArray = keyserverStoreOpsHandlers.convertOpsToClientDBOps(operations); const newState = { ...rest, keyserverStore: keyserverStoreOpsHandlers.processStoreOperations( rest.keyserverStore, operations, ), }; try { await commCoreModule.processKeyserverStoreOperations(dbOperations); } catch (exception) { if (isTaskCancelledError(exception)) { return newState; } return handleReduxMigrationFailure(newState); } return newState; }, // Migration 64 is a noop to unblock a `native` release since the previous // contents are not ready to be deployed to prod and we don't want to // decrement migration 65. 
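  // Illustrative sketch, not part of this patch: `createAsyncMigrate`
  // (imported from lib/shared/create-async-migrate.js, not shown in this
  // diff) consumes the numbered map above. As a rough mental model only, such
  // a migrate function applies every pending migration in ascending order,
  // awaiting any async ones; the real implementation may differ in details:
  async function sketchMigrate(persistedState, currentVersion, migrationsMap) {
    const inboundVersion =
      persistedState && persistedState._persist
        ? persistedState._persist.version
        : -1;
    let state = persistedState;
    const pendingVersions = Object.keys(migrationsMap)
      .map(Number)
      .filter(version => version > inboundVersion && version <= currentVersion)
      .sort((a, b) => a - b);
    for (const version of pendingVersions) {
      state = await migrationsMap[version](state);
    }
    return state;
  }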
[64]: (state: AppState) => state, [65]: async (state: AppState) => { const replaceOp: ReplaceIntegrityThreadHashesOperation = { type: 'replace_integrity_thread_hashes', payload: { threadHashes: state.integrityStore.threadHashes, }, }; const dbOperations: $ReadOnlyArray = integrityStoreOpsHandlers.convertOpsToClientDBOps([ { type: 'remove_all_integrity_thread_hashes' }, replaceOp, ]); try { await commCoreModule.processIntegrityStoreOperations(dbOperations); } catch (exception) { if (isTaskCancelledError(exception)) { return state; } return handleReduxMigrationFailure(state); } return state; }, + [66]: async (state: AppState) => { + const stores = await commCoreModule.getClientDBStore(); + const keyserversDBInfo = stores.keyservers; + + const { translateClientDBData } = keyserverStoreOpsHandlers; + const keyservers = translateClientDBData(keyserversDBInfo); + + // There is no modification of the keyserver data, but the ops handling + // should correctly split the data between synced and non-synced tables + + const replaceOps: $ReadOnlyArray = entries( + keyservers, + ).map(([id, keyserverInfo]) => ({ + type: 'replace_keyserver', + payload: { + id, + keyserverInfo, + }, + })); + + const keyserverStoreOperations: $ReadOnlyArray = + keyserverStoreOpsHandlers.convertOpsToClientDBOps([ + { type: 'remove_all_keyservers' }, + ...replaceOps, + ]); + + try { + await commCoreModule.processKeyserverStoreOperations( + keyserverStoreOperations, + ); + } catch (exception) { + if (isTaskCancelledError(exception)) { + return state; + } + return handleReduxMigrationFailure(state); + } + + return state; + }, }; // After migration 31, we'll no longer want to persist `messageStore.messages` // via redux-persist. However, we DO want to continue persisting everything in // `messageStore` EXCEPT for `messages`. The `blacklist` property in // `persistConfig` allows us to specify top-level keys that shouldn't be // persisted. However, we aren't able to specify nested keys in `blacklist`. // As a result, if we want to prevent nested keys from being persisted we'll // need to use `createTransform(...)` to specify an `inbound` function that // allows us to modify the `state` object before it's passed through // `JSON.stringify(...)` and written to disk. We specify the keys for which // this transformation should be executed in the `whitelist` property of the // `config` object that's passed to `createTransform(...)`. type PersistedMessageStore = { +local: { +[id: string]: LocalMessageInfo }, +currentAsOf: { +[keyserverID: string]: number }, }; const messageStoreMessagesBlocklistTransform: Transform = createTransform( (state: MessageStore): PersistedMessageStore => { const { messages, threads, ...messageStoreSansMessages } = state; return { ...messageStoreSansMessages }; }, (state: MessageStore): MessageStore => { // We typically expect `messageStore.messages` to be `undefined` because // messages are persisted in the SQLite `messages` table rather than via // `redux-persist`. In this case we want to set `messageStore.messages` // to {} so we don't run into issues with `messageStore.messages` being // `undefined` (https://phab.comm.dev/D5545). // // However, in the case that a user is upgrading from a client where // `persistConfig.version` < 31, we expect `messageStore.messages` to // contain messages stored via `redux-persist` that we need in order // to correctly populate the SQLite `messages` table in migration 31 // (https://phab.comm.dev/D2600). 
// // However, because `messageStoreMessagesBlocklistTransform` modifies // `messageStore` before migrations are run, we need to make sure we aren't // inadvertently clearing `messageStore.messages` (by setting to {}) before // messages are stored in SQLite (https://linear.app/comm/issue/ENG-2377). return { ...state, threads: state.threads ?? {}, messages: state.messages ?? {}, }; }, { whitelist: ['messageStore'] }, ); type PersistedReportStore = $Diff< ReportStore, { +queuedReports: $ReadOnlyArray }, >; const reportStoreTransform: Transform = createTransform( (state: ReportStore): PersistedReportStore => { return { enabledReports: state.enabledReports }; }, (state: PersistedReportStore): ReportStore => { return { ...state, queuedReports: [] }; }, { whitelist: ['reportStore'] }, ); const persistConfig = { key: 'root', storage: AsyncStorage, blacklist: persistBlacklist, debug: __DEV__, - version: 65, + version: 66, transforms: [ messageStoreMessagesBlocklistTransform, reportStoreTransform, keyserverStoreTransform, ], migrate: (createAsyncMigrate(migrations, { debug: __DEV__ }): any), timeout: ((__DEV__ ? 0 : undefined): number | void), }; const codeVersion: number = commCoreModule.getCodeVersion(); // This local exists to avoid a circular dependency where redux-setup needs to // import all the navigation and screen stuff, but some of those screens want to // access the persistor to purge its state. let storedPersistor = null; function setPersistor(persistor: *) { storedPersistor = persistor; } function getPersistor(): empty { invariant(storedPersistor, 'should be set'); return storedPersistor; } export { persistConfig, codeVersion, setPersistor, getPersistor }; diff --git a/web/redux/persist.js b/web/redux/persist.js index 1b6d055f9..4e55294e1 100644 --- a/web/redux/persist.js +++ b/web/redux/persist.js @@ -1,373 +1,422 @@ // @flow import invariant from 'invariant'; import { getStoredState, purgeStoredState } from 'redux-persist'; import storage from 'redux-persist/es/storage/index.js'; import type { PersistConfig } from 'redux-persist/src/types.js'; import { type ClientDBKeyserverStoreOperation, keyserverStoreOpsHandlers, type ReplaceKeyserverOperation, } from 'lib/ops/keyserver-store-ops.js'; import { createAsyncMigrate, type StorageMigrationFunction, } from 'lib/shared/create-async-migrate.js'; import { keyserverStoreTransform } from 'lib/shared/transforms/keyserver-store-transform.js'; import { defaultCalendarQuery } from 'lib/types/entry-types.js'; import type { KeyserverInfo } from 'lib/types/keyserver-types.js'; import { cookieTypes } from 'lib/types/session-types.js'; import { defaultConnectionInfo } from 'lib/types/socket-types.js'; import { defaultGlobalThemeInfo } from 'lib/types/theme-types.js'; import { getConfig } from 'lib/utils/config.js'; import { parseCookies } from 'lib/utils/cookie-utils.js'; import { isDev } from 'lib/utils/dev-utils.js'; import { wipeKeyserverStore } from 'lib/utils/keyserver-store-utils.js'; import { generateIDSchemaMigrationOpsForDrafts, convertDraftStoreToNewIDSchema, } from 'lib/utils/migration-utils.js'; import { entries } from 'lib/utils/objects.js'; import { resetUserSpecificState } from 'lib/utils/reducers-utils.js'; import commReduxStorageEngine from './comm-redux-storage-engine.js'; import { defaultWebState } from './default-state.js'; import { rootKey, rootKeyPrefix } from './persist-constants.js'; import type { AppState } from './redux-setup.js'; import { nonUserSpecificFieldsWeb } from './redux-setup.js'; import { authoritativeKeyserverID 
} from '../authoritative-keyserver.js'; import { getCommSharedWorker } from '../shared-worker/shared-worker-provider.js'; import { getOlmWasmPath } from '../shared-worker/utils/constants.js'; import { isSQLiteSupported } from '../shared-worker/utils/db-utils.js'; import { workerRequestMessageTypes } from '../types/worker-types.js'; declare var keyserverURL: string; const persistWhitelist = [ 'enabledApps', 'notifPermissionAlertInfo', 'commServicesAccessToken', 'keyserverStore', 'globalThemeInfo', 'customServer', ]; function handleReduxMigrationFailure(oldState: AppState): AppState { const persistedNonUserSpecificFields = nonUserSpecificFieldsWeb.filter( field => persistWhitelist.includes(field) || field === '_persist', ); const stateAfterReset = resetUserSpecificState( oldState, defaultWebState, persistedNonUserSpecificFields, ); return { ...stateAfterReset, keyserverStore: wipeKeyserverStore(stateAfterReset.keyserverStore), }; } const migrations = { [1]: async (state: any) => { const { primaryIdentityPublicKey, ...stateWithoutPrimaryIdentityPublicKey } = state; return { ...stateWithoutPrimaryIdentityPublicKey, cryptoStore: { primaryAccount: null, primaryIdentityKeys: null, notificationAccount: null, notificationIdentityKeys: null, }, }; }, [2]: async (state: AppState) => { return state; }, [3]: async (state: AppState) => { let newState = state; if (state.draftStore) { newState = { ...newState, draftStore: convertDraftStoreToNewIDSchema(state.draftStore), }; } const sharedWorker = await getCommSharedWorker(); const isSupported = await sharedWorker.isSupported(); if (!isSupported) { return newState; } const stores = await sharedWorker.schedule({ type: workerRequestMessageTypes.GET_CLIENT_STORE, }); invariant(stores?.store, 'Stores should exist'); await sharedWorker.schedule({ type: workerRequestMessageTypes.PROCESS_STORE_OPERATIONS, storeOperations: { draftStoreOperations: generateIDSchemaMigrationOpsForDrafts( stores.store.drafts, ), }, }); return newState; }, [4]: async (state: any) => { const { lastCommunicatedPlatformDetails, keyserverStore, ...rest } = state; return { ...rest, keyserverStore: { ...keyserverStore, keyserverInfos: { ...keyserverStore.keyserverInfos, [authoritativeKeyserverID]: { ...keyserverStore.keyserverInfos[authoritativeKeyserverID], lastCommunicatedPlatformDetails, }, }, }, }; }, [5]: async (state: any) => { const sharedWorker = await getCommSharedWorker(); const isSupported = await sharedWorker.isSupported(); if (!isSupported) { return state; } if (!state.draftStore) { return state; } const { drafts } = state.draftStore; const draftStoreOperations = []; for (const key in drafts) { const text = drafts[key]; draftStoreOperations.push({ type: 'update', payload: { key, text }, }); } await sharedWorker.schedule({ type: workerRequestMessageTypes.PROCESS_STORE_OPERATIONS, storeOperations: { draftStoreOperations }, }); return state; }, [6]: async (state: AppState) => ({ ...state, integrityStore: { threadHashes: {}, threadHashingStatus: 'starting' }, }), [7]: async (state: AppState): Promise => { if (!document.cookie) { return state; } const params = parseCookies(document.cookie); let cookie = null; if (params[cookieTypes.USER]) { cookie = `${cookieTypes.USER}=${params[cookieTypes.USER]}`; } else if (params[cookieTypes.ANONYMOUS]) { cookie = `${cookieTypes.ANONYMOUS}=${params[cookieTypes.ANONYMOUS]}`; } return { ...state, keyserverStore: { ...state.keyserverStore, keyserverInfos: { ...state.keyserverStore.keyserverInfos, [authoritativeKeyserverID]: { 
...state.keyserverStore.keyserverInfos[authoritativeKeyserverID], cookie, }, }, }, }; }, [8]: async (state: AppState) => ({ ...state, globalThemeInfo: defaultGlobalThemeInfo, }), [9]: async (state: AppState) => ({ ...state, keyserverStore: { ...state.keyserverStore, keyserverInfos: { ...state.keyserverStore.keyserverInfos, [authoritativeKeyserverID]: { ...state.keyserverStore.keyserverInfos[authoritativeKeyserverID], urlPrefix: keyserverURL, }, }, }, }), [10]: async (state: AppState) => { const { keyserverInfos } = state.keyserverStore; const newKeyserverInfos: { [string]: KeyserverInfo } = {}; for (const key in keyserverInfos) { newKeyserverInfos[key] = { ...keyserverInfos[key], connection: { ...defaultConnectionInfo }, updatesCurrentAsOf: 0, sessionID: null, }; } return { ...state, keyserverStore: { ...state.keyserverStore, keyserverInfos: newKeyserverInfos, }, }; }, [11]: async (state: AppState) => { const sharedWorker = await getCommSharedWorker(); const isSupported = await sharedWorker.isSupported(); if (!isSupported) { return state; } const replaceOps: $ReadOnlyArray = entries( state.keyserverStore.keyserverInfos, ).map(([id, keyserverInfo]) => ({ type: 'replace_keyserver', payload: { id, keyserverInfo, }, })); const keyserverStoreOperations: $ReadOnlyArray = keyserverStoreOpsHandlers.convertOpsToClientDBOps([ { type: 'remove_all_keyservers' }, ...replaceOps, ]); try { await sharedWorker.schedule({ type: workerRequestMessageTypes.PROCESS_STORE_OPERATIONS, storeOperations: { keyserverStoreOperations }, }); return state; } catch (e) { console.log(e); return handleReduxMigrationFailure(state); } }, [12]: async (state: AppState) => { const sharedWorker = await getCommSharedWorker(); const isSupported = await sharedWorker.isSupported(); if (!isSupported) { return state; } const replaceOps: $ReadOnlyArray = entries( state.keyserverStore.keyserverInfos, ) .filter(([, keyserverInfo]) => !keyserverInfo.actualizedCalendarQuery) .map(([id, keyserverInfo]) => ({ type: 'replace_keyserver', payload: { id, keyserverInfo: { ...keyserverInfo, actualizedCalendarQuery: defaultCalendarQuery( getConfig().platformDetails.platform, ), }, }, })); if (replaceOps.length === 0) { return state; } const newState = { ...state, keyserverStore: keyserverStoreOpsHandlers.processStoreOperations( state.keyserverStore, replaceOps, ), }; const keyserverStoreOperations = keyserverStoreOpsHandlers.convertOpsToClientDBOps(replaceOps); try { await sharedWorker.schedule({ type: workerRequestMessageTypes.PROCESS_STORE_OPERATIONS, storeOperations: { keyserverStoreOperations }, }); return newState; } catch (e) { console.log(e); return handleReduxMigrationFailure(newState); } }, [13]: async (state: any) => { const { cryptoStore, ...rest } = state; const sharedWorker = await getCommSharedWorker(); await sharedWorker.schedule({ type: workerRequestMessageTypes.INITIALIZE_CRYPTO_ACCOUNT, olmWasmPath: getOlmWasmPath(), initialCryptoStore: cryptoStore, }); return rest; }, + [14]: async (state: AppState) => { + const sharedWorker = await getCommSharedWorker(); + const isSupported = await sharedWorker.isSupported(); + + if (!isSupported) { + return state; + } + + const stores = await sharedWorker.schedule({ + type: workerRequestMessageTypes.GET_CLIENT_STORE, + }); + const keyserversDBInfo = stores?.store?.keyservers; + if (!keyserversDBInfo) { + return state; + } + + const { translateClientDBData } = keyserverStoreOpsHandlers; + const keyservers = translateClientDBData(keyserversDBInfo); + + // There is no modification of the keyserver 
data, but the ops handling + // should correctly split the data between synced and non-synced tables + + const replaceOps: $ReadOnlyArray = entries( + keyservers, + ).map(([id, keyserverInfo]) => ({ + type: 'replace_keyserver', + payload: { + id, + keyserverInfo, + }, + })); + + const keyserverStoreOperations: $ReadOnlyArray = + keyserverStoreOpsHandlers.convertOpsToClientDBOps([ + { type: 'remove_all_keyservers' }, + ...replaceOps, + ]); + + try { + await sharedWorker.schedule({ + type: workerRequestMessageTypes.PROCESS_STORE_OPERATIONS, + storeOperations: { keyserverStoreOperations }, + }); + return state; + } catch (e) { + console.log(e); + return handleReduxMigrationFailure(state); + } + }, }; const migrateStorageToSQLite: StorageMigrationFunction = async debug => { const sharedWorker = await getCommSharedWorker(); const isSupported = await sharedWorker.isSupported(); if (!isSupported) { return undefined; } const oldStorage = await getStoredState({ storage, key: rootKey }); if (!oldStorage) { return undefined; } purgeStoredState({ storage, key: rootKey }); if (debug) { console.log('redux-persist: migrating state to SQLite storage'); } const allKeys = Object.keys(oldStorage); const transforms = persistConfig.transforms ?? []; const newStorage = { ...oldStorage }; for (const transform of transforms) { for (const key of allKeys) { const transformedStore = transform.out(newStorage[key], key, newStorage); newStorage[key] = transformedStore; } } return newStorage; }; const persistConfig: PersistConfig = { keyPrefix: rootKeyPrefix, key: rootKey, storage: commReduxStorageEngine, whitelist: isSQLiteSupported() ? persistWhitelist : [...persistWhitelist, 'draftStore'], migrate: (createAsyncMigrate( migrations, { debug: isDev }, migrateStorageToSQLite, ): any), - version: 13, + version: 14, transforms: [keyserverStoreTransform], }; export { persistConfig }; diff --git a/web/shared-worker/_generated/comm_query_executor.wasm b/web/shared-worker/_generated/comm_query_executor.wasm index b56da8048..7c3182ae4 100755 Binary files a/web/shared-worker/_generated/comm_query_executor.wasm and b/web/shared-worker/_generated/comm_query_executor.wasm differ
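Native migration 66 and web migration 14 above are the same operation behind two different transports: read the persisted keyserver rows back out of the client DB, translate them with translateClientDBData, and re-emit them unchanged as one remove_all_keyservers op plus one replace_keyserver op per entry so that, per the comment in both migrations, the ops handling re-splits each KeyserverInfo between the synced and non-synced tables. A condensed sketch of that shared recipe follows, with the platform transport (commCoreModule.processKeyserverStoreOperations on native, the shared worker's PROCESS_STORE_OPERATIONS request on web) abstracted behind a hypothetical processOps callback; the helper name and callback are illustrative only and do not appear in the patch.

// Condensed sketch of the recipe shared by native migration 66 and web
// migration 14; `processOps` is a hypothetical stand-in for the platform
// transport and is not part of this patch.
async function resaveKeyservers(state, keyserversDBInfo, processOps) {
  if (!keyserversDBInfo) {
    return state;
  }
  // Translate raw client-DB rows back into KeyserverInfo objects.
  const { translateClientDBData } = keyserverStoreOpsHandlers;
  const keyservers = translateClientDBData(keyserversDBInfo);
  // Re-emit the data unchanged; per the migration comments, the ops handlers
  // take care of splitting it between synced and non-synced tables.
  const replaceOps = entries(keyservers).map(([id, keyserverInfo]) => ({
    type: 'replace_keyserver',
    payload: { id, keyserverInfo },
  }));
  const keyserverStoreOperations =
    keyserverStoreOpsHandlers.convertOpsToClientDBOps([
      { type: 'remove_all_keyservers' },
      ...replaceOps,
    ]);
  try {
    await processOps(keyserverStoreOperations);
    return state;
  } catch (e) {
    console.log(e);
    return handleReduxMigrationFailure(state);
  }
}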