diff --git a/services/backup/docker-server/contents/server/dev/DevTools.h b/services/backup/docker-server/contents/server/dev/DevTools.h
deleted file mode 100644
--- a/services/backup/docker-server/contents/server/dev/DevTools.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#pragma once
-
-#include <string>
-
-namespace comm {
-namespace network {
-
-const std::string commFilesystemPath = "/tmp/comm";
-
-std::string createCommPath(const std::string &path);
-
-} // namespace network
-} // namespace comm
diff --git a/services/backup/docker-server/contents/server/dev/DevTools.cpp b/services/backup/docker-server/contents/server/dev/DevTools.cpp
deleted file mode 100644
--- a/services/backup/docker-server/contents/server/dev/DevTools.cpp
+++ /dev/null
@@ -1,14 +0,0 @@
-#include "DevTools.h"
-
-namespace comm {
-namespace network {
-
-std::string createCommPath(const std::string &path) {
-  if (path.substr(0, commFilesystemPath.size()) == commFilesystemPath) {
-    return path;
-  }
-  return commFilesystemPath + "/" + path;
-}
-
-} // namespace network
-} // namespace comm
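
A minimal sketch of the path mapping the removed helper implemented (the object name is illustrative): paths already rooted at commFilesystemPath come back unchanged, everything else gets the /tmp/comm prefix.

#include "DevTools.h"

#include <cassert>

int main() {
  using namespace comm::network;
  assert(createCommPath("backup-xyz") == "/tmp/comm/backup-xyz");
  // already-prefixed paths pass through untouched
  assert(createCommPath("/tmp/comm/backup-xyz") == "/tmp/comm/backup-xyz");
  return 0;
}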
diff --git a/services/backup/docker-server/contents/server/src/AwsS3Bucket.h b/services/backup/docker-server/contents/server/src/AwsS3Bucket.h
deleted file mode 100644
--- a/services/backup/docker-server/contents/server/src/AwsS3Bucket.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#pragma once
-
-#include <aws/core/Aws.h>
-#include <aws/s3/S3Client.h>
-
-#include <functional>
-#include <string>
-#include <vector>
-
-namespace comm {
-namespace network {
-
-class AwsS3Bucket {
-  const std::string name;
-  std::shared_ptr<Aws::S3::S3Client> client;
-
-public:
-  AwsS3Bucket(
-      const std::string name,
-      std::shared_ptr<Aws::S3::S3Client> client);
-
-  std::vector<std::string> listObjects();
-  bool isAvailable() const;
-  const size_t getObjectSize(const std::string &objectName);
-  void renameObject(const std::string &currentName, const std::string &newName);
-  void writeObject(const std::string &objectName, const std::string data);
-  std::string getObjectData(const std::string &objectName);
-  void getObjectDataChunks(
-      const std::string &objectName,
-      const std::function<void(const std::string &)> &callback,
-      const size_t chunkSize);
-  void appendToObject(const std::string &objectName, const std::string data);
-  void clearObject(const std::string &objectName);
-  void deleteObject(const std::string &objectName);
-};
-
-} // namespace network
-} // namespace comm
diff --git a/services/backup/docker-server/contents/server/src/AwsS3Bucket.cpp b/services/backup/docker-server/contents/server/src/AwsS3Bucket.cpp
deleted file mode 100644
--- a/services/backup/docker-server/contents/server/src/AwsS3Bucket.cpp
+++ /dev/null
@@ -1,222 +0,0 @@
-#include "AwsS3Bucket.h"
-#include "MultiPartUploader.h"
-#include "Tools.h"
-
-#include <aws/s3/model/CopyObjectRequest.h>
-#include <aws/s3/model/DeleteObjectRequest.h>
-#include <aws/s3/model/GetObjectRequest.h>
-#include <aws/s3/model/HeadBucketRequest.h>
-#include <aws/s3/model/HeadObjectRequest.h>
-#include <aws/s3/model/ListObjectsRequest.h>
-#include <aws/s3/model/Object.h>
-#include <aws/s3/model/PutObjectRequest.h>
-
-#include <boost/interprocess/streams/bufferstream.hpp>
-
-namespace comm {
-namespace network {
-
-AwsS3Bucket::AwsS3Bucket(
-    const std::string name,
-    std::shared_ptr<Aws::S3::S3Client> client)
-    : name(name), client(client) {
-}
-
-std::vector<std::string> AwsS3Bucket::listObjects() {
-  Aws::S3::Model::ListObjectsRequest request;
-  request.SetBucket(this->name);
-  std::vector<std::string> result;
-
-  Aws::S3::Model::ListObjectsOutcome outcome =
-      this->client->ListObjects(request);
-  if (!outcome.IsSuccess()) {
-    throw std::runtime_error(outcome.GetError().GetMessage());
-  }
-  Aws::Vector<Aws::S3::Model::Object> objects =
-      outcome.GetResult().GetContents();
-  for (Aws::S3::Model::Object &object : objects) {
-    result.push_back(object.GetKey());
-  }
-  return result;
-}
-
-bool AwsS3Bucket::isAvailable() const {
-  Aws::S3::Model::HeadBucketRequest headRequest;
-  headRequest.SetBucket(this->name);
-  Aws::S3::Model::HeadBucketOutcome outcome =
-      this->client->HeadBucket(headRequest);
-  return outcome.IsSuccess();
-}
-
-const size_t AwsS3Bucket::getObjectSize(const std::string &objectName) {
-  Aws::S3::Model::HeadObjectRequest headRequest;
-  headRequest.SetBucket(this->name);
-  headRequest.SetKey(objectName);
-  Aws::S3::Model::HeadObjectOutcome headOutcome =
-      this->client->HeadObject(headRequest);
-  if (!headOutcome.IsSuccess()) {
-    throw std::runtime_error(headOutcome.GetError().GetMessage());
-  }
-  return headOutcome.GetResultWithOwnership().GetContentLength();
-}
-
-void AwsS3Bucket::renameObject(
-    const std::string &currentName,
-    const std::string &newName) {
-  Aws::S3::Model::CopyObjectRequest copyRequest;
-  copyRequest.SetCopySource(this->name + "/" + currentName);
-  copyRequest.SetKey(newName);
-  copyRequest.SetBucket(this->name);
-
-  Aws::S3::Model::CopyObjectOutcome copyOutcome =
-      this->client->CopyObject(copyRequest);
-  if (!copyOutcome.IsSuccess()) {
-    throw std::runtime_error(copyOutcome.GetError().GetMessage());
-  }
-
-  this->deleteObject(currentName);
-}
-
-void AwsS3Bucket::writeObject(
-    const std::string &objectName,
-    const std::string data) {
-  // we don't have to handle a multipart write here because the gRPC limit is
-  // 4MB and the minimum part size for an S3 multipart upload is 5MB
-  Aws::S3::Model::PutObjectRequest request;
-  request.SetBucket(this->name);
-  request.SetKey(objectName);
-
-  std::shared_ptr<Aws::IOStream> body = std::shared_ptr<Aws::IOStream>(
-      new boost::interprocess::bufferstream((char *)data.data(), data.size()));
-
-  request.SetBody(body);
-
-  Aws::S3::Model::PutObjectOutcome outcome = this->client->PutObject(request);
-
-  if (!outcome.IsSuccess()) {
-    throw std::runtime_error(outcome.GetError().GetMessage());
-  }
-}
-
-std::string AwsS3Bucket::getObjectData(const std::string &objectName) {
-  Aws::S3::Model::GetObjectRequest request;
-  request.SetBucket(this->name);
-  request.SetKey(objectName);
-
-  Aws::S3::Model::GetObjectOutcome outcome = this->client->GetObject(request);
-
-  if (!outcome.IsSuccess()) {
-    throw std::runtime_error(outcome.GetError().GetMessage());
-  }
-
-  const size_t size = this->getObjectSize(objectName);
-  if (size > GRPC_CHUNK_SIZE_LIMIT) {
-    throw invalid_argument_error(std::string(
-        "The file is too big(" + std::to_string(size) + " bytes, max is " +
-        std::to_string(GRPC_CHUNK_SIZE_LIMIT) +
-        "bytes), please, use getObjectDataChunks"));
-  }
-  Aws::IOStream &retrievedFile = outcome.GetResultWithOwnership().GetBody();
-
-  std::string result;
-  result.resize(size);
-  retrievedFile.read((char *)result.data(), size);
-
-  return result;
-}
-
-void AwsS3Bucket::getObjectDataChunks(
-    const std::string &objectName,
-    const std::function<void(const std::string &)> &callback,
-    const size_t chunkSize) {
-  const size_t fileSize = this->getObjectSize(objectName);
-
-  if (fileSize == 0) {
-    return;
-  }
-
-  Aws::S3::Model::GetObjectRequest request;
-  request.SetBucket(this->name);
-  request.SetKey(objectName);
-  for (size_t offset = 0; offset < fileSize; offset += chunkSize) {
-    const size_t nextSize = std::min(chunkSize, fileSize - offset);
-
-    std::string range = "bytes=" + std::to_string(offset) + "-" +
-        std::to_string(offset + nextSize - 1);
-    request.SetRange(range);
-
-    Aws::S3::Model::GetObjectOutcome getOutcome =
-        this->client->GetObject(request);
-    if (!getOutcome.IsSuccess()) {
-      throw std::runtime_error(getOutcome.GetError().GetMessage());
-    }
-
-    Aws::IOStream &retrievedFile =
-        getOutcome.GetResultWithOwnership().GetBody();
-    std::string result;
-    result.resize(nextSize);
-    retrievedFile.read((char *)result.data(), nextSize);
-    callback(result);
-  }
-}
-
-void AwsS3Bucket::appendToObject(
-    const std::string &objectName,
-    const std::string data) {
-  const size_t objectSize = this->getObjectSize(objectName);
-  if (objectSize < AWS_MULTIPART_UPLOAD_MINIMUM_CHUNK_SIZE) {
-    std::string currentData = this->getObjectData(objectName);
-    currentData += data;
-    this->writeObject(objectName, currentData);
-    return;
-  }
-  size_t currentSize = 0;
-  MultiPartUploader uploader(
-      this->client, this->name, objectName + "-multipart");
-  std::function<void(const std::string &)> callback =
-      [&uploader, &data, &currentSize, objectSize](const std::string &chunk) {
-        currentSize += chunk.size();
-        if (currentSize < objectSize) {
-          uploader.addPart(chunk);
-        } else if (currentSize == objectSize) {
-          uploader.addPart(std::string(chunk + data));
-        } else {
-          throw std::runtime_error(
-              "size of chunks exceeds the size of the object");
-        }
-      };
-  this->getObjectDataChunks(
-      objectName, callback, AWS_MULTIPART_UPLOAD_MINIMUM_CHUNK_SIZE);
-  uploader.finishUpload();
-  // this will overwrite the target file
-  this->renameObject(objectName + "-multipart", objectName);
-  const size_t newSize = this->getObjectSize(objectName);
-  if (objectSize + data.size() != newSize) {
-    throw std::runtime_error(
-        "append to object " + objectName +
-        " has been performed but the final sizes don't "
-        "match, the size is now [" +
-        std::to_string(newSize) + "] but should be [" +
-        std::to_string(objectSize + data.size()) + "]");
-  }
-}
-
-void AwsS3Bucket::clearObject(const std::string &objectName) {
-  this->writeObject(objectName, "");
-}
-
-void AwsS3Bucket::deleteObject(const std::string &objectName) {
-  Aws::S3::Model::DeleteObjectRequest deleteRequest;
-
-  deleteRequest.SetKey(objectName);
-  deleteRequest.SetBucket(this->name);
-
-  Aws::S3::Model::DeleteObjectOutcome deleteOutcome =
-      this->client->DeleteObject(deleteRequest);
-  if (!deleteOutcome.IsSuccess()) {
-    throw std::runtime_error(deleteOutcome.GetError().GetMessage());
-  }
-}
-
-} // namespace network
-} // namespace comm
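
For orientation, a minimal sketch of how the removed AwsS3Bucket API fits together, assuming default AWS credentials and an existing bucket; the bucket and object names are illustrative. getObjectData refuses anything above GRPC_CHUNK_SIZE_LIMIT, so larger objects are consumed through getObjectDataChunks, and appendToObject switches from a plain rewrite to a MultiPartUploader copy once the object crosses the 5MB multipart minimum.

#include "AwsS3Bucket.h"

#include <aws/core/Aws.h>

#include <iostream>
#include <memory>
#include <string>

using namespace comm::network;

int main() {
  Aws::InitAPI({});
  {
    Aws::Client::ClientConfiguration config;
    config.region = "us-east-2";
    auto client = std::make_shared<Aws::S3::S3Client>(config);
    AwsS3Bucket bucket("commapp-test", client);

    bucket.writeObject("example-object", "hello");
    // small object, so this is a read-modify-write rather than a multipart copy
    bucket.appendToObject("example-object", " world");

    // reassemble the object from bounded chunks instead of one getObjectData call
    std::string reassembled;
    bucket.getObjectDataChunks(
        "example-object",
        [&reassembled](const std::string &chunk) { reassembled += chunk; },
        1024);
    std::cout << reassembled << std::endl; // prints "hello world"

    bucket.deleteObject("example-object");
  }
  Aws::ShutdownAPI({});
  return 0;
}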
diff --git a/services/backup/docker-server/contents/server/src/AwsS3Bucket.dev.cpp b/services/backup/docker-server/contents/server/src/AwsS3Bucket.dev.cpp
deleted file mode 100644
--- a/services/backup/docker-server/contents/server/src/AwsS3Bucket.dev.cpp
+++ /dev/null
@@ -1,111 +0,0 @@
-#include "AwsS3Bucket.h"
-#include "DevTools.h"
-#include "Tools.h"
-
-#include <algorithm>
-#include <filesystem>
-#include <fstream>
-#include <iostream>
-
-namespace comm {
-namespace network {
-
-AwsS3Bucket::AwsS3Bucket(
-    const std::string name,
-    std::shared_ptr<Aws::S3::S3Client> client)
-    : name(name), client(nullptr) {
-  std::filesystem::create_directories(commFilesystemPath);
-}
-
-std::vector<std::string> AwsS3Bucket::listObjects() {
-  std::vector<std::string> result;
-  for (const auto &entry :
-       std::filesystem::directory_iterator(commFilesystemPath)) {
-    result.push_back(entry.path());
-  }
-  return result;
-}
-
-bool AwsS3Bucket::isAvailable() const {
-  return std::filesystem::exists(commFilesystemPath);
-}
-
-const size_t AwsS3Bucket::getObjectSize(const std::string &objectName) {
-  return std::filesystem::file_size(createCommPath(objectName));
-}
-
-void AwsS3Bucket::renameObject(
-    const std::string &currentName,
-    const std::string &newName) {
-  std::filesystem::rename(createCommPath(currentName), createCommPath(newName));
-}
-
-void AwsS3Bucket::writeObject(
-    const std::string &objectName,
-    const std::string data) {
-  if (std::filesystem::exists(createCommPath(objectName))) {
-    this->clearObject(createCommPath(objectName));
-  }
-  std::ofstream ofs(createCommPath(objectName));
-  ofs << data;
-}
-
-std::string AwsS3Bucket::getObjectData(const std::string &objectName) {
-  std::ifstream ifs(
-      createCommPath(objectName),
-      std::ios::in | std::ios::binary | std::ios::ate);
-
-  std::ifstream::pos_type fileSize = ifs.tellg();
-  ifs.seekg(0, std::ios::beg);
-  if (fileSize > GRPC_CHUNK_SIZE_LIMIT) {
-    throw invalid_argument_error(std::string(
-        "The file is too big(" + std::to_string(fileSize) + " bytes, max is " +
-        std::to_string(GRPC_CHUNK_SIZE_LIMIT) +
-        "bytes), please, use getObjectDataChunks"));
-  }
-
-  std::string bytes;
-  bytes.resize(fileSize);
-  ifs.read((char *)bytes.data(), fileSize);
-
-  return bytes;
-}
-
-void AwsS3Bucket::getObjectDataChunks(
-    const std::string &objectName,
-    const std::function<void(const std::string &)> &callback,
-    const size_t chunkSize) {
-  std::ifstream ifs(
-      createCommPath(objectName),
-      std::ios::in | std::ios::binary | std::ios::ate);
-
-  std::ifstream::pos_type fileSize = ifs.tellg();
-
-  size_t filePos = 0;
-  while (filePos < fileSize) {
-    ifs.seekg(filePos, std::ios::beg);
-    const size_t nextSize = std::min(chunkSize, (size_t)fileSize - filePos);
-    std::string bytes;
-    bytes.resize(nextSize);
-    ifs.read((char *)bytes.data(), nextSize);
-    filePos += bytes.size();
-    callback(bytes);
-  }
-}
-
-void AwsS3Bucket::appendToObject(
-    const std::string &objectName,
-    const std::string data) {
-  std::ofstream ofs;
-  ofs.open(createCommPath(objectName), std::ios_base::app);
-  ofs << data;
-}
-
-void AwsS3Bucket::clearObject(const std::string &objectName) {
-  std::filesystem::resize_file(createCommPath(objectName), 0);
-}
-
-void AwsS3Bucket::deleteObject(const std::string &objectName) {
-  std::filesystem::remove(createCommPath(objectName));
-}
-
-} // namespace network
-} // namespace comm
diff --git a/services/backup/docker-server/contents/server/src/AwsStorageManager.h b/services/backup/docker-server/contents/server/src/AwsStorageManager.h
deleted file mode 100644
--- a/services/backup/docker-server/contents/server/src/AwsStorageManager.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#pragma once
-
-#include "AwsS3Bucket.h"
-
-#include <aws/core/Aws.h>
-#include <aws/s3/S3Client.h>
-
-#include <memory>
-#include <string>
-#include <vector>
-
-namespace comm {
-namespace network {
-
-class AwsStorageManager {
-  const std::string region = "us-east-2";
-  std::shared_ptr<Aws::S3::S3Client> client;
-
-public:
-  AwsStorageManager();
-  AwsS3Bucket getBucket(const std::string &bucketName);
-  std::vector<std::string> listBuckets();
-};
-
-} // namespace network
-} // namespace comm
diff --git a/services/backup/docker-server/contents/server/src/AwsStorageManager.cpp b/services/backup/docker-server/contents/server/src/AwsStorageManager.cpp
deleted file mode 100644
--- a/services/backup/docker-server/contents/server/src/AwsStorageManager.cpp
+++ /dev/null
@@ -1,35 +0,0 @@
-#include "AwsStorageManager.h"
-
-#include "Tools.h"
-
-#include <aws/s3/model/Bucket.h>
-
-namespace comm {
-namespace network {
-
-AwsStorageManager::AwsStorageManager() {
-  Aws::Client::ClientConfiguration config;
-  config.region = this->region;
-  this->client = std::make_shared<Aws::S3::S3Client>(config);
-}
-
-AwsS3Bucket AwsStorageManager::getBucket(const std::string &bucketName) {
-  return AwsS3Bucket(bucketName, this->client);
-}
-
-std::vector<std::string> AwsStorageManager::listBuckets() {
-  Aws::S3::Model::ListBucketsOutcome outcome = this->client->ListBuckets();
-  std::vector<std::string> result;
-  if (!outcome.IsSuccess()) {
-    throw std::runtime_error(outcome.GetError().GetMessage());
-  }
-  Aws::Vector<Aws::S3::Model::Bucket> buckets =
-      outcome.GetResult().GetBuckets();
-  for (Aws::S3::Model::Bucket &bucket : buckets) {
-    result.push_back(bucket.GetName());
-  }
-  return result;
-}
-
-} // namespace network
-} // namespace comm
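
A short usage sketch for the removed AwsStorageManager, mirroring how the tests drive it (the commapp-test bucket name comes from the test fixtures; credentials are assumed to come from the default AWS provider chain):

#include "AwsStorageManager.h"

#include <aws/core/Aws.h>

#include <iostream>

using namespace comm::network;

int main() {
  Aws::InitAPI({});
  {
    AwsStorageManager storageManager;
    for (const std::string &name : storageManager.listBuckets()) {
      std::cout << "bucket: " << name << std::endl;
    }
    AwsS3Bucket bucket = storageManager.getBucket("commapp-test");
    if (bucket.isAvailable()) {
      bucket.writeObject("example-object", "example data");
      std::cout << bucket.getObjectData("example-object") << std::endl;
      bucket.deleteObject("example-object");
    }
  }
  Aws::ShutdownAPI({});
  return 0;
}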
diff --git a/services/backup/docker-server/contents/server/src/MultiPartUploader.h b/services/backup/docker-server/contents/server/src/MultiPartUploader.h
deleted file mode 100644
--- a/services/backup/docker-server/contents/server/src/MultiPartUploader.h
+++ /dev/null
@@ -1,36 +0,0 @@
-#pragma once
-
-#include <aws/core/Aws.h>
-#include <aws/s3/S3Client.h>
-#include <aws/s3/model/CompleteMultipartUploadRequest.h>
-
-#include <memory>
-#include <string>
-
-namespace comm {
-namespace network {
-
-class MultiPartUploader {
-  std::shared_ptr<Aws::S3::S3Client> client;
-  const std::string bucketName;
-  const std::string objectName;
-  size_t partCounter = 0;
-  std::vector<size_t> partsSizes;
-
-  Aws::S3::Model::CompleteMultipartUploadRequest completeMultipartUploadRequest;
-  Aws::S3::Model::CompletedMultipartUpload completedMultipartUpload;
-  std::string uploadId;
-
-  size_t partNumber = 1;
-
-public:
-  MultiPartUploader(
-      std::shared_ptr<Aws::S3::S3Client> client,
-      const std::string bucketName,
-      const std::string objectName);
-  void addPart(const std::string &part);
-  void finishUpload();
-};
-
-} // namespace network
-} // namespace comm
diff --git a/services/backup/docker-server/contents/server/src/MultiPartUploader.cpp b/services/backup/docker-server/contents/server/src/MultiPartUploader.cpp
deleted file mode 100644
--- a/services/backup/docker-server/contents/server/src/MultiPartUploader.cpp
+++ /dev/null
@@ -1,82 +0,0 @@
-#include "MultiPartUploader.h"
-#include "Tools.h"
-
-#include <aws/core/utils/HashingUtils.h>
-#include <aws/s3/model/CreateMultipartUploadRequest.h>
-#include <aws/s3/model/GetObjectRequest.h>
-#include <aws/s3/model/Object.h>
-#include <aws/s3/model/UploadPartRequest.h>
-
-#include <boost/interprocess/streams/bufferstream.hpp>
-
-namespace comm {
-namespace network {
-
-MultiPartUploader::MultiPartUploader(
-    std::shared_ptr<Aws::S3::S3Client> client,
-    const std::string bucketName,
-    const std::string objectName)
-    : client(client), bucketName(bucketName), objectName(objectName) {
-  this->completeMultipartUploadRequest.SetBucket(this->bucketName);
-  this->completeMultipartUploadRequest.SetKey(this->objectName);
-
-  Aws::S3::Model::CreateMultipartUploadRequest createRequest;
-  createRequest.SetBucket(this->bucketName);
-  createRequest.SetKey(this->objectName);
-  createRequest.SetContentType("text/plain");
-
-  Aws::S3::Model::CreateMultipartUploadOutcome createOutcome =
-      this->client->CreateMultipartUpload(createRequest);
-
-  if (!createOutcome.IsSuccess()) {
-    throw std::runtime_error(createOutcome.GetError().GetMessage());
-  }
-  this->uploadId = createOutcome.GetResult().GetUploadId();
-  this->completeMultipartUploadRequest.SetUploadId(this->uploadId);
-}
-
-void MultiPartUploader::addPart(const std::string &part) {
-  Aws::S3::Model::UploadPartRequest uploadRequest;
-  uploadRequest.SetBucket(this->bucketName);
-  uploadRequest.SetKey(this->objectName);
-  uploadRequest.SetPartNumber(this->partNumber);
-  uploadRequest.SetUploadId(this->uploadId);
-
-  std::shared_ptr<Aws::IOStream> body = std::shared_ptr<Aws::IOStream>(
-      new boost::interprocess::bufferstream((char *)part.data(), part.size()));
-
-  uploadRequest.SetBody(body);
-
-  Aws::Utils::ByteBuffer partMd5(Aws::Utils::HashingUtils::CalculateMD5(*body));
-  uploadRequest.SetContentMD5(Aws::Utils::HashingUtils::Base64Encode(partMd5));
-
-  uploadRequest.SetContentLength(part.size());
-
-  Aws::S3::Model::UploadPartOutcome uploadPartOutcome =
-      this->client->UploadPart(uploadRequest);
-  Aws::S3::Model::CompletedPart completedPart;
-  completedPart.SetPartNumber(this->partNumber);
-  std::string eTag = uploadPartOutcome.GetResult().GetETag();
-  if (eTag.empty()) {
-    throw std::runtime_error("etag empty");
-  }
-  completedPart.SetETag(eTag);
-  completedMultipartUpload.AddParts(completedPart);
-  ++this->partNumber;
-}
-
-void MultiPartUploader::finishUpload() {
-  this->completeMultipartUploadRequest.SetMultipartUpload(
-      this->completedMultipartUpload);
-
-  Aws::S3::Model::CompleteMultipartUploadOutcome completeUploadOutcome =
-      this->client->CompleteMultipartUpload(
-          this->completeMultipartUploadRequest);
-
-  if (!completeUploadOutcome.IsSuccess()) {
-    throw std::runtime_error(completeUploadOutcome.GetError().GetMessage());
-  }
-}
-
-} // namespace network
-} // namespace comm
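
A sketch of the upload flow the removed MultiPartUploader implements: every part except the last has to reach AWS_MULTIPART_UPLOAD_MINIMUM_CHUNK_SIZE (the 5MB minimum referenced in AwsS3Bucket.cpp, pulled in via Tools.h), otherwise S3 rejects the CompleteMultipartUpload and finishUpload throws, which is what the tests further down exercise. The bucket and object names are illustrative.

#include "MultiPartUploader.h"
#include "Tools.h"

#include <aws/core/Aws.h>

#include <memory>
#include <string>

using namespace comm::network;

int main() {
  Aws::InitAPI({});
  {
    Aws::Client::ClientConfiguration config;
    config.region = "us-east-2";
    auto client = std::make_shared<Aws::S3::S3Client>(config);

    MultiPartUploader uploader(client, "commapp-test", "example-object");
    // all parts except the last must meet the 5MB minimum
    uploader.addPart(std::string(AWS_MULTIPART_UPLOAD_MINIMUM_CHUNK_SIZE, 'A'));
    uploader.addPart("tail");
    uploader.finishUpload();
  }
  Aws::ShutdownAPI({});
  return 0;
}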
diff --git a/services/backup/docker-server/contents/server/src/MultiPartUploader.dev.cpp b/services/backup/docker-server/contents/server/src/MultiPartUploader.dev.cpp
deleted file mode 100644
--- a/services/backup/docker-server/contents/server/src/MultiPartUploader.dev.cpp
+++ /dev/null
@@ -1,42 +0,0 @@
-#include "MultiPartUploader.h"
-#include "AwsS3Bucket.h"
-#include "DevTools.h"
-#include "Tools.h"
-
-#include <memory>
-
-namespace comm {
-namespace network {
-
-MultiPartUploader::MultiPartUploader(
-    std::shared_ptr<Aws::S3::S3Client> client,
-    const std::string bucketName,
-    const std::string objectName)
-    : client(nullptr), bucketName(bucketName), objectName(objectName) {
-  AwsS3Bucket(bucketName, nullptr)
-      .writeObject(createCommPath(this->objectName), "");
-}
-
-void MultiPartUploader::addPart(const std::string &part) {
-  AwsS3Bucket(bucketName, nullptr)
-      .appendToObject(createCommPath(this->objectName + "mpu"), part);
-  this->partsSizes.push_back(part.size());
-  ++this->partCounter;
-}
-
-void MultiPartUploader::finishUpload() {
-  AwsS3Bucket bucket(bucketName, nullptr);
-  for (size_t i = 0; i < this->partsSizes.size() - 1; ++i) {
-    if (this->partsSizes.at(i) < AWS_MULTIPART_UPLOAD_MINIMUM_CHUNK_SIZE) {
-      bucket.deleteObject(createCommPath(this->objectName + "mpu"));
-      throw std::runtime_error("too small part detected");
-    }
-  }
-  bucket.renameObject(
-      createCommPath(this->objectName + "mpu"),
-      createCommPath(this->objectName));
-}
-
-} // namespace network
-} // namespace comm
diff --git a/services/backup/docker-server/contents/server/test/MultiPartUploadTest.cpp b/services/backup/docker-server/contents/server/test/MultiPartUploadTest.cpp
deleted file mode 100644
--- a/services/backup/docker-server/contents/server/test/MultiPartUploadTest.cpp
+++ /dev/null
@@ -1,78 +0,0 @@
-#include <gtest/gtest.h>
-
-#include "AwsS3Bucket.h"
-#include "AwsStorageManager.h"
-#include "MultiPartUploader.h"
-#include "TestTools.h"
-#include "Tools.h"
-
-#include <aws/core/Aws.h>
-#include <aws/s3/S3Client.h>
-
-#include <cstring>
-#include <string>
-
-using namespace comm::network;
-
-class MultiPartUploadTest : public testing::Test {
-protected:
-  std::shared_ptr<Aws::S3::S3Client> s3Client;
-  const std::string bucketName = "commapp-test";
-  std::unique_ptr<AwsS3Bucket> bucket;
-
-  virtual void SetUp() {
-    Aws::InitAPI({});
-    Aws::Client::ClientConfiguration config;
-    config.region = "us-east-2";
-    s3Client = std::make_shared<Aws::S3::S3Client>(config);
-    bucket = std::make_unique<AwsS3Bucket>(bucketName, s3Client);
-  }
-
-  virtual void TearDown() {
-    Aws::ShutdownAPI({});
-  }
-};
-
-std::string generateNBytes(const size_t n) {
-  std::string result;
-  result.resize(n);
-  memset((char *)result.data(), 'A', n);
-  return result;
-}
-
-TEST_F(MultiPartUploadTest, ThrowingTooSmallPart) {
-  std::string objectName = createObject(*bucket);
-  MultiPartUploader mpu(s3Client, bucketName, objectName);
-  mpu.addPart("xxx");
-  mpu.addPart("xxx");
-  EXPECT_THROW(mpu.finishUpload(), std::runtime_error);
-}
-
-TEST_F(MultiPartUploadTest, ThrowingTooSmallPartOneByte) {
-  std::string objectName = createObject(*bucket);
-  MultiPartUploader mpu(s3Client, bucketName, objectName);
-  mpu.addPart(generateNBytes(AWS_MULTIPART_UPLOAD_MINIMUM_CHUNK_SIZE - 1));
-  mpu.addPart("xxx");
-  EXPECT_THROW(mpu.finishUpload(), std::runtime_error);
-}
-
-TEST_F(MultiPartUploadTest, SuccessfulWriteMultipleChunks) {
-  std::string objectName = createObject(*bucket);
-  MultiPartUploader mpu(s3Client, bucketName, objectName);
-  mpu.addPart(generateNBytes(AWS_MULTIPART_UPLOAD_MINIMUM_CHUNK_SIZE));
-  mpu.addPart("xxx");
-  mpu.finishUpload();
-  EXPECT_THROW(bucket->getObjectData(objectName), invalid_argument_error);
-  EXPECT_EQ(
-      bucket->getObjectSize(objectName),
-      AWS_MULTIPART_UPLOAD_MINIMUM_CHUNK_SIZE + 3);
-  bucket->deleteObject(objectName);
-}
-
-TEST_F(MultiPartUploadTest, SuccessfulWriteOneChunk) {
-  std::string objectName = createObject(*bucket);
-  MultiPartUploader mpu(s3Client, bucketName, objectName);
-  mpu.addPart("xxx");
-  mpu.finishUpload();
-  EXPECT_EQ(bucket->getObjectSize(objectName), 3);
-  bucket->deleteObject(objectName);
-}
diff --git a/services/backup/docker-server/contents/server/test/StorageManagerTest.cpp b/services/backup/docker-server/contents/server/test/StorageManagerTest.cpp
deleted file mode 100644
--- a/services/backup/docker-server/contents/server/test/StorageManagerTest.cpp
+++ /dev/null
@@ -1,73 +0,0 @@
-#include <gtest/gtest.h>
-
-#include "AwsStorageManager.h"
-#include "TestTools.h"
-
-#include <aws/core/Aws.h>
-
-#include <chrono>
-#include <iostream>
-#include <memory>
-#include <string>
-
-using namespace comm::network;
-
-class StorageManagerTest : public testing::Test {
-public:
-protected:
-  std::unique_ptr<AwsStorageManager> storageManager;
-  const std::string bucketName = "commapp-test";
-  const std::string data =
-      "yiU3VaZlKfTteO10yrWmK1Q5BOvBQrdmj2aBlnoLuhxLfRZK1n8"
-      "26FRXJAGhPswR1r8yxtwxyLkv3I4J4tlH4brDP10mrB99XpM6";
-
-  virtual void SetUp() {
-    Aws::InitAPI({});
-    if (storageManager == nullptr) {
-      storageManager = std::make_unique<AwsStorageManager>();
-    }
-  }
-
-  virtual void TearDown() {
-    Aws::ShutdownAPI({});
-  }
-};
-
-TEST_F(StorageManagerTest, ObjectOperationsTest) {
-  EXPECT_TRUE(storageManager->getBucket(bucketName).isAvailable());
-  std::string objectName = createObject(storageManager->getBucket(bucketName));
-
-  storageManager->getBucket(bucketName).writeObject(objectName, data);
-
-  EXPECT_EQ(
-      storageManager->getBucket(bucketName).getObjectSize(objectName),
-      data.size());
-  EXPECT_TRUE(
-      storageManager->getBucket(bucketName).getObjectData(objectName) == data);
-  std::string chunkedData;
-  const size_t chunkSize = data.size() / 10;
-  std::function<void(const std::string &)> callback =
-      [&chunkedData](const std::string &chunk) { chunkedData += chunk; };
-  storageManager->getBucket(bucketName)
-      .getObjectDataChunks(objectName, callback, chunkSize);
-  EXPECT_TRUE(data == chunkedData);
-
-  storageManager->getBucket(bucketName)
-      .renameObject(objectName, objectName + "c");
-  EXPECT_THROW(
-      storageManager->getBucket(bucketName).getObjectData(objectName),
-      std::runtime_error);
-  EXPECT_TRUE(
-      storageManager->getBucket(bucketName).getObjectData(objectName + "c") ==
-      data);
-  storageManager->getBucket(bucketName)
-      .renameObject(objectName + "c", objectName);
-
-  storageManager->getBucket(bucketName).clearObject(objectName);
-  EXPECT_EQ(storageManager->getBucket(bucketName).getObjectSize(objectName), 0);
-
-  storageManager->getBucket(bucketName).deleteObject(objectName);
-  EXPECT_THROW(
-      storageManager->getBucket(bucketName).getObjectData(objectName),
-      std::runtime_error);
-}
diff --git a/services/backup/docker-server/contents/server/test/TestTools.h b/services/backup/docker-server/contents/server/test/TestTools.h
deleted file mode 100644
--- a/services/backup/docker-server/contents/server/test/TestTools.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#pragma once
-
-#include "AwsS3Bucket.h"
-
-#include <chrono>
-#include <string>
-
-namespace comm {
-namespace network {
-
-std::string generateObjectName();
-std::string createObject(AwsS3Bucket bucket);
-
-} // namespace network
-} // namespace comm
diff --git a/services/backup/docker-server/contents/server/test/TestTools.cpp b/services/backup/docker-server/contents/server/test/TestTools.cpp
deleted file mode 100644
--- a/services/backup/docker-server/contents/server/test/TestTools.cpp
+++ /dev/null
@@ -1,27 +0,0 @@
-#include "TestTools.h"
-#include "AwsS3Bucket.h"
-
-namespace comm {
-namespace network {
-
-std::string generateObjectName() {
-  std::chrono::milliseconds ms =
-      std::chrono::duration_cast<std::chrono::milliseconds>(
-          std::chrono::system_clock::now().time_since_epoch());
-  return std::to_string(ms.count());
-}
-
-std::string createObject(AwsS3Bucket bucket) {
-  std::string objectName;
-  std::vector<std::string> presentObjects;
-  do {
-    objectName = generateObjectName();
-    presentObjects = bucket.listObjects();
-  } while (
-      std::find(presentObjects.begin(), presentObjects.end(), objectName) !=
-      presentObjects.end());
-  return objectName;
-}
-
-} // namespace network
-} // namespace comm