diff --git a/nix/dev-shell.nix b/nix/dev-shell.nix
index 12c315d63..c439f9b88 100644
--- a/nix/dev-shell.nix
+++ b/nix/dev-shell.nix
@@ -1,155 +1,157 @@
 { mkShell
 , stdenv
 , lib
 , awscli2
 , arcanist
 , better-prompt
 , boost
 , bundler
 , c-ares_cmake-config
+, cargo-lambda
 , cargo-udeps
 , cmake
 , cmake-format
 , cocoapods
 , corrosion
 , darwin
 , double-conversion
 , emscripten
 , folly
 , fmt
 , glog
 , grpc
 , libiconv
 , libuv
 , localstack
 , mariadb
 , mariadb-up
 , nodejs
 , olm
 , openjdk11
 , openssl
 , pkg-config
 , protobuf3_21
 , python3
 , rabbitmq-server
 , redis
 , redis-up
 , rustup
 , shellcheck
 , sops
 , sqlite
 , terraform
 , rustfmt
 , wasm-pack
 , yarn
 , protoc-gen-grpc-web
 }:

 mkShell {

   # programs which are meant to be executed should go here
   nativeBuildInputs = [
     # generic development or tools
     arcanist
     awscli2
     shellcheck
     sops
     terraform
     emscripten

     # android
     openjdk11

     # node development
     mariadb
     nodejs
     yarn
     python3
     redis
     wasm-pack
     protoc-gen-grpc-web

     # native dependencies
     # C/CXX toolchains are already brought in with mkShell
     # Identity Service
     rustfmt
     rustup
+    cargo-lambda
     cargo-udeps

     # Tunnelbroker + CMake
     c-ares_cmake-config
     cmake
     cmake-format # linting
     libuv
     localstack
     pkg-config
     protobuf3_21
     grpc
     rabbitmq-server # runtime service
   ] ++ lib.optionals stdenv.isDarwin [
     cocoapods # needed for ios
     bundler
   ];

   # include any libraries in buildInputs
   buildInputs = [
     # protobuf exposes both a library and a command
     # thus should appear in both inputs
     protobuf3_21
     corrosion # tunnelbroker
     double-conversion # tunnelbroker
     glog # tunnelbroker
     folly # cpp tools
     fmt # needed for folly
     boost # needed for folly
     olm # needed for CryptoTools
     sqlite # needed for sqlite_orm
     openssl # needed for grpc
   ] ++ lib.optionals stdenv.isDarwin
     (with darwin.apple_sdk.frameworks; [
       CoreFoundation
       CoreServices
       Security
       libiconv # identity service
     ]);

   JAVA_HOME = openjdk11.passthru.home;

   # shell commands to be run upon entering shell
   shellHook = ''
     PRJ_ROOT=$(git rev-parse --show-toplevel)

     # Set development environment variable defaults
     source "${../scripts/source_development_defaults.sh}"

     # Cache development path for some use cases such as XCode
     "$PRJ_ROOT/scripts/save_path.sh"
   ''
   # Darwin condition can be removed once linux services are supported
   + lib.optionalString stdenv.isDarwin ''
     # Start MariaDB development services
     "${mariadb-up}"/bin/mariadb-up &
     mariadb_pid=$!
     "${redis-up}"/bin/redis-up &
     redis_pid=$!
     wait "$mariadb_pid" "$redis_pid"

     ${../scripts}/install_homebrew_macos.sh
     ${../scripts}/install_homebrew_deps.sh watchman
   '' + ''
     # Render default configuration for keyserver
     $PRJ_ROOT/scripts/create_url_facts.sh

     # Ensure rustup tooling is installed
     $PRJ_ROOT/scripts/ensure_rustup_setup.sh

     # Provide decent bash prompt
     source "${better-prompt}/bin/better-prompt"

     echo "Welcome to Comm dev environment! :)"
   '';
 }
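cargo-lambda joins the Rust tooling here so the new search-index-lambda crate can be built straight from the dev shell. A minimal smoke test, assuming the crate builds cleanly (the flags and output path mirror the run.sh scripts and Terraform defaults later in this diff):

  # build the lambda and confirm the artifact Terraform expects
  cd services/search-index-lambda
  cargo lambda build --arm64 --output-format zip
  ls target/lambda/search-index-lambda/bootstrap.zip

bootstrap.zip is the artifact the shared Terraform module hashes with filebase64sha256(), so it must exist before any plan or apply.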
:)" ''; } diff --git a/services/commtest/Dockerfile b/services/commtest/Dockerfile index c9b5419bb..02808eb50 100644 --- a/services/commtest/Dockerfile +++ b/services/commtest/Dockerfile @@ -1,41 +1,45 @@ FROM rust:1.70-bullseye RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ build-essential cmake git libgtest-dev libssl-dev zlib1g-dev \ - gnupg software-properties-common + gnupg software-properties-common python3-pip # These steps are required to install terraform RUN wget -O- https://apt.releases.hashicorp.com/gpg | \ gpg --dearmor | \ tee /usr/share/keyrings/hashicorp-archive-keyring.gpg \ && echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] \ https://apt.releases.hashicorp.com $(lsb_release -cs) main" | \ tee /etc/apt/sources.list.d/hashicorp.list \ && apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ terraform && rm -rf /var/lib/apt/lists/* # install aws-cli v2, we must do this manually and per-platform ARG TARGETPLATFORM RUN if [ $(echo $TARGETPLATFORM | cut -d / -f2) = "arm64" ]; then \ curl "https://awscli.amazonaws.com/awscli-exe-linux-aarch64.zip" -o "awscliv2.zip"; \ else \ curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"; \ fi RUN unzip -q awscliv2.zip && ./aws/install \ && rm -rf awscliv2.zip aws WORKDIR /home/root/app/commtest +# Install cargo lambda +RUN pip3 install cargo-lambda + # Install more recent version of protobuf, must be ran as root COPY scripts/install_protobuf.sh ../../scripts/install_protobuf.sh RUN ../../scripts/install_protobuf.sh - ENV CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse # Copy actual application sources COPY shared ../../shared/ COPY services/terraform/dev ../terraform/dev COPY services/terraform/modules ../terraform/modules COPY services/commtest ./ +COPY services/search-index-lambda ../search-index-lambda + CMD ["bash", "./run-tests-ci.sh"] diff --git a/services/commtest/run-tests-ci.sh b/services/commtest/run-tests-ci.sh index 553b018f9..e126a5de8 100755 --- a/services/commtest/run-tests-ci.sh +++ b/services/commtest/run-tests-ci.sh @@ -1,68 +1,81 @@ #!/bin/env bash set -euo pipefail NUM_FAILURES=0 awscli() { aws --endpoint-url="$LOCALSTACK_ENDPOINT" "$@" } +build_lambdas() { + echo "Building lambdas..." + + pushd ../search-index-lambda >/dev/null + + cargo lambda build --arm64 --output-format zip + + popd >/dev/null +} + # Set up Localstack using Terraform reset_localstack() { echo "Resetting Localstack..." pushd ../terraform/dev >/dev/null terraform init # Force delete secrets due to bug in Localstack where Terraform can't delete them echo "Deleting secrets..." secret_arns=$(awscli secretsmanager list-secrets --query "SecretList[].ARN" --output text) for arn in $secret_arns; do awscli secretsmanager delete-secret --secret-id "$arn" --force-delete-without-recovery done # Reset terraform state echo "Resetting terraform state..." terraform apply -destroy -auto-approve terraform apply -auto-approve popd >/dev/null } run_test() { local exit_code echo "COMMTEST: Running test: $1" set +e RUSTFLAGS=-Awarnings cargo test --test "$1" -- --show-output "${@:2}" exit_code=$? 
diff --git a/services/commtest/run-tests-ci.sh b/services/commtest/run-tests-ci.sh
index 553b018f9..e126a5de8 100755
--- a/services/commtest/run-tests-ci.sh
+++ b/services/commtest/run-tests-ci.sh
@@ -1,68 +1,81 @@
 #!/usr/bin/env bash

 set -euo pipefail

 NUM_FAILURES=0

 awscli() {
   aws --endpoint-url="$LOCALSTACK_ENDPOINT" "$@"
 }

+build_lambdas() {
+  echo "Building lambdas..."
+
+  pushd ../search-index-lambda >/dev/null
+
+  cargo lambda build --arm64 --output-format zip
+
+  popd >/dev/null
+}
+
 # Set up Localstack using Terraform
 reset_localstack() {
   echo "Resetting Localstack..."
   pushd ../terraform/dev >/dev/null

   terraform init

   # Force delete secrets due to bug in Localstack where Terraform can't delete them
   echo "Deleting secrets..."
   secret_arns=$(awscli secretsmanager list-secrets --query "SecretList[].ARN" --output text)
   for arn in $secret_arns; do
     awscli secretsmanager delete-secret --secret-id "$arn" --force-delete-without-recovery
   done

   # Reset terraform state
   echo "Resetting terraform state..."
   terraform apply -destroy -auto-approve
   terraform apply -auto-approve

   popd >/dev/null
 }

 run_test() {
   local exit_code
   echo "COMMTEST: Running test: $1"
   set +e
   RUSTFLAGS=-Awarnings cargo test --test "$1" -- --show-output "${@:2}"
   exit_code=$?
   set -e
   if [ $exit_code -ne 0 ]; then
     ((NUM_FAILURES += 1))
   fi
 }

+# Build lambdas for terraform
+build_lambdas
+
 # Reset localstack and then run tests
 reset_localstack

 run_test "blob_*"
 run_test "backup*"
 run_test "tunnelbroker_*" --test-threads=1
 run_test grpc_client_test

 # below tests are flaky and need to be run in order
 run_test identity_integration_tests
 run_test identity_keyserver_tests
 run_test identity_access_tokens_tests
 run_test identity_one_time_key_tests
 run_test identity_prekey_tests
 run_test identity_tunnelbroker_tests
 run_test identity_device_list_tests

 if [ $NUM_FAILURES -eq 0 ]; then
   echo "COMMTEST: ALL TESTS PASSED"
   exit 0
 else
   echo "COMMTEST: $NUM_FAILURES TEST SUITES FAILED"
   exit 1
 fi
diff --git a/services/docker-compose.yml b/services/docker-compose.yml
index 6f7ffe56c..e619b80ce 100644
--- a/services/docker-compose.yml
+++ b/services/docker-compose.yml
@@ -1,110 +1,111 @@
 version: '3.9'

 volumes:
   localstack:

 services:
   # tunnelbroker
   tunnelbroker-server:
     depends_on:
       - localstack
       - rabbitmq
     build:
       dockerfile: services/tunnelbroker/Dockerfile
       context: ../
     image: commapp/tunnelbroker-server:0.5
     ports:
       - '${COMM_SERVICES_PORT_TUNNELBROKER}:50051'
     volumes:
       - $HOME/.aws/config:/home/comm/.aws/config:ro
       - $HOME/.aws/credentials:/home/comm/.aws/credentials:ro

   # backup
   backup-server:
     platform: linux/amd64
     depends_on:
       - localstack
       - blob-server
     build:
       dockerfile: services/backup/Dockerfile
       context: ../
     image: commapp/backup-server:0.2
     ports:
       - '${COMM_SERVICES_PORT_BACKUP}:50052'
     volumes:
       - $HOME/.aws/config:/home/comm/.aws/config:ro
       - $HOME/.aws/credentials:/home/comm/.aws/credentials:ro

   # blob
   blob-server:
     platform: linux/amd64
     depends_on:
       - localstack
     build:
       dockerfile: services/blob/Dockerfile
       context: ../
     image: commapp/blob-server:1.0.0
     ports:
       - '${COMM_SERVICES_PORT_BLOB}:50053'
     volumes:
       - $HOME/.aws/config:/home/comm/.aws/config:ro
       - $HOME/.aws/credentials:/home/comm/.aws/credentials:ro

   # identity
   identity-server:
     platform: linux/amd64
     depends_on:
       - localstack
     build:
       dockerfile: services/identity/Dockerfile
       context: ../
     image: commapp/identity-server:0.3
     ports:
       - '${COMM_SERVICES_PORT_IDENTITY}:50054'

   # feature-flags
   feature-flags-server:
     depends_on:
       - localstack
     build:
       dockerfile: services/feature-flags/Dockerfile
       context: ../
     image: commapp/feature-flags:0.1.1
     ports:
       - '${COMM_SERVICES_PORT_FEATURE_FLAGS}:50055'
     volumes:
       - $HOME/.aws/config:/home/comm/.aws/config:ro
       - $HOME/.aws/credentials:/home/comm/.aws/credentials:ro

   # reports
   reports-server:
     platform: linux/amd64
     depends_on:
       - localstack
       - blob-server
     build:
       dockerfile: services/reports/Dockerfile
       context: ../
     image: commapp/reports-server:0.1.0
     ports:
       - '${COMM_SERVICES_PORT_REPORTS}:50056'
     volumes:
       - $HOME/.aws/config:/home/comm/.aws/config:ro
       - $HOME/.aws/credentials:/home/comm/.aws/credentials:ro

   # localstack
   localstack:
     image: localstack/localstack:2.3.2
     hostname: localstack
     ports:
       - '4566:4566'
     environment:
-      - SERVICES=s3,dynamodb
+      - SERVICES=s3,dynamodb,lambda
       - LOCALSTACK_HOST=localstack:4566
       - PERSISTENCE=1
     volumes:
       - localstack:/var/lib/localstack
+      - "/var/run/docker.sock:/var/run/docker.sock"

   # RabbitMQ
   rabbitmq:
     # This version matches AWS MQ version (set in Terraform)
     image: rabbitmq:3.11.16-management
     hostname: rabbitmq
     ports:
       - '5672:5672'
       - '5671:5671'
       - '15672:15672'
     environment:
       - RABBITMQ_DEFAULT_USER=comm
       - RABBITMQ_DEFAULT_PASS=comm
diff --git a/services/search-index-lambda/.gitignore b/services/search-index-lambda/.gitignore
new file mode 100644
index 000000000..e69de29bb
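Two things make Lambda work under Localstack here: the lambda service is enabled, and the Docker socket is mounted so Localstack can spawn function containers. Note that build_lambdas runs before reset_localstack, since the Terraform config hashes bootstrap.zip and fails if it is missing. Once the apply succeeds, a sketch for confirming the function exists, reusing the same endpoint override as the awscli helper above:

  # list functions registered in Localstack's Lambda emulation
  aws --endpoint-url="$LOCALSTACK_ENDPOINT" lambda list-functions \
    --query "Functions[].FunctionName"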
diff --git a/services/terraform/dev/main.tf b/services/terraform/dev/main.tf
index 4436a648c..fa0439420 100644
--- a/services/terraform/dev/main.tf
+++ b/services/terraform/dev/main.tf
@@ -1,52 +1,54 @@
 variable "localstack_endpoint" {
   type    = string
   default = "http://localhost:4566"
 }

 locals {
   aws_settings = ({
     region     = "us-east-2"
     access_key = "fake"
     secret_key = "fake"

     skip_credentials_validation = true
     skip_metadata_api_check     = true
     skip_requesting_account_id  = true

     s3_use_path_style = true

     override_endpoint = var.localstack_endpoint
   })
 }

 provider "aws" {
   region     = local.aws_settings.region
   access_key = local.aws_settings.access_key
   secret_key = local.aws_settings.secret_key

   skip_credentials_validation = local.aws_settings.skip_credentials_validation
   skip_metadata_api_check     = local.aws_settings.skip_metadata_api_check
   skip_requesting_account_id  = local.aws_settings.skip_requesting_account_id

   s3_use_path_style = local.aws_settings.s3_use_path_style

   dynamic "endpoints" {
     for_each = local.aws_settings.override_endpoint[*]
     content {
+      lambda         = endpoints.value
+      ec2            = endpoints.value
       opensearch     = endpoints.value
       dynamodb       = endpoints.value
       s3             = endpoints.value
       secretsmanager = endpoints.value
     }
   }
 }

 provider "random" {}

 # Shared resources between local dev environment and remote AWS
 module "shared" {
   source     = "../modules/shared"
   is_dev     = true
   vpc_id     = null
   cidr_block = null
   subnet_ids = []
 }
diff --git a/services/terraform/dev/run.sh b/services/terraform/dev/run.sh
index eccf24d64..e5e51ce9e 100755
--- a/services/terraform/dev/run.sh
+++ b/services/terraform/dev/run.sh
@@ -1,7 +1,12 @@
 #!/usr/bin/env bash

 set -e

+cd ../../search-index-lambda
+cargo lambda build --arm64 --output-format zip --release
+
+cd ../terraform/dev
+
 terraform init
 terraform apply -auto-approve
diff --git a/services/terraform/modules/shared/outputs.tf b/services/terraform/modules/shared/outputs.tf
index 2f9ac5c01..81b3c71ee 100644
--- a/services/terraform/modules/shared/outputs.tf
+++ b/services/terraform/modules/shared/outputs.tf
@@ -1,21 +1,26 @@
 locals {
   exported_dynamodb_tables = [
     aws_dynamodb_table.feature-flags,
     aws_dynamodb_table.backup-service-backup,
     aws_dynamodb_table.reports-service-reports,
     aws_dynamodb_table.tunnelbroker-undelivered-messages,
+    aws_dynamodb_table.identity-users,
   ]
 }

 # map table names to their resources
 output "dynamodb_tables" {
   value = {
     for table in local.exported_dynamodb_tables :
     table.name => table
   }
 }

 output "opensearch_domain_identity" {
   value = aws_opensearch_domain.identity-search
 }
+
+output "search_index_lambda" {
+  value = aws_lambda_function.search_index_lambda
+}
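The lambda and ec2 endpoints are added because the function is VPC-attached, so Terraform also talks to the EC2 API; and run.sh builds the zip before Terraform runs for the same filebase64sha256() ordering reason noted earlier. The dev loop is then just:

  cd services/terraform/dev
  ./run.sh   # builds bootstrap.zip, then terraform init + apply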
diff --git a/services/terraform/modules/shared/search_index_lambda.tf b/services/terraform/modules/shared/search_index_lambda.tf
new file mode 100644
index 000000000..46c1017b7
--- /dev/null
+++ b/services/terraform/modules/shared/search_index_lambda.tf
@@ -0,0 +1,61 @@
+variable "search_index_lambda_iam_role_arn" {
+  default = "arn:aws:iam::000000000000:role/lambda-role"
+}
+
+variable "lambda_zip_dir" {
+  type    = string
+  default = "../../search-index-lambda/target/lambda/search-index-lambda"
+}
+
+resource "aws_lambda_function" "search_index_lambda" {
+  function_name    = "search-index-lambda-function"
+  filename         = "${var.lambda_zip_dir}/bootstrap.zip"
+  source_code_hash = filebase64sha256("${var.lambda_zip_dir}/bootstrap.zip")
+  handler          = "bootstrap"
+  role             = var.search_index_lambda_iam_role_arn
+  runtime          = "provided.al2"
+  architectures    = ["arm64"]
+  timeout          = 300
+
+  vpc_config {
+    subnet_ids         = var.subnet_ids
+    security_group_ids = [aws_security_group.search_index_lambda.id]
+  }
+
+  environment {
+    variables = {
+      RUST_BACKTRACE      = "1"
+      OPENSEARCH_ENDPOINT = aws_opensearch_domain.identity-search.endpoint
+    }
+  }
+
+  tracing_config {
+    mode = "Active"
+  }
+}
+
+resource "aws_lambda_event_source_mapping" "identity_users_trigger" {
+  event_source_arn  = aws_dynamodb_table.identity-users.stream_arn
+  function_name     = aws_lambda_function.search_index_lambda.arn
+  starting_position = "LATEST"
+}
+
+resource "aws_security_group" "search_index_lambda" {
+  name   = "search_index_lambda_sg"
+  vpc_id = var.vpc_id
+
+  egress {
+    from_port   = 443
+    to_port     = 443
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+}
+
+resource "aws_lambda_function_event_invoke_config" "search-index-lambda" {
+  function_name                = aws_lambda_function.search_index_lambda.function_name
+  maximum_event_age_in_seconds = 60
+  maximum_retry_attempts       = 2
+}
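The event source mapping feeds identity-users stream records to the function starting from LATEST; per the invoke config, a failed batch is retried at most twice and events older than 60 seconds are dropped. A sketch for inspecting the mapping after an apply against Localstack:

  # show where the trigger reads from and whether it is enabled
  aws --endpoint-url="$LOCALSTACK_ENDPOINT" lambda list-event-source-mappings \
    --function-name search-index-lambda-function \
    --query "EventSourceMappings[].{Source:EventSourceArn,State:State}"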
diff --git a/services/terraform/remote/aws_iam.tf b/services/terraform/remote/aws_iam.tf
index e216d279b..5e5f1ac79 100644
--- a/services/terraform/remote/aws_iam.tf
+++ b/services/terraform/remote/aws_iam.tf
@@ -1,222 +1,320 @@
 ### General AWS Utility IAM resources

 # Docs: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/instance_IAM_role.html
 resource "aws_iam_role" "ecs_instance_role" {
   name        = "ecsInstanceRole"
   description = "Allows EC2 instances to call AWS services on your behalf."
   assume_role_policy = jsonencode({
     Version = "2012-10-17"
     Statement = [
       {
         Action = "sts:AssumeRole"
         Effect = "Allow"
         Principal = {
           Service = "ec2.amazonaws.com"
         }
       }
     ]
   })
   managed_policy_arns = [
     "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role",
     # Let instances download Docker images from ECR
     "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
   ]
 }

 # ECS Task execution role
 # Docs: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_execution_IAM_role.html
 resource "aws_iam_role" "ecs_task_execution" {
   name = "ecsTaskExecutionRole"
   assume_role_policy = jsonencode({
     Version = "2008-10-17"
     Statement = [
       {
         Sid    = ""
         Action = "sts:AssumeRole"
         Effect = "Allow"
         Principal = {
           Service = "ecs-tasks.amazonaws.com"
         }
       }
     ]
   })
   managed_policy_arns = [
     "arn:aws:iam::aws:policy/AmazonSSMReadOnlyAccess",
     "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy",
     # Let ECS write logs to CloudWatch
     "arn:aws:iam::aws:policy/CloudWatchLogsFullAccess",
     # Let ECS tasks access secrets to expose them as env vars
     "arn:aws:iam::aws:policy/SecretsManagerReadWrite",
   ]
 }

 # Assume Role Policy Document for EC2 and ECS
 # This policy allows ECS and EC2 to use the roles they are assigned
 data "aws_iam_policy_document" "assume_role_ecs_ec2" {
   statement {
     effect = "Allow"
     actions = [
       "sts:AssumeRole",
     ]
     principals {
       type = "Service"
       identifiers = [
         "ec2.amazonaws.com",
         "ecs-tasks.amazonaws.com"
       ]
     }
   }
 }

 # Allows ECS Exec to SSH into service task containers
 resource "aws_iam_policy" "allow_ecs_exec" {
   name        = "allow-ecs-exec"
   description = "Adds SSM permissions to enable ECS Exec"
   policy = jsonencode({
     Version = "2012-10-17"
     Statement = [
       {
         Effect = "Allow"
         Action = [
           "ssmmessages:CreateControlChannel",
           "ssmmessages:CreateDataChannel",
           "ssmmessages:OpenControlChannel",
           "ssmmessages:OpenDataChannel"
         ]
         Resource = "*"
       }
     ]
   })
 }

 ### App IAM resources

 # Our app role - this is to give access to DynamoDB etc
 # Has trust policy with EC2 and ECS
 # Also allows SSH into containers
 resource "aws_iam_role" "services_ddb_full_access" {
   name               = "dynamodb-s3-full-access"
   description        = "Full RW access to DDB and S3. Allows SSH into ECS containers"
   assume_role_policy = data.aws_iam_policy_document.assume_role_ecs_ec2.json
   managed_policy_arns = [
     aws_iam_policy.allow_ecs_exec.arn,
     "arn:aws:iam::aws:policy/AmazonDynamoDBFullAccess",
     "arn:aws:iam::aws:policy/AmazonS3FullAccess",
   ]
 }

 # Feature Flags IAM
 data "aws_iam_policy_document" "read_feature_flags" {
   statement {
     sid    = "FeatureFlagsDDBReadAccess"
     effect = "Allow"
     actions = [
       "dynamodb:BatchGetItem",
       "dynamodb:GetItem",
       "dynamodb:Query",
       "dynamodb:Scan",
     ]
     resources = [
       module.shared.dynamodb_tables["feature-flags"].arn
     ]
   }
 }
 resource "aws_iam_policy" "read_feature_flags" {
   name        = "feature-flags-ddb-read-access"
   policy      = data.aws_iam_policy_document.read_feature_flags.json
   description = "Allows full read access to feature-flags DynamoDB table"
 }
 resource "aws_iam_role" "feature_flags_service" {
   name               = "feature-flags-service-role"
   assume_role_policy = data.aws_iam_policy_document.assume_role_ecs_ec2.json
   managed_policy_arns = [
     aws_iam_policy.read_feature_flags.arn
   ]
 }

 # Backup Service IAM
 data "aws_iam_policy_document" "manage_backup_ddb" {
   statement {
     sid    = "BackupFullDDBAccess"
     effect = "Allow"
     actions = [
       "dynamodb:*",
     ]
     resources = [
       module.shared.dynamodb_tables["backup-service-backup"].arn,
       "${module.shared.dynamodb_tables["backup-service-backup"].arn}/index/*"
     ]
   }
 }
 resource "aws_iam_policy" "manage_backup_ddb" {
   name        = "backup-ddb-full-access"
   policy      = data.aws_iam_policy_document.manage_backup_ddb.json
   description = "Allows full access to backup DynamoDB table"
 }
 resource "aws_iam_role" "backup_service" {
   name               = "backup-service-role"
   assume_role_policy = data.aws_iam_policy_document.assume_role_ecs_ec2.json
   managed_policy_arns = [
     aws_iam_policy.allow_ecs_exec.arn,
     aws_iam_policy.manage_backup_ddb.arn
   ]
 }

 # Reports Service IAM
 data "aws_iam_policy_document" "manage_reports_ddb" {
   statement {
     sid    = "ReportsFullDDBAccess"
     effect = "Allow"
     actions = [
       "dynamodb:*",
     ]
     resources = [
       module.shared.dynamodb_tables["reports-service-reports"].arn
     ]
   }
 }
 resource "aws_iam_policy" "manage_reports_ddb" {
   name        = "reports-ddb-full-access"
   policy      = data.aws_iam_policy_document.manage_reports_ddb.json
   description = "Allows full access to reports DynamoDB table"
 }
 resource "aws_iam_role" "reports_service" {
   name               = "reports-service-role"
   assume_role_policy = data.aws_iam_policy_document.assume_role_ecs_ec2.json
   managed_policy_arns = [
     aws_iam_policy.allow_ecs_exec.arn,
     aws_iam_policy.manage_reports_ddb.arn
   ]
 }

+data "aws_iam_policy_document" "assume_identity_search_role" {
+  statement {
+    effect = "Allow"
+
+    principals {
+      type        = "Service"
+      identifiers = ["lambda.amazonaws.com"]
+    }
+
+    actions = ["sts:AssumeRole"]
+  }
+}
+
+resource "aws_iam_role" "search_index_lambda" {
+  name               = "search_index_lambda"
+  assume_role_policy = data.aws_iam_policy_document.assume_identity_search_role.json
+
+  managed_policy_arns = [
+    aws_iam_policy.manage_cloudwatch_logs.arn,
+    aws_iam_policy.manage_network_interface.arn,
+    aws_iam_policy.read_identity_users_stream.arn,
+  ]
+}
+
+data "aws_iam_policy_document" "read_identity_users_stream" {
+  statement {
+    effect = "Allow"
+
+    actions = [
+      "dynamodb:GetRecords",
+      "dynamodb:GetShardIterator",
+      "dynamodb:DescribeStream",
+      "dynamodb:ListStreams",
+    ]
+    resources = [
+      module.shared.dynamodb_tables["identity-users"].arn,
+      module.shared.dynamodb_tables["identity-users"].stream_arn,
+      "${module.shared.dynamodb_tables["identity-users"].arn}/stream/*",
+    ]
+  }
+}
+
+resource "aws_iam_policy" "read_identity_users_stream" {
+  name        = "read-identity-users-stream"
+  path        = "/"
+  description = "IAM policy for managing identity-users stream"
+  policy      = data.aws_iam_policy_document.read_identity_users_stream.json
+}
+
+data "aws_iam_policy_document" "manage_cloudwatch_logs" {
+  statement {
+    effect = "Allow"
+
+    actions = [
+      "logs:CreateLogGroup",
+      "logs:CreateLogStream",
+      "logs:PutLogEvents",
+    ]
+
+    resources = ["arn:aws:logs:*:*:*"]
+  }
+}
+
+resource "aws_iam_policy" "manage_cloudwatch_logs" {
+  name        = "manage-cloudwatch-logs"
+  path        = "/"
+  description = "IAM policy for managing cloudwatch logs"
+  policy      = data.aws_iam_policy_document.manage_cloudwatch_logs.json
+}
+
+data "aws_iam_policy_document" "manage_network_interface" {
+  statement {
+    effect = "Allow"
+
+    actions = [
+      "ec2:CreateNetworkInterface",
+      "ec2:DescribeNetworkInterfaces",
+      "ec2:DeleteNetworkInterface"
+    ]
+
+    resources = ["*"]
+  }
+}
+
+resource "aws_iam_policy" "manage_network_interface" {
+  name        = "manage-network-interface"
+  path        = "/"
+  description = "IAM policy for managing network interfaces"
+  policy      = data.aws_iam_policy_document.manage_network_interface.json
+}
+
+resource "aws_iam_role_policy_attachment" "AWSLambdaVPCAccessExecutionRole" {
+  role       = aws_iam_role.search_index_lambda.name
+  policy_arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole"
+}
+
 data "aws_iam_policy_document" "opensearch_domain_access" {
   statement {
     effect = "Allow"
     principals {
       type        = "*"
-      identifiers = []
+      identifiers = ["${module.shared.search_index_lambda.arn}"]
     }
     actions = [
       "es:ESHttpHead",
       "es:ESHttpPost",
       "es:ESHttpGet",
       "es:ESHttpDelete",
       "es:ESHttpPut",
     ]
     resources = ["${module.shared.opensearch_domain_identity.arn}/*"]
   }
 }

 resource "aws_opensearch_domain_policy" "opensearch_domain_access" {
   domain_name     = module.shared.opensearch_domain_identity.domain_name
   access_policies = data.aws_iam_policy_document.opensearch_domain_access.json
 }
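The new role is assumable only by lambda.amazonaws.com and bundles stream-read, CloudWatch Logs, and ENI permissions; the attached AWSLambdaVPCAccessExecutionRole managed policy covers largely the same logs and network-interface actions needed for VPC attachment, so the custom policies partly overlap it. One way to verify the trust policy once applied (role name as in this diff):

  # should print only the lambda.amazonaws.com service principal
  aws iam get-role --role-name search_index_lambda \
    --query "Role.AssumeRolePolicyDocument.Statement[].Principal"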
"read_identity_users_stream" { + name = "read-identity-users-stream" + path = "/" + description = "IAM policy for managing identity-users stream" + policy = data.aws_iam_policy_document.read_identity_users_stream.json +} + +data "aws_iam_policy_document" "manage_cloudwatch_logs" { + statement { + effect = "Allow" + + actions = [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents", + ] + + resources = ["arn:aws:logs:*:*:*"] + } +} + +resource "aws_iam_policy" "manage_cloudwatch_logs" { + name = "manage-cloudwatch-logs" + path = "/" + description = "IAM policy for managing cloudwatch logs" + policy = data.aws_iam_policy_document.manage_cloudwatch_logs.json +} + +data "aws_iam_policy_document" "manage_network_interface" { + statement { + effect = "Allow" + + actions = [ + "ec2:CreateNetworkInterface", + "ec2:DescribeNetworkInterfaces", + "ec2:DeleteNetworkInterface" + ] + + resources = ["*"] + } +} + +resource "aws_iam_policy" "manage_network_interface" { + name = "manage-network-interface" + path = "/" + description = "IAM policy for managing network interfaces" + policy = data.aws_iam_policy_document.manage_network_interface.json +} + + +resource "aws_iam_role_policy_attachment" "AWSLambdaVPCAccessExecutionRole" { + role = aws_iam_role.search_index_lambda.name + policy_arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole" +} + data "aws_iam_policy_document" "opensearch_domain_access" { statement { effect = "Allow" principals { type = "*" - identifiers = [] + identifiers = ["${module.shared.search_index_lambda.arn}"] } actions = [ "es:ESHttpHead", "es:ESHttpPost", "es:ESHttpGet", "es:ESHttpDelete", "es:ESHttpPut", ] resources = ["${module.shared.opensearch_domain_identity.arn}/*"] } } resource "aws_opensearch_domain_policy" "opensearch_domain_access" { domain_name = module.shared.opensearch_domain_identity.domain_name access_policies = data.aws_iam_policy_document.opensearch_domain_access.json } diff --git a/services/terraform/remote/main.tf b/services/terraform/remote/main.tf index f8d3e0b11..4160e922e 100644 --- a/services/terraform/remote/main.tf +++ b/services/terraform/remote/main.tf @@ -1,67 +1,68 @@ terraform { backend "s3" { region = "us-east-2" key = "terraform.tfstate" bucket = "commapp-terraform" dynamodb_table = "terraform-lock" encrypt = true } } provider "random" {} provider "sops" {} data "sops_file" "secrets_json" { source_file = "secrets.json" } locals { environment = terraform.workspace is_staging = local.environment == "staging" secrets = jsondecode(data.sops_file.secrets_json.raw) target_account_id = lookup(local.secrets.accountIDs, local.environment) terraform_role_arn = "arn:aws:iam::${local.target_account_id}:role/Terraform" } provider "aws" { region = "us-east-2" assume_role { role_arn = local.terraform_role_arn external_id = "terraform" } # automatically add these tags to all resources default_tags { tags = { # Helps to distinguish which resources are managed by Terraform managed_by = "terraform" } } } locals { # S3 bucket names are globally unique so we add a suffix to staging buckets s3_bucket_name_suffix = local.is_staging ? 
"-staging" : "" } # Shared resources between local dev environment and remote AWS module "shared" { source = "../modules/shared" bucket_name_suffix = local.s3_bucket_name_suffix - vpc_id = aws_vpc.default.id - cidr_block = aws_vpc.default.cidr_block + vpc_id = aws_vpc.default.id + search_index_lambda_iam_role_arn = aws_iam_role.search_index_lambda.arn + cidr_block = aws_vpc.default.cidr_block subnet_ids = [ aws_subnet.public_a.id, ] } check "workspace_check" { assert { condition = terraform.workspace == "staging" || terraform.workspace == "production" error_message = "Terraform workspace must be either 'staging' or 'production'!" } } diff --git a/services/terraform/remote/run.sh b/services/terraform/remote/run.sh new file mode 100755 index 000000000..e7d97548d --- /dev/null +++ b/services/terraform/remote/run.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +set -e + +cd ../../search-index-lambda +cargo lambda build --arm64 --output-format zip --release + +cd ../terraform/remote +terraform init +terraform apply