diff --git a/services/terraform/remote/service_blob.tf b/services/terraform/remote/service_blob.tf
index f613cdee5..ff8a7536e 100644
--- a/services/terraform/remote/service_blob.tf
+++ b/services/terraform/remote/service_blob.tf
@@ -1,223 +1,198 @@
 locals {
   blob_service_image_tag      = local.is_staging ? "latest" : "0.2.0"
   blob_service_container_name = "blob-service-server"
   blob_service_server_image   = "commapp/blob-server:${local.blob_service_image_tag}"

   # HTTP port & configuration for ECS Service Connect
   blob_service_container_http_port = 50053
   blob_sc_port_name                = "blob-service-ecs-http"
   blob_sc_dns_name                 = "blob-service"

   # URL accessible by other services in the same Service Connect namespace
   # This renders to 'http://blob-service:50053'
   blob_local_url = "http://${local.blob_sc_dns_name}:${local.blob_service_container_http_port}"

   blob_service_container_grpc_port = 50051
   blob_service_grpc_public_port    = 50053

   blob_service_domain_name = "blob.${local.root_domain}"
   blob_service_s3_bucket   = "commapp-blob${local.s3_bucket_name_suffix}"
 }

 resource "aws_ecs_task_definition" "blob_service" {
   family = "blob-service-task-def"
   container_definitions = jsonencode([
     {
       name      = local.blob_service_container_name
       image     = local.blob_service_server_image
       essential = true
       portMappings = [
         {
           name          = local.blob_sc_port_name
           containerPort = local.blob_service_container_http_port
           protocol      = "tcp"
           appProtocol   = "http"
         }
       ]
       environment = [
         {
           name  = "RUST_LOG"
           value = "info"
         },
         {
           name  = "BLOB_S3_BUCKET_NAME",
           value = local.blob_service_s3_bucket
         }
       ]
       logConfiguration = {
         "logDriver" = "awslogs"
         "options" = {
           "awslogs-create-group"  = "true"
           "awslogs-group"         = "/ecs/blob-service-task-def"
           "awslogs-region"        = "us-east-2"
           "awslogs-stream-prefix" = "ecs"
         }
       }
     }
   ])
   task_role_arn            = aws_iam_role.services_ddb_full_access.arn
   execution_role_arn       = aws_iam_role.ecs_task_execution.arn
   network_mode             = "bridge"
   cpu                      = "512"
   memory                   = "512"
   requires_compatibilities = ["EC2"]

   # Set this to true if you want to keep old revisions
   # when this definition is changed
   skip_destroy = false
 }

 resource "aws_ecs_service" "blob_service" {
   name        = "blob-service"
   cluster     = aws_ecs_cluster.comm_services.id
   launch_type = "EC2"

   task_definition      = aws_ecs_task_definition.blob_service.arn
   force_new_deployment = true

   desired_count = 1
   lifecycle {
     ignore_changes = [desired_count]
   }

   # Expose Blob service to other services in the cluster
   service_connect_configuration {
     enabled = true
     service {
       discovery_name = local.blob_sc_dns_name
       port_name      = local.blob_sc_port_name
       client_alias {
         port     = local.blob_service_container_http_port
         dns_name = local.blob_sc_dns_name
       }
     }
   }

   # HTTP
   load_balancer {
     target_group_arn = aws_lb_target_group.blob_service_http.arn
     container_name   = local.blob_service_container_name
     container_port   = local.blob_service_container_http_port
   }

   deployment_circuit_breaker {
     enable   = true
     rollback = true
   }
 }

 # Security group to configure access to the service
 resource "aws_security_group" "blob_service" {
   name   = "blob-service-ecs-sg"
   vpc_id = aws_vpc.default.id

   ingress {
     from_port   = local.blob_service_container_http_port
     to_port     = local.blob_service_container_http_port
     protocol    = "tcp"
     cidr_blocks = ["0.0.0.0/0"]
     description = "HTTP port"
   }

   # Allow all outbound traffic
   egress {
     from_port   = 0
     to_port     = 0
     protocol    = "-1"
     cidr_blocks = ["0.0.0.0/0"]
   }

   lifecycle {
     create_before_destroy = true
   }
 }

 resource "aws_lb_target_group" "blob_service_http" {
   name     = "blob-service-ecs-http-tg"
   port     = local.blob_service_container_http_port
   protocol = "HTTP"
   vpc_id   = aws_vpc.default.id

   # The "bridge" network mode requires target type set to instance
   target_type = "instance"

   health_check {
     enabled             = true
     healthy_threshold   = 2
     unhealthy_threshold = 3
     protocol            = "HTTP"
     path                = "/health"
     matcher             = "200-499"
   }
 }

 # Load Balancer
 resource "aws_lb" "blob_service" {
   load_balancer_type = "application"
   name               = "blob-service-lb"
   internal           = false
   #security_groups = [aws_security_group.blob_service.id]
   subnets = [
     aws_subnet.public_a.id,
     aws_subnet.public_b.id,
     aws_subnet.public_c.id,
   ]
 }

 resource "aws_lb_listener" "blob_service_https" {
   load_balancer_arn = aws_lb.blob_service.arn
   port              = "443"
   protocol          = "HTTPS"
   ssl_policy        = "ELBSecurityPolicy-TLS13-1-2-2021-06"
   certificate_arn   = data.aws_acm_certificate.blob_service.arn

   default_action {
-    type = "forward"
-
-    # TODO: Currently weights are set to direct 100% traffic
-    # to the legacy instance
-    forward {
-      # ECS target group
-      target_group {
-        arn    = aws_lb_target_group.blob_service_http.arn
-        weight = 1
-      }
-
-      # Legacy EC2 Target
-      dynamic "target_group" {
-        for_each = data.aws_lb_target_group.blob_service_legacy_ec2
-        content {
-          arn    = target_group.value["arn"]
-          weight = 0
-        }
-      }
-    }
+    type             = "forward"
+    target_group_arn = aws_lb_target_group.blob_service_http.arn
   }

   lifecycle {
     # Required only for existing resources to avoid plan difference
     ignore_changes = [default_action[0].forward[0].stickiness[0].duration]

     # Target group cannot be destroyed if it is used
     replace_triggered_by = [aws_lb_target_group.blob_service_http]
   }
 }

 # SSL Certificate
 data "aws_acm_certificate" "blob_service" {
   domain   = local.blob_service_domain_name
   statuses = ["ISSUED"]
 }

-# Legacy EC2 instance target
-data "aws_lb_target_group" "blob_service_legacy_ec2" {
-  # We don't have legacy EC2 services in staging
-  count = local.is_staging ? 0 : 1
-  name  = "blob-service-http-tg"
-}
-
 # Required for Route53 DNS record
 output "blob_service_load_balancer_dns_name" {
   value = aws_lb.blob_service.dns_name
 }
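The blob_local_url local above exists so that other tasks in the same Service Connect namespace can reach the Blob service by its discovery name instead of going through the public load balancer. As a rough sketch only (the consumer task, image, and the BLOB_SERVICE_URL variable name are hypothetical and not part of this diff), a dependent task definition could inject it like this:

resource "aws_ecs_task_definition" "example_blob_consumer" {
  # Hypothetical consumer task, shown only to illustrate how
  # local.blob_local_url is meant to be consumed in-cluster.
  family                   = "example-blob-consumer-task-def"
  network_mode             = "bridge"
  cpu                      = "256"
  memory                   = "256"
  requires_compatibilities = ["EC2"]

  container_definitions = jsonencode([
    {
      name      = "example-blob-consumer"
      image     = "commapp/example-consumer:latest" # placeholder image
      essential = true
      environment = [
        {
          # Resolves to http://blob-service:50053 inside the namespace
          name  = "BLOB_SERVICE_URL"
          value = local.blob_local_url
        }
      ]
    }
  ])
}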
resource "aws_ecs_task_definition" "electron_update" { family = "electron-update-task-def" container_definitions = jsonencode([ { name = local.electron_update_container_name image = local.electron_update_server_image essential = true portMappings = [ { name = "electron-update-80-80-http" containerPort = local.electron_update_container_port protocol = "tcp" appProtocol = "http" } ] logConfiguration = { "logDriver" = "awslogs" "options" = { "awslogs-create-group" = "true" "awslogs-group" = "/ecs/electron-update-task-def" "awslogs-region" = "us-east-2" "awslogs-stream-prefix" = "ecs" } } } ]) task_role_arn = null execution_role_arn = aws_iam_role.ecs_task_execution.arn network_mode = "awsvpc" cpu = "256" memory = "512" requires_compatibilities = ["EC2", "FARGATE"] # Set this to true if you want to keep old revisions # when this definition is changed skip_destroy = false } # ECS Service - defines task scaling, load balancer connection, # network configuration etc. resource "aws_ecs_service" "electron_update" { name = "electron-update" cluster = aws_ecs_cluster.comm_services.id launch_type = "FARGATE" task_definition = aws_ecs_task_definition.electron_update.arn force_new_deployment = true desired_count = 1 # Allow external changes without Terraform plan difference # We can freely specify replica count in AWS Console lifecycle { ignore_changes = [desired_count] } load_balancer { target_group_arn = aws_lb_target_group.electron_update_ecs.arn container_name = local.electron_update_container_name container_port = local.electron_update_container_port } network_configuration { assign_public_ip = true security_groups = [ aws_security_group.electron_update.id, ] subnets = [ aws_subnet.public_a.id, aws_subnet.public_b.id, aws_subnet.public_c.id, ] } deployment_circuit_breaker { enable = true rollback = true } } # Security group to configure access to the service resource "aws_security_group" "electron_update" { name = "electron-update-ecs-sg" vpc_id = aws_vpc.default.id ingress { from_port = local.electron_update_container_port to_port = local.electron_update_container_port protocol = "tcp" cidr_blocks = ["0.0.0.0/0"] description = "HTTP port" } # Allow all outbound traffic egress { from_port = 0 to_port = 0 protocol = "-1" cidr_blocks = ["0.0.0.0/0"] } lifecycle { create_before_destroy = true } } # Running service instances are registered here # to be accessed by the load balancer resource "aws_lb_target_group" "electron_update_ecs" { name = "electron-update-ecs-tg" port = local.electron_update_container_port protocol = "HTTP" vpc_id = aws_vpc.default.id # ECS Fargate requires target type set to IP target_type = "ip" health_check { enabled = true healthy_threshold = 2 unhealthy_threshold = 3 protocol = "HTTP" # Hazel homepage returns some HTML that can be used as a health check path = "/" matcher = "200" } } # Load Balancer resource "aws_lb" "electron_update" { load_balancer_type = "application" name = "electron-update-lb" internal = false subnets = [ aws_subnet.public_a.id, aws_subnet.public_c.id, # For some reason we don't use this subnet here # aws_subnet.public_b.id, ] } resource "aws_lb_listener" "electron_update_https" { load_balancer_arn = aws_lb.electron_update.arn port = "443" protocol = "HTTPS" ssl_policy = "ELBSecurityPolicy-2016-08" certificate_arn = data.aws_acm_certificate.electron_update.arn default_action { - type = "forward" - forward { - # ECS target group - target_group { - arn = aws_lb_target_group.electron_update_ecs.arn - weight = 10 - } - - # Legacy EC2 Target - dynamic 
"target_group" { - for_each = data.aws_lb_target_group.electron_update_legacy_ec2 - content { - arn = target_group.value["arn"] - weight = 0 - } - } - } + type = "forward" + target_group_arn = aws_lb_target_group.electron_update_ecs.arn } lifecycle { # Required only for existing resources to avoid plan difference ignore_changes = [default_action[0].forward[0].stickiness[0].duration] } } # SSL Certificate data "aws_acm_certificate" "electron_update" { domain = local.electron_update_domain_name statuses = ["ISSUED"] } -# Legacy EC2 instance target -data "aws_lb_target_group" "electron_update_legacy_ec2" { - # We don't have legacy EC2 services in staging - count = local.is_staging ? 0 : 1 - name = "electron-update-tg" -} - # Required for Route53 DNS record output "electron_update_load_balancer_dns_name" { value = aws_lb.electron_update.dns_name } diff --git a/services/terraform/remote/service_feature_flags.tf b/services/terraform/remote/service_feature_flags.tf index 975cb3090..1d7d5113e 100644 --- a/services/terraform/remote/service_feature_flags.tf +++ b/services/terraform/remote/service_feature_flags.tf @@ -1,209 +1,186 @@ locals { feature_flags_image_tag = local.is_staging ? "latest" : "0.1.1" feature_flags_container_name = "feature-flags-server" feature_flags_container_port = 50055 feature_flags_server_image = "commapp/feature-flags:${local.feature_flags_image_tag}" feature_flags_domain_name = "feature-flags.${local.root_domain}" } # Task definition - defines container resources, ports, # environment variables, docker image etc. resource "aws_ecs_task_definition" "feature_flags" { family = "feature-flags-service-task-def" container_definitions = jsonencode([ { name = local.feature_flags_container_name image = local.feature_flags_server_image essential = true portMappings = [ { name = "feature-flags-http" containerPort = local.feature_flags_container_port protocol = "tcp" appProtocol = "http" } ] environment = [ { name = "RUST_LOG" value = "info" } ] logConfiguration = { "logDriver" = "awslogs" "options" = { "awslogs-create-group" = "true" "awslogs-group" = "/ecs/feature-flags-task-def" "awslogs-region" = "us-east-2" "awslogs-stream-prefix" = "ecs" } } } ]) task_role_arn = aws_iam_role.feature_flags_service.arn execution_role_arn = aws_iam_role.ecs_task_execution.arn network_mode = "awsvpc" cpu = "256" memory = "512" requires_compatibilities = ["EC2", "FARGATE"] # Set this to true if you want to keep old revisions # when this definition is changed skip_destroy = true } # ECS Service - defines task scaling, load balancer connection, # network configuration etc. 
resource "aws_ecs_service" "feature_flags" { name = "feature-flags-service" cluster = aws_ecs_cluster.comm_services.id launch_type = "FARGATE" task_definition = aws_ecs_task_definition.feature_flags.arn force_new_deployment = true desired_count = 1 # Allow external changes without Terraform plan difference # We can freely specify replica count in AWS Console lifecycle { ignore_changes = [desired_count] } load_balancer { target_group_arn = aws_lb_target_group.feature_flags_ecs.arn container_name = local.feature_flags_container_name container_port = local.feature_flags_container_port } network_configuration { assign_public_ip = true security_groups = [ aws_security_group.feature_flags.id, ] subnets = [ aws_subnet.public_a.id, aws_subnet.public_b.id, aws_subnet.public_c.id, ] } deployment_circuit_breaker { enable = true rollback = true } } # Running service instances are registered here # to be accessed by the load balancer resource "aws_lb_target_group" "feature_flags_ecs" { name = "feature-flags-ecs-tg" port = local.feature_flags_container_port protocol = "HTTP" vpc_id = aws_vpc.default.id # ECS Fargate requires target type set to IP target_type = "ip" health_check { enabled = true healthy_threshold = 2 unhealthy_threshold = 3 protocol = "HTTP" # The features endpoint should return HTTP 400 # if no platform, staff, code version is specified path = "/features" matcher = "200-499" } } # Security group to configure access to the service resource "aws_security_group" "feature_flags" { name = "feature-flags-service-sg" vpc_id = aws_vpc.default.id ingress { from_port = local.feature_flags_container_port to_port = local.feature_flags_container_port protocol = "tcp" cidr_blocks = ["0.0.0.0/0"] description = "HTTP port" } # Allow all outbound traffic egress { from_port = 0 to_port = 0 protocol = "-1" cidr_blocks = ["0.0.0.0/0"] } lifecycle { create_before_destroy = true } } # Load Balancer resource "aws_lb" "feature_flags" { load_balancer_type = "application" name = "feature-flags-service-lb" internal = false #security_groups = [aws_security_group.feature_flags.id] subnets = [ aws_subnet.public_a.id, aws_subnet.public_b.id, aws_subnet.public_c.id, ] } resource "aws_lb_listener" "feature_flags_https" { load_balancer_arn = aws_lb.feature_flags.arn port = "443" protocol = "HTTPS" ssl_policy = "ELBSecurityPolicy-2016-08" certificate_arn = data.aws_acm_certificate.feature_flags.arn default_action { - type = "forward" - - forward { - # ECS target group - target_group { - arn = aws_lb_target_group.feature_flags_ecs.arn - weight = 10 - } - - # Legacy EC2 Target - dynamic "target_group" { - for_each = data.aws_lb_target_group.feature_flags_legacy_ec2 - content { - arn = target_group.value["arn"] - weight = 0 - } - } - } + type = "forward" + target_group_arn = aws_lb_target_group.feature_flags_ecs.arn } lifecycle { - # Required only for existing resources to avoid plan difference - ignore_changes = [default_action[0].forward[0].stickiness[0].duration] + ignore_changes = [default_action[0].forward[0].stickiness[0].duration] + replace_triggered_by = [aws_lb_target_group.feature_flags_ecs] } } # SSL Certificate data "aws_acm_certificate" "feature_flags" { domain = local.feature_flags_domain_name statuses = ["ISSUED"] } -# Legacy EC2 instance target -data "aws_lb_target_group" "feature_flags_legacy_ec2" { - # We don't have legacy EC2 services in staging - count = local.is_staging ? 
0 : 1 - name = "feature-flags-service-tg" -} - # Required for Route53 DNS record output "feature_flags_load_balancer_dns_name" { value = aws_lb.feature_flags.dns_name } diff --git a/services/terraform/remote/service_identity.tf b/services/terraform/remote/service_identity.tf index b2b41b5be..9513f962c 100644 --- a/services/terraform/remote/service_identity.tf +++ b/services/terraform/remote/service_identity.tf @@ -1,231 +1,210 @@ locals { identity_service_image_tag = "0.3" identity_service_server_image = "commapp/identity-server:${local.identity_service_image_tag}" identity_service_container_name = "identity-server" # Port that the container is listening on identity_service_container_grpc_port = 50054 identity_sc_port_name = "identity-service-ecs-grpc" identity_sc_dns_name = "identity-service" # Endpoint name accessible by other services in the same Service Connect namespace # This renders to e.g. 'identity-service:50054' identity_local_endpoint = "${local.identity_sc_dns_name}:${local.identity_service_container_grpc_port}" # Port that is exposed to the public SSL endpoint (appended to domain name) identity_service_grpc_public_port = 50054 identity_service_domain_name = "identity.${local.root_domain}" opaque_server_setup_secret_name = "identity/ServerSetup" } data "aws_secretsmanager_secret" "identity_server_setup" { name = local.opaque_server_setup_secret_name } resource "aws_ecs_task_definition" "identity_service" { family = "identity-service-task-def" container_definitions = jsonencode([ { name = local.identity_service_container_name image = local.identity_service_server_image essential = true portMappings = [ { name = local.identity_sc_port_name containerPort = local.identity_service_container_grpc_port protocol = "tcp" appProtocol = "grpc" } ] environment = [ { name = "RUST_LOG" value = "info" }, { name = "KEYSERVER_PUBLIC_KEY" value = nonsensitive(local.secrets["keyserverPublicKey"]) } ] secrets = [ { # This is exposed as an environment variable in the container name = "OPAQUE_SERVER_SETUP" valueFrom = data.aws_secretsmanager_secret.identity_server_setup.arn } ] logConfiguration = { "logDriver" = "awslogs" "options" = { "awslogs-create-group" = "true" "awslogs-group" = "/ecs/identity-service-task-def" "awslogs-region" = "us-east-2" "awslogs-stream-prefix" = "ecs" } } linuxParameters = { initProcessEnabled = true } } ]) task_role_arn = aws_iam_role.services_ddb_full_access.arn execution_role_arn = aws_iam_role.ecs_task_execution.arn network_mode = "bridge" cpu = "512" memory = "512" requires_compatibilities = ["EC2"] # Set this to true if you want to keep old revisions # when this definition is changed skip_destroy = false } resource "aws_ecs_service" "identity_service" { name = "identity-service" cluster = aws_ecs_cluster.comm_services.id launch_type = "EC2" task_definition = aws_ecs_task_definition.identity_service.arn force_new_deployment = true desired_count = 1 lifecycle { ignore_changes = [desired_count] } # Expose Identity service to other services in the cluster service_connect_configuration { enabled = true service { discovery_name = local.identity_sc_dns_name port_name = local.identity_sc_port_name client_alias { port = local.identity_service_container_grpc_port dns_name = local.identity_sc_dns_name } } } load_balancer { target_group_arn = aws_lb_target_group.identity_service_grpc.arn container_name = local.identity_service_container_name container_port = local.identity_service_container_grpc_port } deployment_circuit_breaker { enable = true rollback = true } 
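This change collapses the weighted forward blocks into a single target_group_arn because the legacy EC2 target groups no longer exist. For reference, if traffic splitting between two target groups is ever needed again, the default_action of aws_lb_listener.feature_flags_https could take roughly this shape (fragment only; the canary target group and the 90/10 split are illustrative and not defined anywhere in this configuration):

  default_action {
    type = "forward"

    forward {
      # Existing ECS target group keeps most of the traffic
      target_group {
        arn    = aws_lb_target_group.feature_flags_ecs.arn
        weight = 90
      }

      # Hypothetical second target group for a small canary slice
      target_group {
        arn    = aws_lb_target_group.feature_flags_canary.arn
        weight = 10
      }
    }
  }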
diff --git a/services/terraform/remote/service_identity.tf b/services/terraform/remote/service_identity.tf
index b2b41b5be..9513f962c 100644
--- a/services/terraform/remote/service_identity.tf
+++ b/services/terraform/remote/service_identity.tf
@@ -1,231 +1,210 @@
 locals {
   identity_service_image_tag      = "0.3"
   identity_service_server_image   = "commapp/identity-server:${local.identity_service_image_tag}"
   identity_service_container_name = "identity-server"

   # Port that the container is listening on
   identity_service_container_grpc_port = 50054
   identity_sc_port_name                = "identity-service-ecs-grpc"
   identity_sc_dns_name                 = "identity-service"

   # Endpoint name accessible by other services in the same Service Connect namespace
   # This renders to e.g. 'identity-service:50054'
   identity_local_endpoint = "${local.identity_sc_dns_name}:${local.identity_service_container_grpc_port}"

   # Port that is exposed to the public SSL endpoint (appended to domain name)
   identity_service_grpc_public_port = 50054
   identity_service_domain_name      = "identity.${local.root_domain}"

   opaque_server_setup_secret_name = "identity/ServerSetup"
 }

 data "aws_secretsmanager_secret" "identity_server_setup" {
   name = local.opaque_server_setup_secret_name
 }

 resource "aws_ecs_task_definition" "identity_service" {
   family = "identity-service-task-def"
   container_definitions = jsonencode([
     {
       name      = local.identity_service_container_name
       image     = local.identity_service_server_image
       essential = true
       portMappings = [
         {
           name          = local.identity_sc_port_name
           containerPort = local.identity_service_container_grpc_port
           protocol      = "tcp"
           appProtocol   = "grpc"
         }
       ]
       environment = [
         {
           name  = "RUST_LOG"
           value = "info"
         },
         {
           name  = "KEYSERVER_PUBLIC_KEY"
           value = nonsensitive(local.secrets["keyserverPublicKey"])
         }
       ]
       secrets = [
         {
           # This is exposed as an environment variable in the container
           name      = "OPAQUE_SERVER_SETUP"
           valueFrom = data.aws_secretsmanager_secret.identity_server_setup.arn
         }
       ]
       logConfiguration = {
         "logDriver" = "awslogs"
         "options" = {
           "awslogs-create-group"  = "true"
           "awslogs-group"         = "/ecs/identity-service-task-def"
           "awslogs-region"        = "us-east-2"
           "awslogs-stream-prefix" = "ecs"
         }
       }
       linuxParameters = {
         initProcessEnabled = true
       }
     }
   ])
   task_role_arn            = aws_iam_role.services_ddb_full_access.arn
   execution_role_arn       = aws_iam_role.ecs_task_execution.arn
   network_mode             = "bridge"
   cpu                      = "512"
   memory                   = "512"
   requires_compatibilities = ["EC2"]

   # Set this to true if you want to keep old revisions
   # when this definition is changed
   skip_destroy = false
 }

 resource "aws_ecs_service" "identity_service" {
   name        = "identity-service"
   cluster     = aws_ecs_cluster.comm_services.id
   launch_type = "EC2"

   task_definition      = aws_ecs_task_definition.identity_service.arn
   force_new_deployment = true

   desired_count = 1
   lifecycle {
     ignore_changes = [desired_count]
   }

   # Expose Identity service to other services in the cluster
   service_connect_configuration {
     enabled = true
     service {
       discovery_name = local.identity_sc_dns_name
       port_name      = local.identity_sc_port_name
       client_alias {
         port     = local.identity_service_container_grpc_port
         dns_name = local.identity_sc_dns_name
       }
     }
   }

   load_balancer {
     target_group_arn = aws_lb_target_group.identity_service_grpc.arn
     container_name   = local.identity_service_container_name
     container_port   = local.identity_service_container_grpc_port
   }

   deployment_circuit_breaker {
     enable   = true
     rollback = true
   }

   enable_execute_command  = true
   enable_ecs_managed_tags = true
 }

 # Security group to configure access to the service
 resource "aws_security_group" "identity_service" {
   name   = "identity-service-ecs-sg"
   vpc_id = aws_vpc.default.id

   ingress {
     from_port   = local.identity_service_container_grpc_port
     to_port     = local.identity_service_container_grpc_port
     protocol    = "tcp"
     cidr_blocks = ["0.0.0.0/0"]
     description = "gRPC port"
   }

   # Allow all outbound traffic
   egress {
     from_port   = 0
     to_port     = 0
     protocol    = "-1"
     cidr_blocks = ["0.0.0.0/0"]
   }

   lifecycle {
     create_before_destroy = true
   }
 }

 resource "aws_lb_target_group" "identity_service_grpc" {
   name             = "identity-service-ecs-grpc-tg"
   port             = local.identity_service_container_grpc_port
   protocol         = "HTTP"
   protocol_version = "GRPC"
   vpc_id           = aws_vpc.default.id

   # The "bridge" network mode requires target type set to instance
   target_type = "instance"

   health_check {
     enabled             = true
     healthy_threshold   = 2
     unhealthy_threshold = 3
   }
 }

 # Load Balancer
 resource "aws_lb" "identity_service" {
   load_balancer_type = "application"
   name               = "identity-service-lb"
   internal           = false
   subnets = [
     aws_subnet.public_a.id,
     aws_subnet.public_b.id,
     aws_subnet.public_c.id,
   ]
 }

 resource "aws_lb_listener" "identity_service_grpc" {
   load_balancer_arn = aws_lb.identity_service.arn
   port              = local.identity_service_grpc_public_port
   protocol          = "HTTPS"
   ssl_policy        = "ELBSecurityPolicy-TLS13-1-2-2021-06"
   certificate_arn   = data.aws_acm_certificate.identity_service.arn

   default_action {
-    type = "forward"
-    forward {
-      # ECS target group
-      target_group {
-        arn    = aws_lb_target_group.identity_service_grpc.arn
-        weight = 1
-      }
-      # Legacy EC2 Target
-      dynamic "target_group" {
-        for_each = data.aws_lb_target_group.identity_service_legacy_ec2
-        content {
-          arn    = target_group.value["arn"]
-          weight = 0
-        }
-      }
-    }
+    type             = "forward"
+    target_group_arn = aws_lb_target_group.identity_service_grpc.arn
   }

   lifecycle {
     # Required only for existing resources to avoid plan difference
     ignore_changes = [default_action[0].forward[0].stickiness[0].duration]

     # Target group cannot be destroyed if it is used
     replace_triggered_by = [aws_lb_target_group.identity_service_grpc]
   }
 }

 # SSL Certificate
 data "aws_acm_certificate" "identity_service" {
   domain   = local.identity_service_domain_name
   statuses = ["ISSUED"]
 }

-# Legacy EC2 instance target
-data "aws_lb_target_group" "identity_service_legacy_ec2" {
-  # We don't have legacy EC2 services in staging
-  count = local.is_staging ? 0 : 1
-  name  = "identity-service-tg"
-}
-
 # Required for Route53 DNS record
 output "identity_service_load_balancer_dns_name" {
   value = aws_lb.identity_service.dns_name
 }
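The identity listener terminates TLS on port 50054, so the public gRPC endpoint is just the service domain plus identity_service_grpc_public_port, while in-cluster callers use identity_local_endpoint through Service Connect. A hypothetical convenience local (not defined in this diff, shown only to make that relationship explicit) would look like:

locals {
  # Illustrative only; renders to e.g. "https://identity.<root_domain>:50054"
  identity_public_grpc_endpoint = "https://${local.identity_service_domain_name}:${local.identity_service_grpc_public_port}"
}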