Browse source

Merge pull request #273 from mdr-engineering/hotfix/ftd_na_KeepLegacyWAFForPortal

Preserved Portal with Legacy WAF Configuration
Frederick Damstra 3 years ago
Parent
Commit
adbe93d299

+ 1 - 1
base/customer_portal/rds.tf

@@ -24,7 +24,7 @@ resource "aws_db_instance" "postgres" {
   deletion_protection         = var.environment == "test" ? "false" : "true"
   delete_automated_backups    = "true"
   engine                      = "postgres"
-  engine_version              = var.environment == "test" ? "12.7" : "10.15"
+  engine_version              = var.environment == "test" ? "12" : "10"
   final_snapshot_identifier   = "customerportal"
   instance_class              = "db.t2.small"
   identifier                  = "customerportal"

+ 6 - 0
base/customer_portal_legacywaf/README.md

@@ -0,0 +1,6 @@
+# TEMPORARY!
+
+This directory should be deleted once the portal is migrated to WAFv2, which is in testing as of
+late September 2021.
+
+To cutover, edit ~/xdr-terraform-live/prod/aws-us-gov/mdr-prod-c2/200-customer-portal/terragrunt.hcl
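
The cutover would presumably just repoint the terragrunt source from this legacy module back to the WAFv2 module. A minimal sketch of that edit, assuming the usual terragrunt layout (the source URL and ref below are placeholders, not taken from this repo):

# terragrunt.hcl (sketch; module path and ref are assumptions)
terraform {
  # before the cutover:
  # source = "git::<modules-repo-url>//base/customer_portal_legacywaf?ref=<version>"
  # after the cutover:
  source = "git::<modules-repo-url>//base/customer_portal?ref=<version>"
}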

+ 1 - 0
base/customer_portal_legacywaf/amis.tf

@@ -0,0 +1 @@
+../amis.tf

+ 35 - 0
base/customer_portal_legacywaf/certificate.tf

@@ -0,0 +1,35 @@
+# Certificate
+resource "aws_acm_certificate" "cert" {
+  domain_name       = "portal.${var.dns_info["public"]["zone"]}"
+  validation_method = "DNS"
+
+  lifecycle {
+    create_before_destroy = true
+  }
+
+  tags = merge(var.standard_tags, var.tags)
+}
+
+resource "aws_acm_certificate_validation" "cert" {
+  certificate_arn         = aws_acm_certificate.cert.arn
+  validation_record_fqdns = [for record in aws_route53_record.cert_validation: record.fqdn]
+}
+
+resource "aws_route53_record" "cert_validation" {
+  provider = aws.mdr-common-services-commercial
+
+  for_each = {
+    for dvo in aws_acm_certificate.cert.domain_validation_options : dvo.domain_name => {
+      name   = dvo.resource_record_name
+      record = dvo.resource_record_value
+      type   = dvo.resource_record_type
+    }
+  }
+
+  allow_overwrite = true
+  name            = each.value.name
+  records         = [each.value.record]
+  ttl             = 60
+  type            = each.value.type
+  zone_id         = var.dns_info["public"]["zone_id"]
+}
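
One thing worth noting: elb.tf below attaches aws_acm_certificate.cert.arn to the listener directly, which can race against DNS validation on a fresh apply. A common alternative (a sketch, not what this commit does) is to reference the validation resource instead, so dependents wait for issuance:

# Sketch: the validation resource re-exports the certificate ARN once
# DNS validation has completed, serializing the dependency.
resource "aws_alb_listener" "portal_https_example" {
  load_balancer_arn = aws_alb.portal.arn
  port              = "443"
  protocol          = "HTTPS"
  certificate_arn   = aws_acm_certificate_validation.cert.certificate_arn

  default_action {
    target_group_arn = aws_alb_target_group.portal.arn
    type             = "forward"
  }
}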

+ 98 - 0
base/customer_portal_legacywaf/cloud-init/cloud-init.tpl

@@ -0,0 +1,98 @@
+#cloud-config
+preserve_hostname: false
+salt-master: ${salt_master}
+
+# Write files happens early
+write_files:
+- content: |
+    proxy=http://${proxy}:80
+  path: /etc/yum.conf
+  append: true
+- content: |
+    [global]
+    proxy=${proxy}
+  path: /etc/pip.conf
+- content: |
+    export HTTPS_PROXY=http://${proxy}:80
+    export HTTP_PROXY=http://${proxy}:80
+    export NO_PROXY=localhost,127.0.0.1,169.254.169.254,pvt.xdrtest.accenturefederalcyber.com,pvt.xdr.accenturefederalcyber.com,reposerver.msoc.defpoint.local,jenkins.msoc.defpoint.local,pod1search-splunk-sh.msoc.defpoint.local,s3.amazonaws.com,ssm.${ aws_region }.amazonaws.com,ec2messages.${ aws_region }.amazonaws.com,ec2.${ aws_region }.amazonaws.com,ssmmessages.${ aws_region }.amazonaws.com,iratemoses.mdr.defpoint.com,jira.mdr.defpoint.com,reposerver.pvt.xdr.accenturefederalcyber.com,jenkins.pvt.xdr.accenturefederalcyber.com,pod1search-splunk-sh.pvt.xdr.accenturefederalcyber.com,reposerver.pvt.xdrtest.accenturefederalcyber.com,jenkins.pvt.xdrtest.accenturefederalcyber.com,pod1search-splunk-sh.pvt.xdrtest.accenturefederalcyber.com,iratemoses.xdr.accenturefederalcyber.com,jira.xdr.accenturefederalcyber.com,iratemoses.xdrtest.accenturefederalcyber.com,jira.xdrtest.accenturefederalcyber.com
+    export https_proxy=$HTTPS_PROXY
+    export http_proxy=$HTTP_PROXY
+    export no_proxy=$NO_PROXY
+  path: /etc/profile.d/proxy.sh
+- content: |
+    master: ${salt_master}
+  path: /etc/salt/minion
+- content: |
+    grains:
+      environment: ${ environment }
+      aws_region: ${ aws_region }
+      aws_partition: ${ aws_partition }
+      aws_partition_alias: ${ aws_partition_alias }
+  path: /etc/salt/minion.d/cloud_init_grains.conf
+
+#yum_repos:
+#  epel-release:
+#    baseurl: http://download.fedoraproject.org/pub/epel/7/$basearch
+#    enabled: false
+#    failovermethod: priority
+#    gpgcheck: true
+#    gpgkey: http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
+#    name: Extra Packages for Enterprise Linux 7 - Release
+
+packages:
+ - vim
+
+package_update: true # Always patch
+
+growpart:
+  mode: auto
+  devices: [ '/', '/var', '/var/log', '/var/log/audit', '/var/tmp', '/tmp', '/home' ]
+  ignore_growroot_disabled: false
+
+bootcmd:
+ - "INSTANCE_ID=`/usr/bin/curl -f --connect-timeout 1 --silent http://169.254.169.254/latest/meta-data/instance-id | tail -c 3`"
+ - "/bin/hostnamectl set-hostname customer-portal-$INSTANCE_ID'.${zone}'"
+ - "echo customer-portal-$INSTANCE_ID'.${zone}' > /etc/salt/minion_id"
+
+
+runcmd:
+ - /bin/systemctl restart salt-minion
+ - /bin/systemctl enable salt-minion
+ - /bin/systemctl start amazon-ssm-agent
+ - /bin/systemctl enable amazon-ssm-agent
+ - /usr/sbin/aide --update --verbose=0
+ - /bin/cp /var/lib/aide/aide.db.new.gz /var/lib/aide/aide.db.gz
+ # Allow Salt to set up Portal for the autoscaling group
+ - "/bin/echo MARKER: START SALT SYNC"
+ - /bin/salt-call saltutil.sync_all refresh=True
+ # Chicken/egg problem: we need pillars to get correct grains, and grains to get correct pillars.
+ # The sleep needs to be this long due to the magical forces inside the salt master that govern pillars.
+ # It takes 30 minutes to fully start the portal docker container.
+ - /bin/sleep 420
+ - /bin/salt-call --refresh-grains-cache saltutil.refresh_modules
+ - /bin/sleep 60
+ - /bin/salt-call --refresh-grains-cache saltutil.refresh_grains
+ - /bin/sleep 60
+ - /bin/salt-call --refresh-grains-cache saltutil.refresh_pillar
+ - /bin/sleep 60
+  # Recording our initial values is useful for troubleshooting
+ - /bin/salt-call pillar.get aws_registry_account --out=text > /root/pillar.aws_registry_account.yml
+ - /bin/salt-call pillar.items > /root/pillars.initial_highstate.yml
+ - /bin/salt-call grains.items > /root/grains.initial_highstate.yml
+ - "/bin/echo MARKER: START FIRST HIGHSTATE"
+ - /bin/salt-call state.highstate
+ - "/bin/echo MARKER: END FIRST HIGHSTATE"
+ - "/bin/echo MARKER: START SECOND HIGHSTATE"
+ - /bin/salt-call state.highstate
+ - "/bin/echo MARKER: END SECOND HIGHSTATE"
+
+
+# Either final message or power state, but probably not both
+final_message: "The system is up after $UPTIME seconds"
+#power_state:
+#  delay: "+30"
+#  mode: reboot
+#  message: "System configured after $UPTIME seconds"
+#  timeout: 300
+#  condition: true

+ 111 - 0
base/customer_portal_legacywaf/ecr.tf

@@ -0,0 +1,111 @@
+locals {
+  registries = [
+    "portal_server",
+    "django_nginx", 
+  ]
+}
+
+data "aws_vpc_endpoint_service" "ecr_api_endpoint" {
+  service = "ecr.api"
+}
+
+data "aws_vpc_endpoint_service" "ecr_dkr_endpoint" {
+  service = "ecr.dkr"
+}
+
+resource "aws_iam_instance_profile" "portal_server_instance_profile" {
+  name     = "portal_server-instance-profile"
+  role     = aws_iam_role.portal_server.name
+}
+
+resource "aws_iam_role" "portal_server" {
+  name     = "portal-instance-role"
+
+  assume_role_policy = <<EOF
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Sid": "",
+      "Effect": "Allow",
+      "Principal": {
+        "Service": [
+          "ec2.amazonaws.com",
+          "ssm.amazonaws.com"
+        ]
+      },
+      "Action": "sts:AssumeRole"
+    }
+  ]
+}
+EOF
+}
+
+data "aws_iam_policy_document" "portal_server_ecr_policy" {
+  statement {
+    actions = [
+      "ecr:GetAuthorizationToken",
+    ]
+
+    resources = ["*"]
+  }
+
+  statement {
+    sid    = "AllowCommunicationECR"
+    effect = "Allow"
+
+    actions = [
+      "ecr:BatchCheckLayerAvailability",
+      "ecr:GetDownloadUrlForLayer",
+      "ecr:GetRepositoryPolicy",
+      "ecr:DescribeRepositories",
+      "ecr:ListImages",
+      "ecr:DescribeImages",
+      "ecr:BatchGetImage",
+      "ecr:InitiateLayerUpload",
+      "ecr:UploadLayerPart",
+      "ecr:CompleteLayerUpload",
+      "ecr:PutImage"
+    ]
+
+    resources = [
+      "arn:${var.aws_partition}:ecr:${var.aws_region}:${var.common_services_account}:repository/portal_server",
+      "arn:${var.aws_partition}:ecr:${var.aws_region}:${var.common_services_account}:repository/django_nginx"
+    ]
+  }
+
+  statement {
+    sid    = "Tags"
+    effect = "Allow"
+
+    actions = [
+      "ec2:DescribeTags",
+      "ec2:DescribeInstances"
+    ]
+    resources = [
+      "*"
+    ]
+  }
+}
+
+resource "aws_iam_policy" "portal_server_ecr_policy" {
+  name     = "portal_server_ecr"
+  path     = "/"
+  policy   = data.aws_iam_policy_document.portal_server_ecr_policy.json
+}
+
+resource "aws_iam_role_policy_attachment" "portal_server_ecr" {
+  role       = aws_iam_role.portal_server.name
+  policy_arn = aws_iam_policy.portal_server_ecr_policy.arn
+}
+
+data "aws_iam_policy" "default_instance_policy_s3_binaries" {
+  name        = "default_instance_s3_binaries"
+  path_prefix = "/launchroles/"
+}
+
+resource "aws_iam_role_policy_attachment" "portal_server_s3_binaries" {
+  role       = aws_iam_role.portal_server.name
+  policy_arn = data.aws_iam_policy.default_instance_policy_s3_binaries.arn
+}
+

+ 152 - 0
base/customer_portal_legacywaf/elb.tf

@@ -0,0 +1,152 @@
+
+# ---------------------------------------------------------------------------------------------------------------------
+# LOAD BALANCER FOR PORTAL
+# ---------------------------------------------------------------------------------------------------------------------
+resource "aws_alb" "portal" {
+  name            = "portal-alb-${var.environment}"
+  security_groups = [ aws_security_group.customer_portal_alb.id, ]
+  internal        = false 
+  subnets         = var.public_subnets
+
+  tags = merge( var.standard_tags, var.tags, { Name = "portal-alb-${var.environment}" })
+
+  access_logs {
+    bucket = "xdr-elb-${ var.environment }"
+    prefix = ""
+    enabled = true
+  }
+}
+
+# Create a new target group
+resource "aws_alb_target_group" "portal" {
+  name                 = "portal-alb-targets-${var.environment}"
+  port                 = 443 
+  protocol             = "HTTPS"
+  vpc_id               = var.vpc_id
+
+  health_check {
+    protocol = "HTTPS"
+    path    = "/api/health/"
+    matcher = "200-400"
+  }
+
+  stickiness {
+    type    = "lb_cookie"
+    enabled = false 
+  }
+
+  tags = merge( var.standard_tags, var.tags, )
+}
+
+# Create a new ALB listener (certificate_arn waits for the DNS cutover)
+resource "aws_alb_listener" "portal_https" {
+  load_balancer_arn = aws_alb.portal.arn
+  port              = "443"
+  protocol          = "HTTPS"
+  ssl_policy        = "ELBSecurityPolicy-TLS-1-2-2017-01"
+  certificate_arn   = aws_acm_certificate.cert.arn
+
+  default_action {
+    target_group_arn = aws_alb_target_group.portal.arn
+    type             = "forward"
+  }
+}
+
+# HTTPS redirect
+resource "aws_lb_listener" "portal_https_redirect" {
+  load_balancer_arn = aws_alb.portal.arn
+  port              = "80"
+  protocol          = "HTTP"
+
+  default_action {
+    type = "redirect"
+
+    redirect {
+      port        = "443"
+      protocol    = "HTTPS"
+      status_code = "HTTP_301"
+    }
+  }
+}
+
+# Attach the instances to the ELB
+resource "aws_autoscaling_attachment" "customer_portal_asg_attachment" {
+  alb_target_group_arn = aws_alb_target_group.portal.arn
+  autoscaling_group_name = aws_autoscaling_group.customer_portal.name
+}
+
+#----------
+# DNS Entry
+#----------
+module "public_dns_record" {
+  source = "../../submodules/dns/public_ALIAS_record"
+
+  name = "portal"
+  target_dns_name = aws_alb.portal.dns_name
+  target_zone_id  = aws_alb.portal.zone_id
+  dns_info = var.dns_info
+
+  providers = {
+    aws.mdr-common-services-commercial = aws.mdr-common-services-commercial
+  }
+}
+
+#------------------------------------
+# Security Group
+#------------------------------------
+
+resource "aws_security_group" "customer_portal_alb" {
+  name        = "customer_portal_alb_inbound_sg"
+  description = "Allow Customer Portal ALB HTTP Traffic Inbound"
+  vpc_id      = var.vpc_id
+}
+
+resource "aws_security_group_rule" "customer_portal_alb_https" {
+  protocol          = "tcp"
+  type              = "ingress"
+  from_port         = 443
+  to_port           = 443
+  security_group_id = aws_security_group.customer_portal_alb.id
+  cidr_blocks       = [ var.environment == "test" ? "10.0.0.0/8" : "0.0.0.0/0",  ]
+}
+
+# Allow viewing of the test portal from home. We don't want the world to view the test portal.
+resource "aws_security_group_rule" "customer_portal_alb_https_test" {
+  protocol          = "tcp"
+  type              = "ingress"
+  from_port         = 443
+  to_port           = 443
+  security_group_id = aws_security_group.customer_portal_alb.id
+  cidr_blocks       = flatten(concat(var.portal_test_whitelist, formatlist("%s/32",var.nat_public_ips)))
+}
+
+## Needed for the HTTPS redirect
+resource "aws_security_group_rule" "customer_portal_alb_http" {
+  protocol          = "tcp"
+  type              = "ingress"
+  from_port         = 80
+  to_port           = 80
+  security_group_id = aws_security_group.customer_portal_alb.id
+  cidr_blocks       = [ var.environment == "test" ? "10.0.0.0/8" : "0.0.0.0/0", ]
+}
+
+# Needed for Sensu Check from the proxy in test
+resource "aws_security_group_rule" "customer_portal_sensu_check" {
+  count = var.environment == "test" ? 1 : 0
+  protocol          = "tcp"
+  type              = "ingress"
+  from_port         = 443
+  to_port           = 443
+  security_group_id = aws_security_group.customer_portal_alb.id
+  cidr_blocks       = [ "${var.proxy_public_ip}/32", ]
+}
+
+resource "aws_security_group_rule" "customer_portal_alb" {
+  protocol                 = "tcp"
+  type                     = "egress"
+  from_port                = 443
+  to_port                  = 443
+  security_group_id        = aws_security_group.customer_portal_alb.id
+  source_security_group_id = aws_security_group.customer_portal.id
+}
+

+ 288 - 0
base/customer_portal_legacywaf/main.tf

@@ -0,0 +1,288 @@
+# Some instance variables
+locals {
+  ami_selection       = "minion" # master, minion, ...
+}
+
+# Rather than pass in the aws security group, we just look it up. This will
+# probably be useful in other places as well.
+data "aws_security_group" "typical-host" {
+  name   = "typical-host"
+  vpc_id = var.vpc_id
+}
+
+# Use the default EBS key
+data "aws_kms_key" "ebs-key" {
+  key_id = "alias/ebs_root_encrypt_decrypt"
+}
+
+#------------------------------------
+# EC2 ASG 
+#------------------------------------
+resource "aws_launch_template" "customer_portal" {
+  name          = "customer-portal-lt"
+  instance_type = var.instance_type
+  image_id      = local.ami_map[local.ami_selection]
+  user_data     = data.template_cloudinit_config.cloud-init.rendered
+  ebs_optimized = true
+  tags          = merge(var.standard_tags, var.tags)
+  key_name      = "msoc-build"
+
+  iam_instance_profile {
+    name = aws_iam_instance_profile.portal_server_instance_profile.name
+  }
+
+  network_interfaces {
+    delete_on_termination       = true
+    associate_public_ip_address = false
+    security_groups             = [ data.aws_security_group.typical-host.id, aws_security_group.customer_portal.id ]
+  }
+
+  block_device_mappings {
+    device_name = "/dev/sda1"
+
+    ebs {
+      volume_type = "gp3"
+      volume_size = "100"
+      delete_on_termination = true
+      encrypted = true
+      kms_key_id = data.aws_kms_key.ebs-key.arn
+    }
+  }
+  block_device_mappings {
+    # swap
+    device_name = "/dev/xvdm"
+
+    ebs {
+      volume_type = "gp3"
+      volume_size = "8"
+      delete_on_termination = true
+      encrypted = true
+      kms_key_id = data.aws_kms_key.ebs-key.arn
+      # Snapshot IDs need to be grabbed from the ami, or it will replace every time. It's ugly.
+      # This may prompt replacement when the AMI is updated.
+      # See:
+      #   https://github.com/hashicorp/terraform/issues/19958
+      #   https://github.com/terraform-providers/terraform-provider-aws/issues/13118
+      #snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdm"].ebs.snapshot_id
+    }
+  }
+  block_device_mappings {
+    # /home
+    device_name = "/dev/xvdn"
+
+    ebs {
+      volume_type = "gp3"
+      volume_size = "4"
+      delete_on_termination = true
+      encrypted = true
+      kms_key_id = data.aws_kms_key.ebs-key.arn
+      #snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdn"].ebs.snapshot_id
+    }
+  }
+  block_device_mappings {
+    # /var
+    device_name = "/dev/xvdo"
+
+    ebs {
+      volume_type = "gp3"
+      volume_size = "15"
+      delete_on_termination = true
+      encrypted = true
+      kms_key_id = data.aws_kms_key.ebs-key.arn
+      #snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdo"].ebs.snapshot_id
+    }
+  }
+  block_device_mappings {
+    # /var/tmp
+    device_name = "/dev/xvdp"
+
+    ebs {
+      volume_type = "gp3"
+      volume_size = "4"
+      delete_on_termination = true
+      encrypted = true
+      kms_key_id = data.aws_kms_key.ebs-key.arn
+      #snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdp"].ebs.snapshot_id
+    }
+  }
+  block_device_mappings {
+    # /var/log
+    device_name = "/dev/xvdq"
+
+    ebs {
+      volume_type = "gp3"
+      volume_size = "8"
+      delete_on_termination = true
+      encrypted = true
+      kms_key_id = data.aws_kms_key.ebs-key.arn
+      #snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdq"].ebs.snapshot_id
+    }
+  }
+  block_device_mappings {
+    # /var/log/audit
+    device_name = "/dev/xvdr"
+
+    ebs {
+      volume_type = "gp3"
+      volume_size = "8"
+      delete_on_termination = true
+      encrypted = true
+      kms_key_id = data.aws_kms_key.ebs-key.arn
+      #snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdr"].ebs.snapshot_id
+    }
+  }
+  block_device_mappings {
+    # /tmp
+    device_name = "/dev/xvds"
+
+    ebs {
+      volume_type = "gp3"
+      volume_size = "4"
+      delete_on_termination = true
+      encrypted = true
+      kms_key_id = data.aws_kms_key.ebs-key.arn
+      #snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvds"].ebs.snapshot_id
+    }
+  }
+
+  tag_specifications {
+    resource_type = "instance"
+    tags = merge(var.tags, { "Name": var.instance_name }) # This may have no effect?
+  }
+
+  tag_specifications {
+    resource_type = "volume"
+    tags = merge(var.tags, { "Name": var.instance_name }) # This may have no effect
+  }
+
+  lifecycle {
+    create_before_destroy = true
+  }
+}
+
+resource "aws_autoscaling_group" "customer_portal" {
+  name = "customer-portal-asg"
+  launch_template {
+    id = aws_launch_template.customer_portal.id
+    version = "$Latest"
+  }
+  vpc_zone_identifier = var.private_subnets
+  min_size                  = 1
+  max_size                  = 2
+  desired_capacity          = 2
+  wait_for_capacity_timeout = 0
+  health_check_type         = "EC2"
+  tag {
+    key                 = "Name"
+    value               = var.instance_name
+    propagate_at_launch = true
+  }
+
+  # Must ignore changes to attachments, or tf will flip flop
+  lifecycle {
+    ignore_changes = [ load_balancers, target_group_arns ]
+  }
+}
+
+# Render a multi-part cloud-init config making use of the part
+# above, and other source files
+data "template_cloudinit_config" "cloud-init" {
+  gzip          = true
+  base64_encode = true
+
+  # Main cloud-config configuration file.
+  part {
+    filename     = "init.cfg"
+    content_type = "text/cloud-config"
+    content      = templatefile("${path.module}/cloud-init/cloud-init.tpl",
+      {
+        zone = var.dns_info["private"]["zone"]
+        environment = var.environment
+        salt_master  = var.salt_master
+        proxy = var.proxy
+        aws_partition = var.aws_partition
+        aws_partition_alias = var.aws_partition_alias
+        aws_region = var.aws_region
+      }
+    )
+  }
+
+  # Additional parts as needed
+  #part {
+  #  content_type = "text/x-shellscript"
+  #  content      = "ffbaz"
+  #}
+}
+
+
+
+#------------------------------------
+# S3 Bucket. What is this used for? Uncomment if needed.
+#------------------------------------
+# resource "aws_s3_bucket" "customer-portal" {
+#   bucket = "dps-customer-portal-${terraform.workspace}"
+#   acl    = "private"
+
+#   tags = merge(var.standard_tags, var.tags, )
+# }
+
+#------------------------------------
+# Security Groups
+#------------------------------------
+
+resource "aws_security_group" "customer_portal" {
+  name        = "customer_portal_http_inbound_sg"
+  description = "Allow Customer Portal HTTP Inbound From ALB"
+  vpc_id      = var.vpc_id
+}
+
+resource "aws_security_group_rule" "customer_portal" {
+  protocol                 = "tcp"
+  type                     = "ingress"
+  from_port                = 443
+  to_port                  = 443
+  security_group_id        = aws_security_group.customer_portal.id
+  source_security_group_id = aws_security_group.customer_portal_alb.id
+}
+
+resource "aws_security_group_rule" "customer_portal_postgres_outbound" {
+  type                     = "egress"
+  from_port                = 5432
+  to_port                  = 5432
+  protocol                 = "tcp"
+  security_group_id = aws_security_group.customer_portal.id
+  source_security_group_id = aws_security_group.postgres.id
+}
+
+resource "aws_security_group_rule" "customer_portal_http_outbound" {
+  type        = "egress"
+  from_port   = 80
+  to_port     = 80
+  protocol    = "tcp"
+  cidr_blocks = ["0.0.0.0/0"]
+  security_group_id = aws_security_group.customer_portal.id
+}
+
+resource "aws_security_group_rule" "customer_portal_https_outbound" {
+  type        = "egress"
+  from_port   = 443
+  to_port     = 443
+  protocol    = "tcp"
+  cidr_blocks = ["0.0.0.0/0"]
+  security_group_id = aws_security_group.customer_portal.id
+}
+
+resource "aws_security_group_rule" "customer_portal_smtps_outbound" {
+  type        = "egress"
+  from_port   = 465
+  to_port     = 465
+  protocol    = "tcp"
+  cidr_blocks = ["0.0.0.0/0"]
+  security_group_id = aws_security_group.customer_portal.id
+}
+
+### Output environment ID
+#output portal_env_id {
+#  value = "${aws_elastic_beanstalk_environment.mdr-customer-portal-env.id}"
+#}
+
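
Since the launch template comments warn that AMI updates "may prompt replacement", and the ASG tracks version "$Latest", one way to roll new AMIs out cleanly (a sketch, assuming a recent AWS provider; not part of this commit) is an instance_refresh block on the autoscaling group:

# Sketch: rolling instance refresh when the launch template changes
resource "aws_autoscaling_group" "customer_portal_refresh_example" {
  name                = "customer-portal-asg"
  min_size            = 1
  max_size            = 2
  vpc_zone_identifier = var.private_subnets

  launch_template {
    id      = aws_launch_template.customer_portal.id
    version = "$Latest"
  }

  # Replace instances in batches, keeping at least half healthy
  instance_refresh {
    strategy = "Rolling"
    preferences {
      min_healthy_percentage = 50
    }
  }
}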

+ 1 - 0
base/customer_portal_legacywaf/outputs.tf

@@ -0,0 +1 @@
+

+ 62 - 0
base/customer_portal_legacywaf/rds.tf

@@ -0,0 +1,62 @@
+#------------------------------------
+# RDS Cluster
+#------------------------------------
+resource "aws_kms_key" "customer_portal_kms" {
+  description = "RDS KMS Key"
+  enable_key_rotation = true
+}
+
+resource "aws_db_subnet_group" "customer_portal_rds_subnets" {
+  name        = "customer_portal_rds_subnets"
+  description = "Customer Portal RDS Private subnet"
+  subnet_ids  = var.private_subnets
+}
+
+# yeah, I alphabetized it. Don't you alphabetize your config files?
+resource "aws_db_instance" "postgres" {
+  allocated_storage           = 20
+  apply_immediately           = "true"
+  auto_minor_version_upgrade  = "true"
+  db_subnet_group_name        = aws_db_subnet_group.customer_portal_rds_subnets.name
+  backup_window               = "03:00-06:00"
+  backup_retention_period     = 7
+  ca_cert_identifier          = "rds-ca-2017"
+  deletion_protection         = var.environment == "test" ? "false" : "true"
+  delete_automated_backups    = "true"
+  engine                      = "postgres"
+  engine_version              = var.environment == "test" ? "12" : "10"
+  final_snapshot_identifier   = "customerportal"
+  instance_class              = "db.t2.small"
+  identifier                  = "customerportal"
+  kms_key_id                  = aws_kms_key.customer_portal_kms.arn
+  maintenance_window          = "Mon:00:00-Mon:03:00"
+  name                        = "customerportal"
+  password                    = var.environment == "test" ? "foobarbaz" : "050ff734-fb33-9248-13e4-7d8ad2e899a0"
+  port                        = 5432
+  skip_final_snapshot         = var.environment == "test" ? "true" : "false"
+  storage_type                = "gp2"
+  storage_encrypted           = "true"
+  tags                        = merge( var.standard_tags, var.tags )
+  username                    = "portal"
+  vpc_security_group_ids      = [ aws_security_group.postgres.id, ]
+}
+
+#------------------------------------
+# Security Groups
+#------------------------------------
+
+resource "aws_security_group" "postgres" {
+  name        = "customer_portal_postgres_inbound_sg"
+  description = "Allow Customer Portal HTTP Traffic Inbound"
+  vpc_id      = var.vpc_id
+}
+
+resource "aws_security_group_rule" "customer_portal_postgres_inbound" {
+  security_group_id = aws_security_group.postgres.id
+
+  type        = "ingress"
+  from_port   = 5432
+  to_port     = 5432
+  protocol    = "tcp"
+  cidr_blocks = ["10.0.0.0/8"]
+}
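
The literal passwords above live in version control as well as in state. A hedged alternative (a sketch, not part of this commit; the value still lands in Terraform state, but at least stops being committed) is to generate the master password with the random provider:

# Sketch: generate the master password instead of hardcoding it
resource "random_password" "portal_db" {
  length  = 32
  special = false
}

# then, in aws_db_instance.postgres:
#   password = random_password.portal_db.result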

+ 54 - 0
base/customer_portal_legacywaf/vars.tf

@@ -0,0 +1,54 @@
+variable "tags" { type = map }
+variable "dns_info" { type = map }
+variable "cidr_map" { type = map }
+variable "instance_termination_protection" { type = bool }
+variable "standard_tags" { type = map }
+variable "environment" { type = string }
+variable "trusted_ips" { type = list }
+variable "aws_region" { type = string }
+variable "aws_partition" { type = string }
+variable "aws_partition_alias" { type = string }
+variable "aws_account_id" { type = string }
+variable "common_services_account" { type = string }
+variable "proxy" { type = string }
+variable "salt_master" { type = string }
+variable "portal_test_whitelist" { type = list }
+
+variable "nat_public_ips" { type = list }
+
+variable "admin_remote_ipset" {
+  type = list(object(
+    {
+      value = string
+      type  = string
+    }))
+}
+
+variable "instance_name" {
+  description = "Hostname, DNS entry, etc."
+  type = string
+}
+
+variable "instance_type" { 
+  type = string
+  default = "t3a.micro"
+}
+
+variable "vpc_id" {
+  type = string
+}
+
+variable "private_subnets" {
+  type = list(string)
+}
+
+variable "public_subnets" {
+  type = list(string)
+}
+
+variable "proxy_public_ip" {
+  type = string
+  
+}
+
+

+ 34 - 0
base/customer_portal_legacywaf/waf.tf

@@ -0,0 +1,34 @@
+# This can be removed once WAFv2 is enabled in prod
+locals {
+  blacklisted_ips = [
+    {
+      value = "172.16.0.0/16"
+      type  = "IPV4"
+    },
+    {
+      value = "192.168.0.0/16"
+      type  = "IPV4"
+    },
+    {
+      value = "169.254.0.0/16"
+      type  = "IPV4"
+    },
+    {
+      value = "127.0.0.1/32"
+      type  = "IPV4"
+    },
+  ]
+  waf_prefix = "portal"
+}
+
+module "regional_waf" {
+  source             = "../../submodules/waf_owasp_top10"
+  waf_prefix         = local.waf_prefix
+  blacklisted_ips    = local.blacklisted_ips
+  admin_remote_ipset = var.admin_remote_ipset
+}
+
+resource "aws_wafregional_web_acl_association" "portal_alb_waf" {
+  resource_arn = aws_alb.portal.arn
+  web_acl_id   = module.regional_waf.web_acl_id
+}
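
For reference, the WAFv2 cutover this file anticipates would swap the regional association for its v2 counterpart. A sketch, assuming a WAFv2 web ACL exists (aws_wafv2_web_acl.portal here is hypothetical; the real ACL presumably lives in base/customer_portal):

# Sketch: WAFv2 equivalent of the association above
resource "aws_wafv2_web_acl_association" "portal_alb_waf" {
  resource_arn = aws_alb.portal.arn
  web_acl_arn  = aws_wafv2_web_acl.portal.arn
}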