
Merge branch 'master' into feature/bp_MSOCI-1448_portal_gc

Brad Poulton, 4 years ago
Parent
Current commit
09645348ad

+ 5 - 0
base/dns/inbound_dns_resolver/README.md

@@ -0,0 +1,5 @@
+# Creates an Inbound DNS Resolver
+
+Allows systems to perform DNS lookups against a private DNS zone.
+
+Used by the legacy environment to allow inbound DNS queries for msoc.defpoint.local.

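For context, a minimal sketch of how this module might be wired up from an environment configuration; the source path, VPC/subnet references, and pass-through values below are illustrative, not taken from this commit:

module "inbound_dns_resolver" {
  source = "../../base/dns/inbound_dns_resolver"   # illustrative path

  primary_vpc = module.vpc.vpc_id            # hypothetical VPC reference
  subnets     = module.vpc.private_subnets   # one resolver IP address is created per subnet

  # Inherited variables passed through from the environment
  tags                       = {}
  standard_tags              = local.standard_tags
  dns_info                   = local.dns_info
  inbound_resolver_endpoints = []
  aws_account_id             = var.aws_account_id
  aws_partition              = var.aws_partition
  account_list               = []
}

The resolver_endpoint_ips output can then be handed to whatever needs to reach the private zone, for example on-prem DNS forwarders.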
+ 56 - 0
base/dns/inbound_dns_resolver/main.tf

@@ -0,0 +1,56 @@
+resource "aws_route53_resolver_endpoint" "private_resolver" {
+  name      = "xdr_msoc_local"
+  direction = "INBOUND"
+
+  security_group_ids = [ aws_security_group.resolver_security_group.id ]
+
+  dynamic "ip_address" {
+    for_each = var.subnets
+
+    content {
+      subnet_id = ip_address.value
+    }
+  }
+
+  tags = merge(var.standard_tags, var.tags)
+}
+
+resource "aws_security_group" "resolver_security_group" {
+  name        = "route53_resolver_inbound"
+  description = "Allow DNS inbound traffic"
+  vpc_id      = var.primary_vpc
+
+  ingress {
+    description = "DNS_UDP"
+    from_port   = 53
+    to_port     = 53
+    protocol    = "udp"
+    cidr_blocks = [ "10.0.0.0/8" ]
+  }
+
+  ingress {
+    description = "DNS_TCP"
+    from_port   = 53
+    to_port     = 53
+    protocol    = "tcp"
+    cidr_blocks = [ "10.0.0.0/8" ]
+  }
+
+  egress {
+    description = "DNS_UDP"
+    from_port   = 53
+    to_port     = 53
+    protocol    = "udp"
+    cidr_blocks = [ "10.0.0.0/8" ]
+  }
+
+  egress {
+    description = "DNS_TCP"
+    from_port   = 53
+    to_port     = 53
+    protocol    = "tcp"
+    cidr_blocks = [ "10.0.0.0/8" ]
+  }
+
+  tags = merge(var.standard_tags, var.tags)
+}

+ 3 - 0
base/dns/inbound_dns_resolver/outputs.tf

@@ -0,0 +1,3 @@
+output "resolver_endpoint_ips" {
+  value = [ for r in aws_route53_resolver_endpoint.private_resolver.ip_address: r.ip ]
+}

+ 18 - 0
base/dns/inbound_dns_resolver/vars.tf

@@ -0,0 +1,18 @@
+variable "primary_vpc" { 
+  description = "VPC for the inbound connector"
+  type = string 
+}
+
+variable "subnets" { 
+  description = "Subnets in which to create the resolver."
+  type = list 
+}
+
+# inherited variables
+variable tags { type = map }
+variable inbound_resolver_endpoints { type = list }
+variable dns_info { type = map }
+variable standard_tags { type = map }
+variable aws_account_id { type = string }
+variable aws_partition { type = string }
+variable account_list { type = list }

+ 18 - 0
base/dns/private_dns/main.tf

@@ -27,6 +27,15 @@ resource "aws_route53_zone" "private" {
   }
 }
 
+resource "aws_route53_record" "dnstest" {
+  zone_id = aws_route53_zone.private.id
+  name    = "dnstest"
+  type    = "A"
+  ttl     = "300"
+  # Private test IP (RFC 1918): https://tools.ietf.org/html/rfc1918
+  records = [ "10.10.10.10" ]
+}
+
 resource "aws_route53_zone_association" "associations" {
   for_each = toset(local.remaining_vpcs)
 
@@ -53,6 +62,15 @@ resource "aws_route53_zone" "reverse" {
   }
 }
 
+resource "aws_route53_record" "dnstest_reverse" {
+  zone_id = aws_route53_zone.reverse.id
+  name    = "10.10.10"
+  type    = "PTR"
+  ttl     = "300"
+  # Private test IP (RFC 1918): https://tools.ietf.org/html/rfc1918
+  records = [ "dnstest.${ var.dns_info["private"]["zone"] }" ]
+}
+
 resource "aws_route53_zone_association" "reverse_associations" {
   for_each = toset(local.remaining_vpcs)
 

+ 14 - 0
base/dns/private_dns/outputs.tf

@@ -9,3 +9,17 @@ output "reverse_zone_id" {
 output dns_servers {
   value = [ for ipblock in aws_route53_resolver_endpoint.private_resolver.ip_address: ipblock["ip"] ]
 }
+
+output test_fwd_dns_entry {
+  value = { 
+    "fqdn": aws_route53_record.dnstest.fqdn,
+    "value": tolist(aws_route53_record.dnstest.records)[0]
+  }
+}
+
+output test_reverse_dns_entry {
+  value = { 
+    "fqdn": aws_route53_record.dnstest_reverse.fqdn, # lets not make this harder than it has to be
+    "value": tolist(aws_route53_record.dnstest_reverse.records)[0]
+  }
+}

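For reference, in the test environment the two new outputs should render roughly as follows; the forward entry matches the Sensu check added later in this commit, and the reverse FQDN assumes the reverse zone is 10.in-addr.arpa:

test_fwd_dns_entry = {
  "fqdn"  = "dnstest.pvt.xdrtest.accenturefederalcyber.com"
  "value" = "10.10.10.10"
}
test_reverse_dns_entry = {
  "fqdn"  = "10.10.10.10.in-addr.arpa"
  "value" = "dnstest.pvt.xdrtest.accenturefederalcyber.com."
}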
+ 10 - 0
base/dns/public_dns/main.tf

@@ -88,3 +88,13 @@ resource "aws_route53_record" "soa_for_delegated" {
 
   records = each.value
 }
+
+resource "aws_route53_record" "dnstest" {
+  for_each = toset(var.hosted_public_dns_zones)
+  zone_id = aws_route53_zone.public[each.value].id
+  name    = "dnstest"
+  type    = "A"
+  ttl     = "300"
+  # Non-routable Test IP: https://tools.ietf.org/html/rfc5737
+  records = [ "203.0.113.1" ]
+}

+ 4 - 0
base/dns/public_dns/outputs.tf

@@ -5,3 +5,7 @@ output dns_zone_map {
       domain => value["zone_id"]
   }
 }
+
+output dns_entries_for_testing {
+  value = [ for r in aws_route53_record.dnstest: { "fqdn": r.fqdn, "value": tolist(r.records)[0] } ]
+}

+ 1 - 1
base/interconnects/cloud-init/cloud-init.tpl

@@ -9,7 +9,7 @@ write_files:
     ${fqdn}
   path: /etc/salt/minion_id
 - content: |
-    master: ${salt_master}
+    master: ${saltmaster}
   path: /etc/salt/minion
 - content: |
     grains:

+ 1 - 0
base/keycloak-single-instance/amis.tf

@@ -0,0 +1 @@
+../amis.tf

+ 74 - 0
base/keycloak-single-instance/cloud-init/cloud-init.tpl

@@ -0,0 +1,74 @@
+#cloud-config
+preserve_hostname: false
+hostname: ${hostname}
+salt-master: ${salt_master}
+fqdn: ${fqdn}
+
+# Write files happens early
+# but no proxy for the proxy. Commenting these out for other proxies
+write_files:
+- content: |
+    proxy=http://${proxy}:80
+  path: /etc/yum.conf
+  append: true
+- content: |
+    [global]
+    proxy=${proxy}
+  path: /etc/pip.conf
+- content: |
+    export HTTPS_PROXY=http://${proxy}:80
+    export HTTP_PROXY=http://${proxy}:80
+    export NO_PROXY=localhost,127.0.0.1,169.254.169.254,pvt.xdrtest.accenturefederalcyber.com,pvt.xdr.accenturefederalcyber.com,reposerver.msoc.defpoint.local,jenkins.msoc.defpoint.local,pod1search-splunk-sh.msoc.defpoint.local,s3.amazonaws.com,ssm.us-east-1.amazonaws.com,ec2messages.us-east-1.amazonaws.com,ec2.us-east-1.amazonaws.com,ssmmessages.us-east-1.amazonaws.com,iratemoses.mdr.defpoint.com,jira.mdr.defpoint.com,reposerver.pvt.xdr.accenturefederalcyber.com,jenkins.pvt.xdr.accenturefederalcyber.com,pod1search-splunk-sh.pvt.xdr.accenturefederalcyber.com,reposerver.pvt.xdrtest.accenturefederalcyber.com,jenkins.pvt.xdrtest.accenturefederalcyber.com,pod1search-splunk-sh.pvt.xdrtest.accenturefederalcyber.com,iratemoses.xdr.accenturefederalcyber.com,jira.xdr.accenturefederalcyber.com,iratemoses.xdrtest.accenturefederalcyber.com,jira.xdrtest.accenturefederalcyber.com
+    export https_proxy=$HTTPS_PROXY
+    export http_proxy=$HTTP_PROXY
+    export no_proxy=$NO_PROXY
+  path: /etc/profile.d/proxy.sh
+- content: |
+    ${fqdn}
+  path: /etc/salt/minion_id
+- content: |
+    master: ${salt_master}
+  path: /etc/salt/minion
+- content: |
+    grains:
+      environment: ${ environment }
+      aws_region: ${ aws_region }
+      aws_partition: ${ aws_partition }
+      aws_partition_alias: ${ aws_partition_alias }
+  path: /etc/salt/minion.d/cloud_init_grains.conf
+
+#yum_repos:
+#  epel-release:
+#    baseurl: http://download.fedoraproject.org/pub/epel/7/$basearch
+#    enabled: false
+#    failovermethod: priority
+#    gpgcheck: true
+#    gpgkey: http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
+#    name: Extra Packages for Enterprise Linux 7 - Release
+
+packages:
+ - vim
+
+package_update: true # Always patch
+
+growpart:
+  mode: auto
+  devices: [ '/', '/var', '/var/log', '/var/log/audit', '/var/tmp', '/tmp', '/home' ]
+  ignore_growroot_disabled: false
+
+runcmd:
+ - /bin/systemctl restart salt-minion
+ - /bin/systemctl enable salt-minion
+ - /bin/systemctl start amazon-ssm-agent
+ - /bin/systemctl enable amazon-ssm-agent
+ - /usr/sbin/aide --update --verbose=0
+ - /bin/cp /var/lib/aide/aide.db.new.gz /var/lib/aide/aide.db.gz
+
+# Either final message or power state, but probably not both
+final_message: "The system is up after $UPTIME seconds"
+#power_state:
+#  delay: "+30"
+#  mode: reboot
+#  message: "System configured after $UPTIME seconds"
+#  timeout: 300
+#  condition: true

+ 188 - 0
base/keycloak-single-instance/main.tf

@@ -0,0 +1,188 @@
+# Some instance variables
+locals {
+  ami_selection = "minion" # master, minion, ...
+}
+
+# Use the default EBS key
+data "aws_kms_key" "ebs-key" {
+  key_id = "alias/ebs_root_encrypt_decrypt"
+}
+
+resource "aws_network_interface" "instance" {
+  subnet_id = var.subnets[0]
+  security_groups = [ data.aws_security_group.typical-host.id, aws_security_group.instance.id ]
+  description = var.instance_name
+  tags = merge(var.standard_tags, var.tags, { Name = var.instance_name })
+}
+
+resource "aws_eip" "instance" {
+  vpc = true
+  tags = merge(var.standard_tags, var.tags, { Name = var.instance_name })
+}
+
+resource "aws_eip_association" "instance" {
+  network_interface_id = aws_network_interface.instance.id
+  allocation_id = aws_eip.instance.id
+}
+
+resource "aws_instance" "instance" {
+  #availability_zone = var.azs[count.index % 2]
+  tenancy = "default"
+  ebs_optimized = true
+  disable_api_termination = var.instance_termination_protection
+  instance_initiated_shutdown_behavior = "stop"
+  instance_type = var.instance_type
+  key_name = "msoc-build"
+  monitoring = false
+  iam_instance_profile = "msoc-default-instance-profile"
+
+  ami = local.ami_map[local.ami_selection]
+  # We need to ignore ebs_block_device changes, because if the AMI changes, so does the snapshot_id.
+  # If they add a feature to block more specific changes (eg `ebs_block_devices[*].snapshot_id`), then
+  # that could be removed.
+  lifecycle { ignore_changes = [ ami, key_name, user_data, ebs_block_device ] }
+
+  # These device definitions are optional, but added for clarity.
+  root_block_device {
+      volume_type = "gp2"
+      #volume_size = "60"
+      delete_on_termination = true
+      encrypted = true
+      kms_key_id = data.aws_kms_key.ebs-key.arn
+  }
+
+  ebs_block_device {
+    # swap
+    device_name = "/dev/xvdm"
+    #volume_size = 48
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    # Snapshot IDs need to be grabbed from the ami, or it will replace every time. It's ugly.
+    # This may prompt replacement when the AMI is updated.
+    # See:
+    #   https://github.com/hashicorp/terraform/issues/19958
+    #   https://github.com/terraform-providers/terraform-provider-aws/issues/13118
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdm"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /home
+    device_name = "/dev/xvdn"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdn"].ebs.snapshot_id
+
+  }
+  ebs_block_device {
+    # /var
+    device_name = "/dev/xvdo"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdo"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/tmp
+    device_name = "/dev/xvdp"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdp"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/log
+    device_name = "/dev/xvdq"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdq"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/log/audit
+    device_name = "/dev/xvdr"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdr"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /tmp
+    device_name = "/dev/xvds"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvds"].ebs.snapshot_id
+  }
+
+  network_interface {
+    device_index = 0
+    network_interface_id = aws_network_interface.instance.id
+  }
+
+  user_data = data.template_cloudinit_config.cloud_init_config.rendered
+  tags = merge( var.standard_tags, var.tags, { Name = var.instance_name })
+  volume_tags = merge( var.standard_tags, var.tags, { Name = var.instance_name })
+}
+
+module "private_dns_record" {
+  source = "../../submodules/dns/private_A_record"
+
+  name = var.instance_name
+  ip_addresses = [ aws_instance.instance.private_ip ]
+  dns_info = var.dns_info
+  reverse_enabled = var.reverse_enabled
+
+  providers = {
+    aws.c2 = aws.c2
+  }
+}
+
+module "public_dns_record" {
+  source = "../../submodules/dns/public_A_record"
+
+  name = var.instance_name
+  ip_addresses = [ aws_eip.instance.public_ip ]
+  dns_info = var.dns_info
+
+  providers = {
+    aws.mdr-common-services-commercial = aws.mdr-common-services-commercial
+  }
+}
+
+# The cloud-init data prepares the instance for use.
+data "template_file" "cloud_init" {
+  # Should these be in a common directory? I suspect they'd be reusable
+  template = file("${path.module}/cloud-init/cloud-init.tpl")
+
+  vars = {
+    hostname = var.instance_name
+    fqdn = "${var.instance_name}.${var.dns_info["private"]["zone"]}"
+    environment = var.environment
+    salt_master  = var.salt_master
+    proxy = var.proxy
+    aws_partition = var.aws_partition
+    aws_partition_alias = var.aws_partition_alias
+    aws_region = var.aws_region
+  }
+}
+
+# Render a multi-part cloud-init config making use of the part
+# above, and other source files
+data "template_cloudinit_config" "cloud_init_config" {
+  gzip          = true
+  base64_encode = true
+
+  # Main cloud-config configuration file.
+  part {
+    filename     = "init.cfg"
+    content_type = "text/cloud-config"
+    content      = data.template_file.cloud_init.rendered
+  }
+}

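The ami_map and block_device_mappings locals referenced above come from the shared ../amis.tf, which is symlinked in and not shown in this diff. A rough sketch of how such locals can be built, assuming the AMI is found with an aws_ami data source and an illustrative name filter:

# Assumption: the real definitions live in the shared ../amis.tf, not in this commit.
data "aws_ami" "minion" {
  most_recent = true
  owners      = ["self"]

  filter {
    name   = "name"
    values = ["minion-*"]   # illustrative name pattern
  }
}

locals {
  ami_map = {
    minion = data.aws_ami.minion.id
  }

  # Index each AMI's block device mappings by device name so snapshot IDs can be
  # looked up per device, e.g. local.block_device_mappings["minion"]["/dev/xvdm"].ebs.snapshot_id
  block_device_mappings = {
    minion = { for bdm in data.aws_ami.minion.block_device_mappings : bdm.device_name => bdm }
  }
}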
+ 11 - 0
base/keycloak-single-instance/outputs.tf

@@ -0,0 +1,11 @@
+output instance_arn {
+  value = aws_instance.instance.arn
+}
+
+output instance_public_ip {
+  value = aws_eip.instance.public_ip
+}
+
+output instance_private_ip {
+  value = aws_instance.instance.private_ip
+}

+ 115 - 0
base/keycloak-single-instance/security-groups.tf

@@ -0,0 +1,115 @@
+# Rather than pass in the AWS security group, we just look it up. This will
+# probably be useful in other places as well.
+data "aws_security_group" "typical-host" {
+  name   = "typical-host"
+  vpc_id = var.vpc_id
+}
+
+# For now, opening everything:
+#   ajp port: 8009
+#   http: 8080
+#   https: 8443
+#   mgmt-http: 9990
+#   mgmt-https: 9993
+#   txn-recovery-environment: 4712
+#   txn-status-manager: 4713
+#
+#   Also opening 80 and 443 for certbot
+
+resource "aws_security_group" "instance" {
+  name = "instance-${var.instance_name}"
+  description = "Instances of type ${var.instance_name}"
+  vpc_id = var.vpc_id
+  tags = merge(var.standard_tags, var.tags)
+}
+
+resource "aws_security_group_rule" "instance-http-in" {
+  description = ""
+  type = "ingress"
+  from_port = "80"
+  to_port = "80"
+  protocol = "tcp"
+  cidr_blocks = [ "0.0.0.0/0" ]
+  security_group_id = aws_security_group.instance.id
+}
+
+resource "aws_security_group_rule" "instance-https-in" {
+  description = ""
+  type = "ingress"
+  from_port = "443"
+  to_port = "443"
+  protocol = "tcp"
+  cidr_blocks = [ "0.0.0.0/0" ]
+  security_group_id = aws_security_group.instance.id
+}
+
+resource "aws_security_group_rule" "instance-ajp-in" {
+  description = ""
+  type = "ingress"
+  from_port = "8009"
+  to_port = "8009"
+  protocol = "tcp"
+  cidr_blocks = [ "0.0.0.0/0" ]
+  security_group_id = aws_security_group.instance.id
+}
+
+resource "aws_security_group_rule" "instance-alt-http-in" {
+  description = ""
+  type = "ingress"
+  from_port = "8080"
+  to_port = "8080"
+  protocol = "tcp"
+  cidr_blocks = [ "0.0.0.0/0" ]
+  security_group_id = aws_security_group.instance.id
+}
+
+resource "aws_security_group_rule" "instance-alt-https-in" {
+  description = ""
+  type = "ingress"
+  from_port = "8443"
+  to_port = "8443"
+  protocol = "tcp"
+  cidr_blocks = [ "0.0.0.0/0" ]
+  security_group_id = aws_security_group.instance.id
+}
+
+resource "aws_security_group_rule" "instance-mgmt-http-in" {
+  description = ""
+  type = "ingress"
+  from_port = "9990"
+  to_port = "9990"
+  protocol = "tcp"
+  cidr_blocks = [ "0.0.0.0/0" ]
+  security_group_id = aws_security_group.instance.id
+}
+
+resource "aws_security_group_rule" "instance-mgmt-https-in" {
+  description = ""
+  type = "ingress"
+  from_port = "9993"
+  to_port = "9993"
+  protocol = "tcp"
+  cidr_blocks = [ "0.0.0.0/0" ]
+  security_group_id = aws_security_group.instance.id
+}
+
+resource "aws_security_group_rule" "instance-txn-in" {
+  description = ""
+  type = "ingress"
+  from_port = "4712"
+  to_port = "4713"
+  protocol = "tcp"
+  cidr_blocks = [ "0.0.0.0/0" ]
+  security_group_id = aws_security_group.instance.id
+}
+
+# Lock down before production, but I couldn't get Let's Encrypt to work with the proxy
+resource "aws_security_group_rule" "instance-all-out" {
+  description = ""
+  type = "egress"
+  from_port = "-1"
+  to_port = "-1"
+  protocol = "-1"
+  cidr_blocks = [ "0.0.0.0/0" ]
+  security_group_id = aws_security_group.instance.id
+}

+ 52 - 0
base/keycloak-single-instance/vars.tf

@@ -0,0 +1,52 @@
+variable "instance_name" {
+  description = "Hostname, DNS entry, etc."
+  type = string
+}
+
+variable "azs" {
+  type = list(string)
+}
+
+variable "subnets" {
+  type = list(string)
+}
+
+variable "vpc_id" {
+  type = string
+}
+
+variable "tags" {
+  description = "Tags to add to the resource (in addition to global standard tags)"
+  type        = map
+  default     = { }
+}
+
+variable "instance_type" { 
+  type = string
+  default = "t3a.micro"
+}
+
+variable "reverse_enabled" { 
+  description = "Whether to create the reverse DNS entry."
+  type = bool
+  default = true
+}
+
+variable "trusted_ips" { type = list(string) }
+variable "xdr_interconnect" { type = list(string) }
+variable "nga_pop" { type = list(string) }
+variable "afs_azure_pop" { type = list(string) }
+variable "afs_pop" { type = list(string) }
+variable "proxy" { type = string }
+variable "salt_master" { type = string }
+
+variable "cidr_map" { type = map }
+variable "dns_info" { type = map }
+variable "standard_tags" { type = map }
+variable "environment" { type = string }
+variable "aws_region" { type = string }
+variable "aws_partition" { type = string }
+variable "aws_partition_alias" { type = string }
+variable "aws_account_id" { type = string }
+variable "common_services_account" { type = string }
+variable "instance_termination_protection" { type = bool }

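A minimal sketch of how this module might be instantiated from an account configuration; the instance name, subnet references, and pass-through values are illustrative, and the provider aliases mirror the ones used by the DNS submodules in main.tf:

module "keycloak" {
  source = "../../base/keycloak-single-instance"   # illustrative path

  instance_name                   = "keycloak"     # hostname and DNS entry
  instance_type                   = "t3a.micro"
  instance_termination_protection = true
  azs                             = var.azs
  subnets                         = var.public_subnets   # hypothetical subnet list; the first entry is used
  vpc_id                          = var.vpc_id
  reverse_enabled                 = true
  tags                            = {}

  proxy            = var.proxy
  salt_master      = var.salt_master
  trusted_ips      = var.trusted_ips
  xdr_interconnect = var.xdr_interconnect
  nga_pop          = var.nga_pop
  afs_azure_pop    = var.afs_azure_pop
  afs_pop          = var.afs_pop

  cidr_map                = var.cidr_map
  dns_info                = var.dns_info
  standard_tags           = local.standard_tags
  environment             = var.environment
  aws_region              = var.aws_region
  aws_partition           = var.aws_partition
  aws_partition_alias     = var.aws_partition_alias
  aws_account_id          = var.aws_account_id
  common_services_account = var.common_services_account

  providers = {
    aws                                = aws
    aws.c2                             = aws.c2
    aws.mdr-common-services-commercial = aws.mdr-common-services-commercial
  }
}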
+ 3 - 0
base/keycloak-single-instance/version.tf

@@ -0,0 +1,3 @@
+terraform {
+  required_version = "~> 0.13"
+}

+ 61 - 19
base/sensu-configuration/checks.tf

@@ -2,6 +2,48 @@ locals {
   splunk_hot = var.environment == "test" ? "10000" : "50000"
   interconnect-0 = var.environment == "test" ? ["169.254.230.197", "169.254.142.233", "169.254.221.229", "169.254.145.141"] : ["169.254.152.217", "169.254.88.105", "169.254.253.45", "169.254.91.129"]
   interconnect-1 = var.environment == "test" ? ["169.254.186.189", "169.254.119.73", "169.254.20.161", "169.254.128.189"] : ["169.254.247.157", "169.254.246.157", "169.254.22.21", "169.254.38.13"]
+  dns_checks_all = var.environment == "test" ? {
+    "dnstest.accenturefederalcyber.com": "203.0.113.1",
+    "dnstest.xdrtest.accenturefederalcyber.com": "203.0.113.1",
+  } : { 
+    "dnstest.accenturefederalcyber.com": "203.0.113.1",
+    "dnstest.xdr.accenturefederalcyber.com": "203.0.113.1",
+  }
+  dns_checks_private = var.environment == "test" ? {
+    "dnstest.pvt.xdrtest.accenturefederalcyber.com": "10.10.10.10",
+    "10.10.10.10": "dnstest.pvt.xdrtest.accenturefederalcyber.com.",
+    "dnstest.msoc.defpoint.local": "10.10.10.10",
+    "dnstest.mdr-test.defpoint.com": "10.10.10.10",
+  } : {
+    "dnstest.pvt.xdr.accenturefederalcyber.com": "10.10.10.10",
+    "10.10.10.10": "dnstest.pvt.xdr.accenturefederalcyber.com.",
+    "dnstest.msoc.defpoint.local": "10.10.10.10",
+    "dnstest.mdr.defpoint.com": "10.10.10.10",
+  }
+}
+
+resource "sensu_check" "check_dns_all" {
+  for_each       = local.dns_checks_all
+  name           = "check_dns_${ each.key }"
+  command        = "check_dns --hostname=${ each.key } --expected-address=${ each.value } --warning=${ var.sensu_checks["dns"]["warning"] }  --critical=${ var.sensu_checks["dns"]["critical"] }"
+  namespace      = "default"
+  subscriptions  = [ "linux", ]
+  handlers       = [ "victorops", "logfile", ]
+  cron           = "* * * * *"
+  publish        = "true"
+  runtime_assets = [ "sensu-plugins-monitoring", ]
+}
+
+resource "sensu_check" "check_dns_private" {
+  for_each       = local.dns_checks_private
+  name           = "check_dns_${ each.key }"
+  command        = "check_dns --hostname=${ each.key } --expected-address=${ each.value } --warning=${ var.sensu_checks["dns"]["warning"] }  --critical=${ var.sensu_checks["dns"]["critical"] }"
+  namespace      = "default"
+  subscriptions  = [ "private_dns_client", ]
+  handlers       = [ "victorops", "logfile", ]
+  cron           = "* * * * *"
+  publish        = "true"
+  runtime_assets = [ "sensu-plugins-monitoring", ]
 }
 
 resource "sensu_check" "check_disk_base" {
@@ -9,7 +51,7 @@ resource "sensu_check" "check_disk_base" {
   command        = "check_disk -c 250 -p /var -C -c 500 -p /var/log -C -c 1000 -p /var/log/audit -C -c 2000 -p /opt -C -c 500 -p /boot -C -c 1000 -p /"
   namespace      = "default"
   subscriptions  = [ "linux", ]
-  handlers       = [ "victorops", ]
+  handlers       = [ "victorops", "logfile", ]
   cron           = "* * * * *"
   publish        = "true"
   runtime_assets = [ "sensu-plugins-monitoring", ]
@@ -20,7 +62,7 @@ resource "sensu_check" "check_disk_indexer" {
   command        = "check_disk -c ${local.splunk_hot} -p /opt/splunkdata/hot -C -c 5000 -p /opt/splunk"
   namespace      = "default"
   subscriptions  = [ "check_disk_indexer", ]
-  handlers       = [ "victorops", ]
+  handlers       = [ "victorops", "logfile", ]
   cron           = "* * * * *"
   publish        = "true"
   runtime_assets = [ "sensu-plugins-monitoring", ]
@@ -31,7 +73,7 @@ resource "sensu_check" "check_disk_syslog" {
   command        = "check_disk -c 7000 -p /opt/syslog-ng"
   namespace      = "default"
   subscriptions  = [ "check_disk_syslog", ]
-  handlers       = [ "victorops", ]
+  handlers       = [ "victorops", "logfile", ]
   cron           = "* * * * *"
   publish        = "true"
   runtime_assets = [ "sensu-plugins-monitoring", ]
@@ -40,10 +82,10 @@ resource "sensu_check" "check_disk_syslog" {
 resource "sensu_check" "check_ping_interconnect-0" {
   for_each       = toset(local.interconnect-0)
   name           = "ping_interconnect-0-${index(local.interconnect-0, each.value) +1}"
-  command        = "check_ping -H ${each.value} -w 100,80% -c 100,80% -4"
+  command        = "check_ping -H ${each.value} -w 500,80% -c 500,80% -4"
   namespace      = "default"
   subscriptions  = [ "interconnect-0", ]
-  handlers       = [ "victorops", ]
+  handlers       = [ "victorops", "logfile", ]
   cron           = "* * * * *"
   publish        = "true"
   runtime_assets = [ "sensu-plugins-monitoring", ]
@@ -52,10 +94,10 @@ resource "sensu_check" "check_ping_interconnect-0" {
 resource "sensu_check" "check_ping_interconnect-1" {
   for_each       = toset(local.interconnect-1)
   name           = "ping_interconnect-1-${index(local.interconnect-1, each.value) +1}"
-  command        = "check_ping -H ${each.value} -w 100,80% -c 100,80% -4"
+  command        = "check_ping -H ${each.value} -w 500,80% -c 500,80% -4"
   namespace      = "default"
   subscriptions  = [ "interconnect-1", ]
-  handlers       = [ "victorops", ]
+  handlers       = [ "victorops", "logfile", ]
   cron           = "* * * * *"
   publish        = "true"
   runtime_assets = [ "sensu-plugins-monitoring", ]
@@ -66,7 +108,7 @@ resource "sensu_check" "check_phantom_ports" {
   command        = "check-ports.rb -h 0.0.0.0 -p 443"
   namespace      = "default"
   subscriptions  = [ "phantom_ports", ]
-  handlers       = [ "victorops", ]
+  handlers       = [ "victorops", "logfile", ]
   cron           = "* * * * *"
   publish        = "true"
   runtime_assets = [ "sensu-plugins-network-checks", "sensu-ruby-runtime", ]
@@ -79,7 +121,7 @@ resource "sensu_check" "check_portal_http" {
   command        = "metrics-curl.rb -u https://portal.xdr.accenturefederalcyber.com"
   namespace      = "default"
   subscriptions  = [ "portal", ]
-  handlers       = [ "victorops", ]
+  handlers       = [ "victorops", "logfile", ]
   cron           = "* * * * *"
   publish        = "true"
   runtime_assets = [ "sensu-plugins-http", "sensu-ruby-runtime", ]
@@ -90,7 +132,7 @@ resource "sensu_check" "check_salt_master_ports" {
   command        = "check-ports.rb -h 0.0.0.0 -p 4505,4506"
   namespace      = "default"
   subscriptions  = [ "salt_master_ports", ]
-  handlers       = [ "victorops", ]
+  handlers       = [ "victorops", "logfile", ]
   cron           = "* * * * *"
   publish        = "true"
   runtime_assets = [ "sensu-plugins-network-checks", "sensu-ruby-runtime", ]
@@ -101,7 +143,7 @@ resource "sensu_check" "check_splunk_cm_ports" {
   command        = "check-ports.rb -h 0.0.0.0 -p 8089"
   namespace      = "default"
   subscriptions  = [ "splunk_cm_ports", ]
-  handlers       = [ "victorops", ]
+  handlers       = [ "victorops", "logfile", ]
   cron           = "* * * * *"
   publish        = "true"
   runtime_assets = [ "sensu-plugins-network-checks", "sensu-ruby-runtime", ]
@@ -112,7 +154,7 @@ resource "sensu_check" "check_splunk_ds_ports" {
   command        = "check-ports.rb -h 0.0.0.0 -p 8089"
   namespace      = "default"
   subscriptions  = [ "splunk_ds_ports", ]
-  handlers       = [ "victorops", ]
+  handlers       = [ "victorops", "logfile", ]
   cron           = "* * * * *"
   publish        = "true"
   runtime_assets = [ "sensu-plugins-network-checks", "sensu-ruby-runtime", ]
@@ -123,7 +165,7 @@ resource "sensu_check" "check_splunk_hf_ports" {
   command        = "check-ports.rb -h 0.0.0.0 -p 8089,8000"
   namespace      = "default"
   subscriptions  = [ "splunk_hf_ports", ]
-  handlers       = [ "victorops", ]
+  handlers       = [ "victorops", "logfile", ]
   cron           = "* * * * *"
   publish        = "true"
   runtime_assets = [ "sensu-plugins-network-checks", "sensu-ruby-runtime", ]
@@ -134,7 +176,7 @@ resource "sensu_check" "check_splunk_indexer_ports_moose" {
   command        = "check-ports.rb -h 0.0.0.0 -p 8089,9998,9887,8088"
   namespace      = "default"
   subscriptions  = [ "splunk_indexer_ports_moose", ]
-  handlers       = [ "victorops", ]
+  handlers       = [ "victorops", "logfile", ]
   cron           = "* * * * *"
   publish        = "true"
   runtime_assets = [ "sensu-plugins-network-checks", "sensu-ruby-runtime", ]
@@ -145,7 +187,7 @@ resource "sensu_check" "check_splunk_indexer_ports" {
   command        = "check-ports.rb -h 0.0.0.0 -p 8089,9998,9887"
   namespace      = "default"
   subscriptions  = [ "splunk_indexer_ports", ]
-  handlers       = [ "victorops", ]
+  handlers       = [ "victorops", "logfile", ]
   cron           = "* * * * *"
   publish        = "true"
   runtime_assets = [ "sensu-plugins-network-checks", "sensu-ruby-runtime", ]
@@ -156,7 +198,7 @@ resource "sensu_check" "check_splunk_sh_ports" {
   command        = "check-ports.rb -h 0.0.0.0 -p 8089,8000"
   namespace      = "default"
   subscriptions  = [ "splunk_sh_ports", ]
-  handlers       = [ "victorops", ]
+  handlers       = [ "victorops", "logfile", ]
   cron           = "* * * * *"
   publish        = "true"
   runtime_assets = [ "sensu-plugins-network-checks", "sensu-ruby-runtime", ]
@@ -167,7 +209,7 @@ resource "sensu_check" "check_splunk_uf_ports" {
   command        = "check-ports.rb -h 0.0.0.0 -p 8089"
   namespace      = "default"
   subscriptions  = [ "splunk", ]
-  handlers       = [ "victorops", ]
+  handlers       = [ "victorops", "logfile", ]
   cron           = "* * * * *"
   publish        = "true"
   runtime_assets = [ "sensu-plugins-network-checks", "sensu-ruby-runtime", ]
@@ -178,8 +220,8 @@ resource "sensu_check" "check_syslog-ng_service" {
   command        = "check-process.rb -p syslog-ng"
   namespace      = "default"
   subscriptions  = [ "syslog-ng_service", ]
-  handlers       = [ "victorops", ]
+  handlers       = [ "victorops", "logfile", ]
   cron           = "* * * * *"
   publish        = "true"
   runtime_assets = [ "sensu-plugins-process-checks", "sensu-ruby-runtime", ]
-}
+}

+ 11 - 1
base/sensu-configuration/handlers.tf

@@ -17,4 +17,14 @@ resource "sensu_handler" "handler_victorops" {
   filters        = [ "is_incident", "not_silenced", "handler-delay", ]
   runtime_assets = [ "sensu-plugins-victorops", "sensu-ruby-runtime", ]
   command        = "handler-victorops.rb --map_go_event_into_ruby -a https://alert.victorops.com/integrations/generic/20131114/alert/864a1b38-4243-4137-8baa-b587ba5f300b/ -r ${local.victorops_team}"
-}
+}
+
+resource "sensu_handler" "handler_logfile" {
+  name           = "logfile"
+  type           = "pipe"
+  namespace      = "default"
+  handlers       = [ ]
+  filters        = [ ]
+  runtime_assets = [ "sensu-ruby-runtime", ]
+  command        = "/usr/local/bin/stdin_to_log.py"
+}

+ 1 - 0
base/sensu-configuration/vars.tf

@@ -1,2 +1,3 @@
+variable "sensu_checks" { type = map }
 variable "dns_info" { type = map }
 variable "environment" { type = string }

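The new check_dns resources look up their thresholds from the sensu_checks map added here; an illustrative shape for that variable (the keys match the lookups in checks.tf, the threshold values are placeholders):

sensu_checks = {
  dns = {
    warning  = 2   # check_dns warning threshold (seconds)
    critical = 5   # check_dns critical threshold (seconds)
  }
}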
+ 1 - 1
base/splunk_servers/cluster_master/main.tf

@@ -236,7 +236,7 @@ resource "aws_security_group_rule" "splunk-api-in" {
   from_port         = 8089
   to_port           = 8089
   protocol          = "tcp"
-  cidr_blocks       = toset(concat(var.splunk_legacy_cidr, [ var.vpc_cidr ], var.cidr_map["vpc-access"]))
+  cidr_blocks       = toset(concat(var.splunk_legacy_cidr, [ var.vpc_cidr ], var.cidr_map["vpc-access"], var.cidr_map["vpc-private-services"]))
   security_group_id = aws_security_group.cluster_master_security_group.id
 }
 

+ 1 - 3
base/splunk_servers/heavy_forwarder/main.tf

@@ -226,9 +226,7 @@ resource "aws_security_group_rule" "splunk-api-in" {
   from_port         = 8089
   to_port           = 8089
   protocol          = "tcp"
-  # Leaving these commented, as we'll probably need to add to this rule
-  #cidr_blocks       = toset(concat(var.cidr_map["vpc-access"], var.cidr_map["vpc-private-services"]))
-  cidr_blocks       = var.cidr_map["vpc-access"]
+  cidr_blocks       = toset(concat(var.cidr_map["vpc-access"], var.cidr_map["vpc-private-services"]))
   security_group_id = aws_security_group.heavy_forwarder_security_group.id
 }
 

+ 2 - 2
base/splunk_servers/indexer_cluster/security-group-indexers.tf

@@ -5,7 +5,7 @@
 # x   tcp/8000      - Splunk Web                 - (local.access_cidrs) vpc-access, legacy openvpn, legacy bastion
 # x   tcp/8088      - Splunk HEC                 - (local.data_sources) Entire VPC + var.additional_source + var.splunk_legacy_cidr
 # x   tcp/8088      - MOOSE ONLY                 - 10.0.0.0/8
-# x   tcp/8089      - Splunk API                 - (local.access_cidrs) vpc-access, legacy openvpn, legacy bastion
+# x   tcp/8089      - Splunk API                 - (local.access_cidrs) vpc-access, legacy openvpn, legacy bastion, legacy infra (vpc-private-services) VPC for monitoring console
 # x   tcp/8089      - Splunk API + IDX Discovery - (local.splunk_vpc_cidrs) Entire VPC + var.splunk_legacy_cidr
 # x   tcp/8089      - MOOSE ONLY                 - 10.0.0.0/8
 # x   tcp/9887      - IDX Replication            - (local.splunk_vpc_cidrs) Entire VPC + var.splunk_legacy_cidr
@@ -15,7 +15,7 @@
 #     tcp/9887      - IDX Replication            - (local.splunk_vpc_cidrs) Entire VPC + var.splunk_legacy_cidr
 #     tcp/8089      - Splunk API + IDX Discovery - (local.splunk_vpc_cidrs) Entire VPC + var.splunk_legacy_cidr
 locals {
-  splunk_vpc_cidrs = toset(concat(var.splunk_legacy_cidr, [ var.vpc_cidr ]))
+  splunk_vpc_cidrs = toset(concat(var.splunk_legacy_cidr, [ var.vpc_cidr ], var.cidr_map["vpc-private-services"]))
   access_cidrs     = var.cidr_map["vpc-access"]
   data_sources     = toset(concat(tolist(local.splunk_vpc_cidrs), var.splunk_data_sources))
 }

+ 33 - 2
base/splunk_servers/legacy_hec/README.md

@@ -1,5 +1,36 @@
-This module creates two HEC load balancers for legacy compatibility. It should not be added to new accounts.
+# Summary
 
-Without this, the existing customer AWS data and other things submitting ot the HEC could get lost during the migration.
+This module creates HEC load balancers using the previous domain name for
+legacy compatibility. It should not be added to new accounts.
+
+Without this, the existing customer AWS data and other things submitting to the
+HEC could get lost during the migration.
 
 But if activity is low/zero, destroy it!
+
+## iratemoses Aliases
+
+Additionally, for MOOSE only, it creates 'iratemoses' aliases.
+
+## List of HECs
+
+### Permanent:
+
+These are the HECs created by the indexer cluster module, which will stick around:
+
+- moose-hec.xdrtest.accenturefederalcyber.com - non-ACK external HEC
+- moose-hec-ack.xdrtest.accenturefederalcyber.com - ACK external HEC
+- moose-hec.pvt.xdrtest.accenturefederalcyber.com - non-ACK internal HEC
+- moose-hec-ack.pvt.xdrtest.accenturefederalcyber.com - ACK internal HEC
+
+These are the HECs that are created by the legacy_hec module, which should not be used for any new purposes:
+
+- moose-hec.mdr-test.defpoint.com
+- moose-hec-ack.mdr-test.defpoint.com
+- iratemoses.msoc.defpoint.local (moose only)
+- iratemoses.mdr-test.defpoint.com (moose only)
+
+Notably, the following intentionally do not exist:
+
+- moose-hec.msoc.defpoint.local
+- moose-hec-ack.msoc.defpoint.local

+ 19 - 0
base/splunk_servers/legacy_hec/elb-with-acks.tf

@@ -14,6 +14,25 @@ resource "aws_route53_record" "hec-ack" {
   provider = aws.legacy
 }
 
+resource "aws_route53_record" "hec-ack-internal" {
+  # the 'private' copy of the msoc.defpoint.com domain
+  name = "${var.prefix}-hec-ack"
+  type = "CNAME"
+  zone_id = var.dns_info["legacy_public_internal"]["zone_id"]
+  ttl = "600"
+  records = [ aws_elb.hec_classiclb.dns_name ]
+
+  provider = aws.legacy
+}
+
+output hec-with-acks-fqdn {
+  value = aws_route53_record.hec-ack.fqdn
+}
+
+output hec-with-acks-records {
+  value = aws_elb.hec_classiclb.dns_name
+}
+
 #########################
 # Certificate
 resource "aws_acm_certificate" "hec_classiclb_cert" {

+ 35 - 12
base/splunk_servers/legacy_hec/elb-without-ack-internal.tf

@@ -1,19 +1,39 @@
 #------------------------------------------------------------------------------
-# An external ALB for the indexers for HEC
+# An internal ALB without ACKs for moose only
 #------------------------------------------------------------------------------
 
 #########################
 # DNS Entry
 resource "aws_route53_record" "hec_internal" {
+  count = local.is_moose ? 1 : 0
   name = "iratemoses"
   type = "CNAME"
   zone_id = var.dns_info["legacy_private"]["zone_id"]
   ttl = "600"
-  records = [ aws_lb.hec_internal.dns_name ]
+  records = [ aws_lb.hec_internal[count.index].dns_name ]
 
   provider = aws.legacy
 }
 
+resource "aws_route53_record" "hec_internal_accenturefederalcyber" {
+  count = local.is_moose ? 1 : 0
+  name = "iratemoses"
+  type = "CNAME"
+  zone_id = var.dns_info["private"]["zone_id"]
+  ttl = "600"
+  records = [ aws_lb.hec_internal[count.index].dns_name ]
+
+  provider = aws.c2
+}
+
+output hec-without-acks-internal-fqdn {
+  value = local.is_moose ? aws_route53_record.hec_internal[0].fqdn : "<not created for non-moose>"
+}
+
+output hec-without-acks-internal-records {
+  value = local.is_moose ? aws_lb.hec_internal[0].dns_name : "<not created for non-moose>"
+}
+
 #########################
 # Certificate
 
@@ -22,6 +42,7 @@ resource "aws_route53_record" "hec_internal" {
 #########################
 # ELB
 resource "aws_lb" "hec_internal" {
+  count              = local.is_moose ? 1 : 0
   tags               = merge(var.standard_tags, var.tags)
   name               = "iratemoses"
   load_balancer_type = "application"
@@ -32,31 +53,33 @@ resource "aws_lb" "hec_internal" {
 
 resource "aws_lb_listener" "hec_internal_443" {
   count             = local.is_moose ? 1 : 0
-  load_balancer_arn = aws_lb.hec.arn
+  load_balancer_arn = aws_lb.hec_internal[count.index].arn
   port              = 443
   protocol          = "HTTPS"
   ssl_policy        = "ELBSecurityPolicy-TLS-1-2-2017-01"
-  certificate_arn   = aws_acm_certificate.hec_cert.arn
+  certificate_arn   = aws_acm_certificate.hec_cert.arn # Intentionally using the external cert
   default_action {
     type = "forward"
-    target_group_arn = aws_lb_target_group.hec_internal_8088.arn
+    target_group_arn = aws_lb_target_group.hec_internal_8088[count.index].arn
   }
 }
 
 resource "aws_lb_listener" "hec_internal_8088" {
-  load_balancer_arn = aws_lb.hec.arn
+  count             = local.is_moose ? 1 : 0
+  load_balancer_arn = aws_lb.hec_internal[count.index].arn
   port              = 8088
   protocol          = "HTTPS"
   ssl_policy        = "ELBSecurityPolicy-TLS-1-2-2017-01"
-  certificate_arn   = aws_acm_certificate.hec_cert.arn
+  certificate_arn   = aws_acm_certificate.hec_cert.arn # Intentionally using the external cert
   default_action {
     type = "forward"
-    target_group_arn = aws_lb_target_group.hec_internal_8088.arn
+    target_group_arn = aws_lb_target_group.hec_internal_8088[count.index].arn
   }
 }
 
 resource "aws_lb_target_group" "hec_internal_8088" {
-  name         = "${var.prefix}-legacy-hec-targets"
+  count        = local.is_moose ? 1 : 0
+  name         = "${var.prefix}-legacy-hec-int-tgts"
   port         = 8088
   protocol     = "HTTPS"
   target_type  = "instance"
@@ -68,9 +91,9 @@ resource "aws_lb_target_group" "hec_internal_8088" {
   }
 }
 
-# Attach the instnaces to the ELB
+# Attach the instances to the ELB
 resource "aws_autoscaling_attachment" "hec_internal_asg_attachments" {
-  for_each = toset( var.elb_attachments )
-  alb_target_group_arn = aws_lb_target_group.hec_internal_8088.arn
+  for_each = local.is_moose ? toset( var.elb_attachments ) : []
+  alb_target_group_arn = aws_lb_target_group.hec_internal_8088[0].arn
   autoscaling_group_name = each.key
 }

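The local.is_moose flag that now gates these resources is defined elsewhere in the module and is not part of this diff; presumably it is something along the lines of:

locals {
  # Assumption: the actual definition is not in this commit.
  is_moose = var.prefix == "moose"
}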
+ 46 - 3
base/splunk_servers/legacy_hec/elb-without-ack.tf

@@ -14,7 +14,22 @@ resource "aws_route53_record" "hec" {
   provider = aws.legacy
 }
 
+resource "aws_route53_record" "hec_public_internal" {
+  name = "${var.prefix}-hec"
+  type = "CNAME"
+  zone_id = var.dns_info["legacy_public_internal"]["zone_id"]
+  ttl = "600"
+  records = [ aws_lb.hec.dns_name ]
+
+  provider = aws.legacy
+}
+
+output hec-without-ack-fqdn {
+  value = aws_route53_record.hec.fqdn
+}
+
 resource "aws_route53_record" "iratemoses" {
+  count = local.is_moose ? 1 : 0
   name = "iratemoses"
   type = "CNAME"
   zone_id = var.dns_info["legacy_public"]["zone_id"]
@@ -24,15 +39,43 @@ resource "aws_route53_record" "iratemoses" {
   provider = aws.legacy
 }
 
+resource "aws_route53_record" "iratemoses_public_internal" {
+  count = local.is_moose ? 1 : 0
+  name = "iratemoses"
+  type = "CNAME"
+  zone_id = var.dns_info["legacy_public_internal"]["zone_id"]
+  ttl = "600"
+  records = [ aws_lb.hec.dns_name ]
+
+  provider = aws.legacy
+}
+
+resource "aws_route53_record" "iratemoses_public" {
+  count = local.is_moose ? 1 : 0
+  name = "iratemoses"
+  type = "CNAME"
+  zone_id = var.dns_info["public"]["zone_id"]
+  ttl = "600"
+  records = [ aws_lb.hec.dns_name ]
+
+  provider = aws.mdr-common-services-commercial
+}
+
+output hec-without-ack-iratemoses-fqdn {
+  value = local.is_moose ? aws_route53_record.iratemoses[0].fqdn : "<not created for non-moose>"
+}
+
+output hec-without-ack-records {
+  value = aws_lb.hec.dns_name
+}
+
 #########################
 # Certificate
 resource "aws_acm_certificate" "hec_cert" {
   domain_name       = "${var.prefix}-hec.${var.dns_info["legacy_public"]["zone"]}"
   validation_method = "DNS"
 
-  subject_alternative_names = [
-    "iratemoses.${var.dns_info["legacy_public"]["zone"]}",
-  ]
+  subject_alternative_names = local.is_moose ? [ "iratemoses.${var.dns_info["legacy_public"]["zone"]}" ] : [ ]
 
   tags = merge(var.standard_tags, var.tags)
 }

+ 13 - 4
base/splunk_servers/searchhead/elb.tf

@@ -1,3 +1,12 @@
+locals {
+  # alb_clients access the SH
+  alb_clients = toset(concat(
+    var.cidr_map["vpc-access"], # VPN users
+    var.cidr_map["vpc-system-services"], # Salt master, etc
+    var.cidr_map["vpc-private-services"], # fm-shared search, qcompliance, phantom
+  ))
+}
+
 resource "aws_lb" "searchhead-alb" {
   name               = "${var.prefix}-searchhead-alb"
   internal           = true
@@ -145,7 +154,7 @@ resource "aws_security_group_rule" "searchhead-alb-api-in" {
   from_port         = 8089
   to_port           = 8089
   protocol          = "tcp"
-  cidr_blocks       = var.cidr_map["vpc-access"]
+  cidr_blocks       = local.alb_clients
   security_group_id = aws_security_group.searchhead-alb-sg.id
 }
 
@@ -154,7 +163,7 @@ resource "aws_security_group_rule" "searchhead-alb-https-in" {
   from_port         = 443
   to_port           = 443
   protocol          = "tcp"
-  cidr_blocks       = var.cidr_map["vpc-access"]
+  cidr_blocks       = local.alb_clients
   security_group_id = aws_security_group.searchhead-alb-sg.id
 }
 
@@ -163,7 +172,7 @@ resource "aws_security_group_rule" "searchhead-alb-8000-in" {
   from_port         = 8000
   to_port           = 8000
   protocol          = "tcp"
-  cidr_blocks       = var.cidr_map["vpc-access"]
+  cidr_blocks       = local.alb_clients
   security_group_id = aws_security_group.searchhead-alb-sg.id
 }
 
@@ -173,7 +182,7 @@ resource "aws_security_group_rule" "searchhead-http-in" {
   from_port         = 80
   to_port           = 80
   protocol          = "tcp"
-  cidr_blocks       = var.cidr_map["vpc-access"]
+  cidr_blocks       = local.alb_clients
   security_group_id = aws_security_group.searchhead-alb-sg.id
 }
 

+ 2 - 2
submodules/security_group/typical_host/main.tf

@@ -148,8 +148,8 @@ resource "aws_security_group_rule" "outbound_to_mailrelay_25" {
   from_port = 25
   to_port = 25
   protocol = "tcp"
-  cidr_blocks = var.cidr_map["smtp"]
-  count = length(var.cidr_map["smtp"]) > 0 ? 1 : 0
+  cidr_blocks = var.cidr_map["vpc-system-services"]
+  count = length(var.cidr_map["vpc-system-services"]) > 0 ? 1 : 0
 }
 
 resource "aws_security_group_rule" "outbound_to_ec2_s3_endpoint" {