Browse source

Merge pull request #33 from mdr-engineering/feature/ftd_MSOCI-1355_dns_test_instances

DNS Resolver and Test Instances
Frederick Damstra 5 years ago
parent
commit
c132ddb848

+ 1 - 1
base/dns/private_dns/main.tf

@@ -69,7 +69,7 @@ resource "aws_route53_resolver_endpoint" "private_resolver" {
   security_group_ids = [ aws_security_group.resolver_security_group.id ]
 
   dynamic "ip_address" {
-    for_each = var.subnets
+    for_each = slice(var.subnets, 0, 2)
 
     content {
       subnet_id = ip_address.value

+ 6 - 0
base/dns/resolver_instance/README.md

@@ -0,0 +1,6 @@
+# Test Instance
+
+Nothing special here. Just a test instance for troubleshooting. Set `create_test_instance` to false when not in use.
+
+Base install, full access in and out.
+

+ 89 - 0
base/dns/resolver_instance/amis.tf

@@ -0,0 +1,89 @@
+locals {
+  ami_map = {
+    "base"       = data.aws_ami.base.image_id,
+    "minion"     = data.aws_ami.minion.image_id,
+    "master"     = data.aws_ami.master.image_id,
+    #    "ubuntu1804" = data.aws_ami.ubuntu1804.image_id,
+  }
+}
+
+data "aws_ami" "base" {
+  most_recent = true
+  owners = [ var.common_services_account ]
+
+  filter {
+    name   = "virtualization-type"
+    values = ["hvm"]
+  }
+
+  filter {
+    name = "root-device-type"
+    values = ["ebs"]
+  }
+
+  filter {
+    name = "name"
+    values = [ "MSOC_RedHat_Base_*" ]
+  }
+}
+
+data "aws_ami" "minion" {
+  most_recent = true
+  owners = [ var.common_services_account ]
+
+  filter {
+    name   = "virtualization-type"
+    values = ["hvm"]
+  }
+
+  filter {
+    name = "root-device-type"
+    values = ["ebs"]
+  }
+
+  filter {
+    name = "name"
+    values = [ "MSOC_RedHat_Minion_*" ]
+  }
+}
+
+data "aws_ami" "master" {
+  most_recent = true
+  owners = [ var.common_services_account ]
+
+  filter {
+    name   = "virtualization-type"
+    values = ["hvm"]
+  }
+
+  filter {
+    name = "root-device-type"
+    values = ["ebs"]
+  }
+
+  filter {
+    name = "name"
+    values = [ "MSOC_RedHat_Master_*" ]
+  }
+}
+
+# Not presently in commercial
+#data "aws_ami" "ubuntu1804" {
+#  most_recent = true
+#  owners = [ var.common_services_account ]
+#
+#  filter {
+#    name   = "virtualization-type"
+#    values = ["hvm"]
+#  }
+#
+#  filter {
+#    name = "root-device-type"
+#    values = ["ebs"]
+#  }
+#
+#  filter {
+#    name = "name"
+#    values = [ "MSOC_Ubuntu_1804_*" ]
+#  }
+#}

+ 60 - 0
base/dns/resolver_instance/cloud-init/cloud-init.tpl

@@ -0,0 +1,60 @@
+#cloud-config
+preserve_hostname: false
+hostname: ${hostname}
+salt-master: ${saltmaster}
+fqdn: ${fqdn}
+
+# Write files happens early
+write_files:
+- content: |
+    proxy=http://${proxy}:80
+  path: /etc/yum.conf
+  append: true
+- content: |
+    ${fqdn}
+  path: /etc/salt/minion_id
+- content: |
+    master: ${saltmaster}
+  path: /etc/salt/minion
+- content: |
+    grains:
+      environment: ${ environment }
+      aws_partition: ${ aws_partition }
+      aws_partition_alias: ${ aws_partition_alias }
+  path: /etc/salt/minion.d/cloud_init_grains.conf
+
+yum_repos:
+  epel-release:
+    baseurl: http://download.fedoraproject.org/pub/epel/7/$basearch
+    enabled: false
+    failovermethod: priority
+    gpgcheck: true
+    gpgkey: http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
+    name: Extra Packages for Enterprise Linux 7 - Release
+
+packages:
+ - vim
+
+package_update: true # Always patch
+
+growpart:
+  mode: auto
+  devices: [ '/', '/var', '/var/log', '/var/log/audit', '/var/tmp', '/tmp', '/home' ]
+  ignore_growroot_disabled: false
+
+runcmd:
+ - /bin/systemctl restart salt-minion
+ - /bin/systemctl enable salt-minion
+ - /bin/systemctl start amazon-ssm-agent
+ - /bin/systemctl enable amazon-ssm-agent
+ - /usr/sbin/aide --update --verbose=0
+ - /bin/cp /var/lib/aide/aide.db.new.gz /var/lib/aide/aide.db.gz
+
+# Either final message or power state, but probably not both
+final_message: "The system is up after $UPTIME seconds"
+#power_state:
+#  delay: "+30"
+#  mode: reboot
+#  message: "System configured after $UPTIME seconds"
+#  timeout: 300
+#  condition: true

+ 163 - 0
base/dns/resolver_instance/main.tf

@@ -0,0 +1,163 @@
+locals {
+  instance_name = "resolver-${var.aws_partition_alias}"
+}
+
+resource "aws_network_interface" "instance" {
+  subnet_id = var.subnet_id
+  security_groups = [ module.required_security_group.id, aws_security_group.dns_security_group.id ]
+  description = local.instance_name
+  tags = merge(var.standard_tags, var.tags, { Name = local.instance_name })
+}
+
+resource "aws_eip" "instance" {
+  vpc = true
+  tags = merge(var.standard_tags, var.tags, { Name = local.instance_name })
+}
+
+resource "aws_eip_association" "instance" {
+  network_interface_id = aws_network_interface.instance.id
+  allocation_id = aws_eip.instance.id
+}
+
+resource "aws_instance" "instance" {
+  #availability_zone = var.azs[count.index % 2]
+  tenancy = "default"
+  ebs_optimized = true
+  disable_api_termination = var.instance_termination_protection
+  instance_initiated_shutdown_behavior = "stop"
+  instance_type = var.resolver_instance_type
+  key_name = var.resolver_instance_key_name
+  monitoring = false
+
+  ami = local.ami_map["minion"]
+  lifecycle { ignore_changes = [ ami, key_name, user_data ] }
+
+
+  root_block_device {
+      volume_type = "gp2"
+      #volume_size = "60"
+      delete_on_termination = true
+  }
+
+  network_interface {
+    device_index = 0
+    network_interface_id = aws_network_interface.instance.id
+  }
+
+  user_data = data.template_cloudinit_config.cloud-init.rendered
+  tags = merge( var.standard_tags, var.tags, { Name = local.instance_name })
+}
+
+module "private_dns_record" {
+  source = "../../../submodules/dns/private_A_record"
+
+  name = local.instance_name
+  ip_addresses = [ aws_instance.instance.private_ip ]
+  dns_info = var.dns_info
+  reverse_enabled = var.reverse_enabled
+
+  providers = {
+    aws.c2 = aws.c2
+  }
+}
+
+module "public_dns_record" {
+  source = "../../../submodules/dns/public_A_record"
+
+  name = local.instance_name
+  ip_addresses = [ aws_eip.instance.public_ip ]
+  dns_info = var.dns_info
+
+  providers = {
+    aws.mdr-common-services-commercial = aws.mdr-common-services-commercial
+  }
+}
+
+data "template_file" "cloud-init" {
+  # Should these be in a common directory? I suspect they'd be reusable
+  template = "${file("${path.module}/cloud-init/cloud-init.tpl")}"
+
+  vars = {
+    hostname = local.instance_name
+    fqdn = "dns-${var.aws_partition_alias}.${var.dns_info["private"]["zone"]}"
+    environment = var.environment
+    # can't use the DNS name like we would most places, because this is the DNS server
+    saltmaster  = var.salt_master_ip
+    proxy = var.proxy_ip
+    aws_partition = var.aws_partition
+    aws_partition_alias = var.aws_partition_alias
+  }
+}
+
+# Render a multi-part cloud-init config making use of the part
+# above, and other source files
+data "template_cloudinit_config" "cloud-init" {
+  gzip          = true
+  base64_encode = true
+
+  # Main cloud-config configuration file.
+  part {
+    filename     = "init.cfg"
+    content_type = "text/cloud-config"
+    content      = "${data.template_file.cloud-init.rendered}"
+  }
+
+  # Additional parts as needed
+  #part {
+  #  content_type = "text/x-shellscript"
+  #  content      = "ffbaz"
+  #}
+}
+
+module "required_security_group" {
+  source = "../../../submodules/security_group/required_group"
+
+  vpc_id = var.vpc_id
+  cidr_map = var.cidr_map
+  tags = merge(var.standard_tags, var.tags)
+  aws_region = var.aws_region
+  aws_partition = var.aws_partition
+}
+
+resource "aws_security_group" "dns_security_group" {
+  name = "dns_security_group"
+  description = "DNS Security Group"
+  vpc_id = var.vpc_id
+  tags = merge(var.standard_tags, var.tags)
+}
+
+resource "aws_security_group_rule" "dns-tcp" {
+  type              = "ingress"
+  from_port         = 53
+  to_port           = 53
+  protocol          = "tcp"
+  cidr_blocks       = [ "10.0.0.0/8" ]
+  security_group_id = aws_security_group.dns_security_group.id
+}
+
+resource "aws_security_group_rule" "dns-udp" {
+  type              = "ingress"
+  from_port         = 53
+  to_port           = 53
+  protocol          = "udp"
+  cidr_blocks       = [ "10.0.0.0/8" ]
+  security_group_id = aws_security_group.dns_security_group.id
+}
+
+resource "aws_security_group_rule" "dns_outbound_tcp" {
+  type = "egress"
+  from_port = 53
+  to_port = 53
+  protocol = "tcp"
+  cidr_blocks = [ "0.0.0.0/0" ]
+  security_group_id = aws_security_group.dns_security_group.id
+}
+
+resource "aws_security_group_rule" "dns_outbound_udp" {
+  type = "egress"
+  from_port = 53
+  to_port = 53
+  protocol = "udp"
+  cidr_blocks = [ "0.0.0.0/0" ]
+  security_group_id = aws_security_group.dns_security_group.id
+}

+ 11 - 0
base/dns/resolver_instance/outputs.tf

@@ -0,0 +1,11 @@
+output instance_arn {
+  value = aws_instance.instance.arn
+}
+
+output instance_public_ip {
+  value = aws_eip.instance.public_ip
+}
+
+output instance_private_ip {
+  value = aws_instance.instance.private_ip
+}

+ 36 - 0
base/dns/resolver_instance/vars.tf

@@ -0,0 +1,36 @@
+variable "subnet_id" {
+  type = string
+}
+variable "vpc_id" {
+  type = string
+}
+
+variable "tags" {
+  description = "Tags to add to the resource (in addition to global standard tags)"
+  type        = map
+  default     = { }
+}
+
+variable "resolver_instance_type" { 
+  type = string
+  default = "t3a.micro"
+}
+
+variable "reverse_enabled" { 
+  description = "Whether to create the reverse DNS entry."
+  type = bool
+  default = true
+}
+
+variable "proxy_ip" { type = string }
+variable "salt_master_ip" { type = string }
+variable "cidr_map" { type = map }
+variable "resolver_instance_key_name" { type = string }
+variable "dns_info" { type = map }
+variable "standard_tags" { type = map }
+variable "environment" { type = string }
+variable "aws_region" { type = string }
+variable "aws_partition" { type = string }
+variable "aws_partition_alias" { type = string }
+variable "common_services_account" { type = string }
+variable "instance_termination_protection" { type = bool }

+ 3 - 0
base/dns/resolver_instance/version.tf

@@ -0,0 +1,3 @@
+terraform {
+  required_version = "~> 0.12"
+}

+ 2 - 0
base/interconnects/cloud-init/cloud-init.tpl

@@ -9,6 +9,7 @@ fqdn: ${fqdn}
 yum_repos:
   epel-release:
     baseurl: http://download.fedoraproject.org/pub/epel/7/$basearch
+    # TODO: Disable this by default
     enabled: true
     failovermethod: priority
     gpgcheck: true
@@ -26,6 +27,7 @@ growpart:
   ignore_growroot_disabled: false
 
 runcmd:
+ - 'mkdir -p /etc/salt/minion.d'
  - 'echo ${fqdn} > /etc/salt/minion_id'
  - 'echo master: ${saltmaster} > /etc/salt/minion'
  - 'echo grains: > /etc/salt/minion.d/cloud_init_grains.conf'

+ 1 - 1
base/interconnects/main.tf

@@ -47,7 +47,7 @@ resource "aws_instance" "interconnects" {
   monitoring = false
 
   ami = var.default_ami
-  lifecycle { ignore_changes = [ ami ] }
+  lifecycle { ignore_changes = [ ami, key_name, user_data ] }
 
   tags = merge(
     var.standard_tags,

+ 89 - 0
base/test_instance/amis.tf

@@ -0,0 +1,89 @@
+locals {
+  ami_map = {
+    "base"       = data.aws_ami.base.image_id,
+    "minion"     = data.aws_ami.minion.image_id,
+    "master"     = data.aws_ami.master.image_id,
+    #    "ubuntu1804" = data.aws_ami.ubuntu1804.image_id,
+  }
+}
+
+data "aws_ami" "base" {
+  most_recent = true
+  owners = [ var.common_services_account ]
+
+  filter {
+    name   = "virtualization-type"
+    values = ["hvm"]
+  }
+
+  filter {
+    name = "root-device-type"
+    values = ["ebs"]
+  }
+
+  filter {
+    name = "name"
+    values = [ "MSOC_RedHat_Base_*" ]
+  }
+}
+
+data "aws_ami" "minion" {
+  most_recent = true
+  owners = [ var.common_services_account ]
+
+  filter {
+    name   = "virtualization-type"
+    values = ["hvm"]
+  }
+
+  filter {
+    name = "root-device-type"
+    values = ["ebs"]
+  }
+
+  filter {
+    name = "name"
+    values = [ "MSOC_RedHat_Minion_*" ]
+  }
+}
+
+data "aws_ami" "master" {
+  most_recent = true
+  owners = [ var.common_services_account ]
+
+  filter {
+    name   = "virtualization-type"
+    values = ["hvm"]
+  }
+
+  filter {
+    name = "root-device-type"
+    values = ["ebs"]
+  }
+
+  filter {
+    name = "name"
+    values = [ "MSOC_RedHat_Master_*" ]
+  }
+}
+
+# not presently in commercial
+#data "aws_ami" "ubuntu1804" {
+#  most_recent = true
+#  owners = [ var.common_services_account ]
+#
+#  filter {
+#    name   = "virtualization-type"
+#    values = ["hvm"]
+#  }
+#
+#  filter {
+#    name = "root-device-type"
+#    values = ["ebs"]
+#  }
+#
+#  filter {
+#    name = "name"
+#    values = [ "MSOC_Ubuntu_1804_*" ]
+#  }
+#}

+ 39 - 9
base/test_instance/cloud-init/cloud-init.tpl

@@ -1,21 +1,51 @@
 #cloud-config
-# TODO: This needs to be customized/fixed
 preserve_hostname: false
 hostname: ${hostname}
+salt-master: ${saltmaster}
 fqdn: ${fqdn}
 
+# NOTE: See the cloud-init.tpl under dns/resolver_instance for an
+#       example of how to do this on a system that needs the proxy.
+
+yum_repos:
+  epel-release:
+    baseurl: http://download.fedoraproject.org/pub/epel/7/$basearch
+    enabled: false
+    failovermethod: priority
+    gpgcheck: true
+    gpgkey: http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
+    name: Extra Packages for Enterprise Linux 7 - Release
+
+packages:
+ - vim
+
+package_update: true # Always patch
+
+growpart:
+  mode: auto
+  devices: [ '/', '/var', '/var/log', '/var/log/audit', '/var/tmp', '/tmp', '/home' ]
+  ignore_growroot_disabled: false
+
 runcmd:
- - echo "${fqdn}" > /etc/salt/minion_id
- - /bin/systemctl restart salt-minion 
+ - 'mkdir -p /etc/salt/minion.d'
+ - 'echo ${fqdn} > /etc/salt/minion_id'
+ - 'echo master: ${saltmaster} > /etc/salt/minion'
+ - 'echo grains: > /etc/salt/minion.d/cloud_init_grains.conf'
+ - 'echo "  environment:         " ${ environment }         >> /etc/salt/minion.d/cloud_init_grains.conf'
+ - 'echo "  aws_partition:       " ${ aws_partition }       >> /etc/salt/minion.d/cloud_init_grains.conf'
+ - 'echo "  aws_partition_alias: " ${ aws_partition_alias } >> /etc/salt/minion.d/cloud_init_grains.conf'
+ - /bin/systemctl restart salt-minion
  - /bin/systemctl enable salt-minion
  - /bin/systemctl start amazon-ssm-agent
  - /bin/systemctl enable amazon-ssm-agent
  - /usr/sbin/aide --update --verbose=0
  - /bin/cp /var/lib/aide/aide.db.new.gz /var/lib/aide/aide.db.gz
 
-# httpd here to avoid a chicken and egg for repo server
-# installing it via salt state won't work because by then
-# the salt states could have us pointed to ourselves as the repo server
-packages:
- - httpd
-
+# Either final message or power state, but probably not both
+final_message: "The system is up after $UPTIME seconds"
+#power_state:
+#  delay: "+30"
+#  mode: reboot
+#  message: "System configured after $UPTIME seconds"
+#  timeout: 300
+#  condition: true

+ 4 - 30
base/test_instance/main.tf

@@ -1,32 +1,3 @@
-locals {
-  owner = var.aws_partition == "aws-us-gov" ? "513442679011" : "099720109477"
-}
-
-data "aws_ami" "ubuntu" {
-  most_recent = true
-  owners = [ local.owner ]
-
-  filter {
-    name   = "name"
-    values = ["ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*"]
-  }
-
-  #filter {
-  #  name   = "image-id"
-  #  values = [ "ami-0a4050943619c0460" ]
-  #}
-
-  filter {
-    name   = "root-device-type"
-    values = ["ebs"]
-  }
-
-  filter {
-    name   = "virtualization-type"
-    values = ["hvm"]
-  }
-}
-
 module "test_instance" {
   source                 = "terraform-aws-modules/ec2-instance/aws"
   version                = "~> 2.0"
@@ -35,7 +6,7 @@ module "test_instance" {
   instance_count         = var.create_test_instance ? 1 : 0
   disable_api_termination = false # the test instance can always be destroyed
 
-  ami                    = data.aws_ami.ubuntu.image_id
+  ami                    = local.ami_map[var.test_instance_ami]
   instance_type          = "t3a.micro"
   key_name               = var.test_instance_key_name
   vpc_security_group_ids = var.security_group_ids
@@ -56,6 +27,9 @@ data "template_file" "cloud-init" {
     hostname = "test_instance"
     fqdn = "test_instance.${var.dns_info["private"]["zone"]}"
     environment = var.environment
+    saltmaster  = "salt-master.${var.dns_info["private"]["zone"]}"
+    aws_partition = var.aws_partition
+    aws_partition_alias = var.aws_partition_alias
   }
 }
 

+ 9 - 9
base/test_instance/vars.tf

@@ -12,23 +12,23 @@ variable "tags" {
   default     = { }
 }
 
-# ----------------------------------
-# Below this line are variables inherited from higher levels, so they
-# do not need to be explicitly passed to this module.
-variable "create_test_instance" {
-  type        = bool
-}
-
-variable "aws_marketplace_ubuntu_owner_id" {
+variable "test_instance_ami" { 
   type = string
+  default = "minion"
 }
 
-variable "test_instance_key_name" {
+variable "test_instance_type" { 
   type = string
+  default = "t3a.micro"
 }
 
+variable "create_test_instance" { type = bool }
+variable "test_instance_key_name" { type = string }
+
 variable "dns_info" { type = map }
 variable "standard_tags" { type = map }
 variable "environment" { type = string }
 variable "aws_region" { type = string }
 variable "aws_partition" { type = string }
+variable "aws_partition_alias" { type = string }
+variable "common_services_account" { type = string }

+ 16 - 4
submodules/dns/private_A_record/main.tf

@@ -1,9 +1,20 @@
 locals {
   # For reverse dns:
+  # 0) Only take the first address
+  first_address = var.ip_addresses[0]
+
   # 1) Split the ip addresses into 4 octets
-  octets = [ for ip in var.ip_addresses: regex("^(\\d+)\\.(\\d+)\\.(\\d+)\\.(\\d+)$", ip) ]
+  octets = regex("^(\\d+)\\.(\\d+)\\.(\\d+)\\.(\\d+)$", local.first_address)
+
   # 2) Reverse them, but only if the first octet is 10 (maybe handle 192.168 and 172.16 later), and if enabled
-  reverse_addresses = [ for octets in local.octets: join(".", reverse(slice(octets, 1, 4))) if octets[0] == "10" && var.enabled == true ] 
+  # NOTE: This used to ignore any that don't start with 10, but that causes headaches because:
+  # > The "for_each" value depends on resource attributes that cannot be determined
+  # > until apply, so Terraform cannot predict how many instances will be created.
+  # > To work around this, use the -target argument to first apply only the
+  # > resources that the for_each depends on.
+  # So, we create the reverse no matter what, which means you should set 'reverse_enabled' to false if you're not
+  # assigning out of the 10.0.0.0/8 network.
+  reverse_address = join(".", reverse(slice(local.octets, 1, 4)))
 }
 
 resource "aws_route53_record" "dns" {
@@ -18,8 +29,9 @@ resource "aws_route53_record" "dns" {
 }
 
 resource "aws_route53_record" "reverse_dns" {
-  for_each = toset(local.reverse_addresses)
-  name = each.value
+  count = var.enabled && var.reverse_enabled ? 1 : 0
+
+  name = local.reverse_address
   type = "PTR"
   ttl  = 300
   zone_id = var.dns_info["reverse"]["zone_id"]

+ 6 - 0
submodules/dns/private_A_record/vars.tf

@@ -4,6 +4,12 @@ variable "enabled" {
   default = true
 }
 
+variable "reverse_enabled" { 
+  description = "Set to false to skip creation of reverse DNS. This can be useful when first creating resources when the private IP cannot be determined. 'enabled' must be true as well."
+  type = bool 
+  default = true
+}
+
 variable "name" { type = string }
 variable "ip_addresses" { type = list }
 variable "dns_info" { type = map }

+ 13 - 0
submodules/security_group/required_group/README.md

@@ -0,0 +1,13 @@
+# Required Security Group for all instances
+
+## Inbound:
+* Full access from the scanner networks
+* SSH access from the VPN and Bastion networks
+* ICMP types 0-8 from the 10 network
+
+## Outbound:
+* ICMP types 0-8 anywhere
+* DNS Access
+* Access to Proxy
+* Access to Salt
+* Access to Sensu

+ 186 - 0
submodules/security_group/required_group/main.tf

@@ -0,0 +1,186 @@
+data "aws_vpc" "this" {
+  id = var.vpc_id
+}
+
+data "aws_prefix_list" "private_s3" {
+  filter {
+    name = "prefix-list-name"
+    values = [ "com.amazonaws.*.s3" ]
+  }
+}
+
+locals {
+  vpc_name = lookup(data.aws_vpc.this.tags, "Name", data.aws_vpc.this.cidr_block)
+}
+
+resource "aws_security_group" "security_group" {
+  name = "required_group"
+  description = "Required Security Group for VPC ${local.vpc_name} (${var.vpc_id})"
+  vpc_id = var.vpc_id
+  tags = merge(var.tags, { "Name" = "required_group", "vpc_name" = local.vpc_name })
+}
+
+## Ingress
+resource "aws_security_group_rule" "scanner_access" {
+  security_group_id = aws_security_group.security_group.id
+  type = "ingress"
+  description = "Full Access from Security Scanners"
+  from_port = 0
+  to_port = 0
+  protocol = -1
+  cidr_blocks = var.cidr_map["scanners"]
+  count = length(var.cidr_map["scanners"]) > 0 ? 1 : 0
+}
+
+resource "aws_security_group_rule" "ssh_access" {
+  security_group_id = aws_security_group.security_group.id
+  type = "ingress"
+  description = "SSH Access"
+  from_port = 22
+  to_port = 22
+  protocol = "tcp"
+  cidr_blocks = concat(var.cidr_map["bastions"], var.cidr_map["vpns"])
+  count = length(concat(var.cidr_map["bastions"], var.cidr_map["vpns"])) > 0 ? 1 : 0
+}
+
+resource "aws_security_group_rule" "ping_inbound" {
+  security_group_id = aws_security_group.security_group.id
+  type = "ingress"
+  description = "Inbound Pings"
+  from_port = -1
+  to_port = -1
+  protocol = "icmp"
+  cidr_blocks = [ "10.0.0.0/8" ]
+}
+
+## Outbound:
+resource "aws_security_group_rule" "ping_outbound" {
+  security_group_id = aws_security_group.security_group.id
+  type = "egress"
+  description = "Outbound Pings"
+  from_port = -1
+  to_port = -1
+  protocol = "icmp"
+  cidr_blocks = [ "0.0.0.0/0" ]
+}
+
+resource "aws_security_group_rule" "dns_access_tcp" {
+  security_group_id = aws_security_group.security_group.id
+  type = "egress"
+  description = "Outbound TCP DNS"
+  from_port = 53
+  to_port = 53
+  protocol = "tcp"
+  cidr_blocks = var.cidr_map["dns"]
+  count = length(var.cidr_map["dns"]) > 0 ? 1 : 0
+}
+
+resource "aws_security_group_rule" "dns_access_udp" {
+  security_group_id = aws_security_group.security_group.id
+  type = "egress"
+  description = "Outbound UDP DNS"
+  from_port = 53
+  to_port = 53
+  protocol = "udp"
+  cidr_blocks = var.cidr_map["dns"]
+  count = length(var.cidr_map["dns"]) > 0 ? 1 : 0
+}
+
+resource "aws_security_group_rule" "outbound_to_salt_masters" {
+  security_group_id = aws_security_group.security_group.id
+  type = "egress"
+  description = "Connect to Salt Masters"
+  from_port = 4505
+  to_port = 4506
+  protocol = "tcp"
+  cidr_blocks = var.cidr_map["salt"]
+  count = length(var.cidr_map["salt"]) > 0 ? 1 : 0
+}
+
+resource "aws_security_group_rule" "outbound_to_web_servers_80" {
+  security_group_id = aws_security_group.security_group.id
+  type = "egress"
+  description = "Connect to Repo Servers"
+  from_port = 80
+  to_port = 80
+  protocol = "tcp"
+  cidr_blocks = var.cidr_map["web"]
+  count = length(var.cidr_map["web"]) > 0 ? 1 : 0
+}
+
+resource "aws_security_group_rule" "outbound_to_web_servers_443" {
+  security_group_id = aws_security_group.security_group.id
+  type = "egress"
+  description = "Connect to Repo Servers"
+  from_port = 443
+  to_port = 443
+  protocol = "tcp"
+  cidr_blocks = var.cidr_map["web"]
+  count = length(var.cidr_map["web"]) > 0 ? 1 : 0
+}
+
+resource "aws_security_group_rule" "outbound_to_mailrelay_25" {
+  security_group_id = aws_security_group.security_group.id
+  type = "egress"
+  description = "Outbound Email to mailrelay"
+  from_port = 25
+  to_port = 25
+  protocol = "tcp"
+  cidr_blocks = var.cidr_map["smtp"]
+  count = length(var.cidr_map["smtp"]) > 0 ? 1 : 0
+}
+
+resource "aws_security_group_rule" "outbound_to_ec2_s3_endpoint" {
+  security_group_id = aws_security_group.security_group.id
+  type = "egress"
+  description = "Outbound to S3 endpoint"
+  from_port = 443
+  to_port = 443
+  protocol = "tcp"
+  prefix_list_ids = [ data.aws_prefix_list.private_s3.id ]
+  count = length([ data.aws_prefix_list.private_s3.id ]) > 0 ? 1 : 0 # todo: handle case of no s3 prefix list
+}
+
+resource "aws_security_group_rule" "outbound_to_sensu" {
+  security_group_id = aws_security_group.security_group.id
+  type = "egress"
+  description = "Monitoring Outbound"
+  from_port = 8081
+  to_port   = 8081
+  protocol  = "tcp"
+  cidr_blocks = var.cidr_map["monitoring"]
+  count = length(var.cidr_map["monitoring"]) > 0 ? 1 : 0
+}
+
+resource "aws_security_group_rule" "outbound_to_moose_s2s" {
+  security_group_id = aws_security_group.security_group.id
+  type = "egress"
+  description              = "Splunk UF outbound to Moose Indexers"
+  from_port = 9997
+  to_port   = 9998
+  protocol  = "tcp"
+  cidr_blocks = var.cidr_map["moose"]
+  count = length(var.cidr_map["moose"]) > 0 ? 1 : 0
+}
+
+resource "aws_security_group_rule" "outbound_to_moose_idxc" {
+  security_group_id = aws_security_group.security_group.id
+  type = "egress"
+  description              = "Outbound IDXC Discovery to MOOSE"
+  from_port = 8089
+  to_port   = 8089
+  protocol  = "tcp"
+  cidr_blocks = var.cidr_map["moose"]
+  count = length(var.cidr_map["moose"]) > 0 ? 1 : 0
+}
+
+resource "aws_security_group_rule" "outbound_to_moose_hec" {
+  security_group_id = aws_security_group.security_group.id
+  type = "egress"
+  description = "Connect to HEC"
+  from_port = 8088
+  to_port = 8088
+  protocol = "tcp"
+  cidr_blocks = var.cidr_map["moose"]
+  count = length(var.cidr_map["moose"]) > 0 ? 1 : 0
+}

+ 3 - 0
submodules/security_group/required_group/outputs.tf

@@ -0,0 +1,3 @@
+output "id" {
+  value = aws_security_group.security_group.id
+}

+ 5 - 0
submodules/security_group/required_group/vars.tf

@@ -0,0 +1,5 @@
+variable "vpc_id" { type = string }
+variable "cidr_map" { type = map }
+variable "tags" { type = map }
+variable "aws_region" { type = string }
+variable "aws_partition" { type = string }