Browse Source

Merge pull request #256 from mdr-engineering/feature/ftd_MSOCI-1795_CustomerFacingSearchHead

Customer Facing ("public"-ish) Search Head
Frederick Damstra 4 years ago
parent
commit
456f596540

+ 1 - 2
base/kinesis_firehose_waf_logs/main.tf

@@ -16,7 +16,7 @@ resource "aws_kinesis_firehose_delivery_stream" "aws-waf-logs-splunk" {
   }
 
   splunk_configuration {
-    hec_endpoint               = "https://${var.hec_pub}:8088"
+    hec_endpoint               = "https://${var.hec_pub_ack}:8088"
     hec_token                  = var.aws_waf_logs_hec_token
     hec_acknowledgment_timeout = 600
     hec_endpoint_type          = "Raw"
@@ -28,7 +28,6 @@ resource "aws_kinesis_firehose_delivery_stream" "aws-waf-logs-splunk" {
     }
   }
 
-
   tags = merge(var.standard_tags, var.tags)
 }
 

+ 1 - 1
base/kinesis_firehose_waf_logs/vars.tf

@@ -6,7 +6,7 @@ variable "tags" {
 
 variable "cloudtrail_key_arn" { type = string }
 variable "aws_waf_logs_hec_token" { type = string }
-variable "hec_pub" { type = string }
+variable "hec_pub_ack" { type = string }
 variable "standard_tags" { type = map }
 variable "account_name" { type = string }
 variable "aws_account_id" { type = string }

+ 1 - 0
base/splunk_servers/customer_searchhead/amis.tf

@@ -0,0 +1 @@
+../../amis.tf

+ 31 - 0
base/splunk_servers/customer_searchhead/certificate.tf

@@ -0,0 +1,31 @@
+#Certificate 
+resource "aws_acm_certificate" "cert" {
+  domain_name       = "${local.alb_name}.${var.dns_info["public"]["zone"]}"
+  validation_method = "DNS"
+
+  tags = merge(var.standard_tags, var.tags)
+}
+
+resource "aws_acm_certificate_validation" "cert" {
+  certificate_arn         = aws_acm_certificate.cert.arn
+  validation_record_fqdns = [for record in aws_route53_record.cert_validation: record.fqdn]
+}
+
+resource "aws_route53_record" "cert_validation" {
+  provider = aws.mdr-common-services-commercial
+
+  for_each = {
+    for dvo in aws_acm_certificate.cert.domain_validation_options : dvo.domain_name => {
+      name   = dvo.resource_record_name
+      record = dvo.resource_record_value
+      type   = dvo.resource_record_type
+    }
+  }
+
+  allow_overwrite = true
+  name            = each.value.name
+  records         = [each.value.record]
+  ttl             = 60
+  type            = each.value.type
+  zone_id         = var.dns_info["public"]["zone_id"]
+}

+ 78 - 0
base/splunk_servers/customer_searchhead/cloud-init/cloud-init.tpl

@@ -0,0 +1,78 @@
+#cloud-config
+preserve_hostname: false
+hostname: ${hostname}
+salt-master: ${salt_master}
+fqdn: ${fqdn}
+
+# Write files happens early
+write_files:
+- content: |
+    proxy=http://${proxy}:80
+  path: /etc/yum.conf
+  append: true
+- content: |
+    proxy_host: ${proxy}
+    proxy_port: 80
+  path: /etc/salt/minion.d/proxy.conf
+- content: |
+    [global]
+    proxy=${proxy}
+  path: /etc/pip.conf
+- content: |
+    export HTTPS_PROXY=http://${proxy}:80
+    export HTTP_PROXY=http://${proxy}:80
+    export NO_PROXY=localhost,127.0.0.1,169.254.169.254,pvt.xdrtest.accenturefederalcyber.com,pvt.xdr.accenturefederalcyber.com,reposerver.msoc.defpoint.local,jenkins.msoc.defpoint.local,pod1search-splunk-sh.msoc.defpoint.local,s3.amazonaws.com,ssm.${ aws_region }.amazonaws.com,ec2messages.${ aws_region }.amazonaws.com,ec2.${ aws_region }.amazonaws.com,ssmmessages.${ aws_region }.amazonaws.com,iratemoses.mdr.defpoint.com,jira.mdr.defpoint.com,reposerver.pvt.xdr.accenturefederalcyber.com,jenkins.pvt.xdr.accenturefederalcyber.com,pod1search-splunk-sh.pvt.xdr.accenturefederalcyber.com,reposerver.pvt.xdrtest.accenturefederalcyber.com,jenkins.pvt.xdrtest.accenturefederalcyber.com,pod1search-splunk-sh.pvt.xdrtest.accenturefederalcyber.com,iratemoses.xdr.accenturefederalcyber.com,jira.xdr.accenturefederalcyber.com,iratemoses.xdrtest.accenturefederalcyber.com,jira.xdrtest.accenturefederalcyber.com
+    export https_proxy=$HTTPS_PROXY
+    export http_proxy=$HTTP_PROXY
+    export no_proxy=$NO_PROXY
+  path: /etc/profile.d/proxy.sh
+- content: |
+    ${fqdn}
+  path: /etc/salt/minion_id
+- content: |
+    master: ${salt_master}
+  path: /etc/salt/minion
+- content: |
+    grains:
+      environment: ${ environment }
+      aws_partition: ${ aws_partition }
+      aws_partition_alias: ${ aws_partition_alias }
+      splunk_prefix: ${ splunk_prefix }
+      aws_region: ${ aws_region }
+  path: /etc/salt/minion.d/cloud_init_grains.conf
+
+#yum_repos:
+#  epel-release:
+#    baseurl: http://download.fedoraproject.org/pub/epel/7/$basearch
+#    enabled: false
+#    failovermethod: priority
+#    gpgcheck: true
+#    gpgkey: http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
+#    name: Extra Packages for Enterprise Linux 7 - Release
+
+packages:
+ - vim
+
+package_update: true # Always patch
+
+growpart:
+  mode: auto
+  devices: [ '/', '/var', '/var/log', '/var/log/audit', '/var/tmp', '/tmp', '/home' ]
+  ignore_growroot_disabled: false
+
+runcmd:
+ - /bin/systemctl restart salt-minion
+ - /bin/systemctl enable salt-minion
+ - /bin/systemctl start amazon-ssm-agent
+ - /bin/systemctl enable amazon-ssm-agent
+ - /usr/sbin/aide --update --verbose=0
+ - /bin/cp /var/lib/aide/aide.db.new.gz /var/lib/aide/aide.db.gz
+
+# Either final message or power state, but probably not both
+final_message: "The system is up after $UPTIME seconds"
+#power_state:
+#  delay: "+30"
+#  mode: reboot
+#  message: "System configured after $UPTIME seconds"
+#  timeout: 300
+#  condition: true

+ 80 - 0
base/splunk_servers/customer_searchhead/cloud-init/opt_splunk.boothook

@@ -0,0 +1,80 @@
+#!/bin/bash
+#
+#
+exec > /dev/console
+exec 2>&1
+
+declare -A EBSMAP
+
+# Build a map of EBS NVMe disks from their AWS-API-name to their NVMe name
+# this makes an associative array (like a python hash) of the
+# sdX/xvdX name you'd set in AWS API to the corresponding nvmeX name
+# Thanks Fred for the awesome id-ctrl stuff I'd never seen before
+#
+# One interesting side effect observed:  the id-ctrl output is different when
+# volumes are attached at boot time (no /dev/) versus attached after the OS
+# is started (includes /dev/)
+function make_nve_ebs_map {
+        for DEVICE in $( lsblk -d -o NAME,MODEL -n | egrep "Elastic Block Store" | awk '{ print $1 }' ); do
+                UNDERLYING=$( nvme id-ctrl --raw-binary /dev/${DEVICE} 2>/dev/null | cut -c 3073-3104 | tr -d ' ' | sed "s#/dev/##" )
+
+                EBSMAP[$UNDERLYING]=$DEVICE
+                UNDERLYING2=$( echo $UNDERLYING | sed "s/sd/xvd/" )
+                EBSMAP[$UNDERLYING2]=$DEVICE
+        done
+}
+
+function do_the_mount
+{
+	VOL_LABEL=$1
+	VOLUME=$2
+	MOUNTPOINT=$3
+
+
+	DONE=0
+	TRIES=0
+	while [[ $DONE -ne 1 ]] && [[ $TRIES -lt 20 ]]; do
+		echo "Looking for $VOLUME to come attached"
+		make_nve_ebs_map
+
+		#echo "------- current nvme/ebs map -------"
+		#for K in "${!EBSMAP[@]}"; do echo $K  = ${EBSMAP[$K]} ; done
+		#echo "------- end current nvme/ebs map -------"
+
+		if [[ -b /dev/$VOLUME ]]; then
+			DEV="/dev/$VOLUME"
+			DONE=1
+		elif [[ -b /dev/${EBSMAP[$VOLUME]} ]]; then
+			DEV="/dev/${EBSMAP[$VOLUME]}"
+			DONE=1
+		else
+			sleep 10
+			TRIES=$(( $TRIES + 1 ))
+		fi
+
+		echo "Volume $VOLUME available at $DEV"
+	done
+
+	if ! [[ -d ${MOUNTPOINT} ]]; then
+		echo "Creating mount directory ${MOUNTPOINT}"
+		mkdir -p ${MOUNTPOINT}
+	fi
+
+	if ! blkid -l -t LABEL=${VOL_LABEL}; then
+		echo "Making filesystem for LABEL=${VOL_LABEL} on ${DEV}"
+		mkfs.xfs -L ${VOL_LABEL} ${DEV}
+	fi
+
+	if ! egrep -q "LABEL=${VOL_LABEL}" /etc/fstab; then
+		echo "Adding LABEL=${VOL_LABEL} to /etc/fstab"
+		echo "LABEL=${VOL_LABEL}       ${MOUNTPOINT}    xfs    noatime,nofail  0 2" >> /etc/fstab
+	fi
+
+	if ! mountpoint ${MOUNTPOINT} >/dev/null 2>&1; then
+		echo "Mounting ${MOUNTPOINT}"
+		mount ${MOUNTPOINT}
+	fi
+
+}
+
+do_the_mount opt_splunk xvdf /opt/splunk

+ 145 - 0
base/splunk_servers/customer_searchhead/elb.tf

@@ -0,0 +1,145 @@
+locals {
+  # alb_clients access the SH
+  alb_clients = toset(concat(
+    var.cidr_map["vpc-access"], # VPN users
+    var.cidr_map["vpc-system-services"], # Salt master, etc
+    var.cidr_map["vpc-private-services"], # fm-shared search, qcompliance, phantom
+    var.trusted_ips,
+    var.splunk_customer_cidrs,
+  ))
+}
+
+resource "aws_lb" "searchhead-alb" {
+  name               = var.alb_name != "" ? "${local.alb_name}-alb" : "${var.prefix}-cust-sh"
+  internal           = true
+  load_balancer_type = "application"
+  # Not supported for NLB
+  security_groups    = [aws_security_group.searchhead-alb-sg.id]
+  # Note, changing subnets results in recreation of the resource
+  subnets            = var.public_subnets
+  enable_cross_zone_load_balancing = true
+
+  access_logs {
+    bucket  = "xdr-elb-${ var.environment }"
+    enabled = true
+  }
+
+  tags = merge(var.standard_tags, var.tags)
+}
+
+#########################
+# Listeners
+resource "aws_lb_listener" "searchhead-alb-listener-https" {
+  load_balancer_arn = aws_lb.searchhead-alb.arn
+  port              = "443"
+  protocol          = "HTTPS"
+  ssl_policy        = "ELBSecurityPolicy-FS-1-2-Res-2019-08" # PFS, TLS1.2, most "restrictive" policy (took awhile to find that)
+  certificate_arn   = aws_acm_certificate.cert.arn
+
+  default_action {
+    type             = "forward"
+    target_group_arn = aws_lb_target_group.searchhead-alb-target-8000.arn
+  }
+}
+
+# Redirect HTTP to HTTPS
+resource "aws_lb_listener" "searchhead-alb-listener-http" {
+  load_balancer_arn = aws_lb.searchhead-alb.arn
+  port              = "80"
+  protocol          = "HTTP"
+
+  default_action {
+    type             = "redirect"
+
+    redirect {
+      port        = "443"
+      protocol    = "HTTPS"
+      status_code = "HTTP_301"
+    }
+  }
+}
+
+#########################
+# Targets
+resource "aws_lb_target_group" "searchhead-alb-target-8000" {
+  name     = var.alb_name != "" ? "${local.alb_name}-customer-alb-target-8000" : "${var.prefix}-cust-alb-8000"
+  port     = 8000
+  protocol = "HTTPS"
+  target_type = "instance"
+  vpc_id   = var.vpc_id
+  tags = merge(var.standard_tags, var.tags)
+
+  health_check {
+    enabled = true
+    path = "/en-US/account/login?return_to=%2Fen-US%2F"
+    port = 8000
+    protocol = "HTTPS"
+  }
+
+  # Stickiness is not needed here, but we'll need it if we add SHs
+  stickiness {
+    type = "lb_cookie"
+    cookie_duration = 86400 # 1 day
+    enabled = true
+  }
+}
+
+resource "aws_lb_target_group_attachment" "searchhead-alb-target-8000-instance" {
+  target_group_arn = aws_lb_target_group.searchhead-alb-target-8000.arn
+  target_id        = aws_instance.instance.id
+  port             = 8000
+}
+
+#########################
+# Security Group for ALB
+resource "aws_security_group" "searchhead-alb-sg" {
+  name = var.alb_name != "" ? "${local.alb_name}-customer-alb-sh" : "${var.prefix}-customer-sh-alb-sg"
+  description = "Security Group for the Customer Searchhead ALB"
+  vpc_id = var.vpc_id
+  tags = merge(var.standard_tags, var.tags)
+}
+
+resource "aws_security_group_rule" "searchhead-alb-https-in" {
+  type              = "ingress"
+  from_port         = 443
+  to_port           = 443
+  protocol          = "tcp"
+  cidr_blocks       = local.alb_clients
+  security_group_id = aws_security_group.searchhead-alb-sg.id
+}
+
+resource "aws_security_group_rule" "searchhead-http-in" {
+  # Port 80 is open as a redirect to 443
+  type              = "ingress"
+  from_port         = 80
+  to_port           = 80
+  protocol          = "tcp"
+  cidr_blocks       = local.alb_clients
+  security_group_id = aws_security_group.searchhead-alb-sg.id
+}
+
+resource "aws_security_group_rule" "searchhead-alb-8000-out" {
+  type              = "egress"
+  from_port         = 8000
+  to_port           = 8000
+  protocol          = "tcp"
+  # Maybe should limit to the local vpc, but I don't readily have that cidr available
+  cidr_blocks       = [ var.vpc_cidr ]
+  security_group_id = aws_security_group.searchhead-alb-sg.id
+}
+
+#########################
+# DNS Entry
+module "public_dns_record_hec_ack" {
+  source = "../../../submodules/dns/public_ALIAS_record"
+
+  name = local.alb_name
+
+  target_dns_name = aws_lb.searchhead-alb.dns_name
+  target_zone_id  = aws_lb.searchhead-alb.zone_id
+  dns_info = var.dns_info
+
+  providers = {
+    aws.mdr-common-services-commercial = aws.mdr-common-services-commercial
+  }
+}

+ 271 - 0
base/splunk_servers/customer_searchhead/main.tf

@@ -0,0 +1,271 @@
+# Some instance variables
+locals {
+  ami_selection = "minion" # master, minion, ...
+  instance_name = var.instance_name != "" ? var.instance_name : "${ var.prefix }-splunk-cust-sh"
+  alb_name = var.alb_name != "" ? var.alb_name : "${ var.prefix }-splunk"
+}
+
+# Rather than pass in the aws security group, we just look it up. This will
+# probably be useful other places, as well.
+data "aws_security_group" "typical-host" {
+  name   = "typical-host"
+  vpc_id = var.vpc_id
+}
+
+# Use the default EBS key
+data "aws_kms_key" "ebs-key" {
+  key_id = "alias/ebs_root_encrypt_decrypt"
+}
+
+resource "aws_network_interface" "instance" {
+  subnet_id = var.public_subnets[0]
+  security_groups = [ data.aws_security_group.typical-host.id, aws_security_group.searchhead_security_group.id ]
+  description = local.instance_name
+  tags = merge(var.standard_tags, var.tags, { Name = local.instance_name })
+}
+
+resource "aws_instance" "instance" {
+  #availability_zone = var.azs[count.index % 2]
+  tenancy = "default"
+  ebs_optimized = true
+  disable_api_termination = var.instance_termination_protection
+  instance_initiated_shutdown_behavior = "stop"
+  instance_type = var.instance_type
+  key_name = "msoc-build"
+  monitoring = false
+  iam_instance_profile = "msoc-default-instance-profile"
+
+  ami = local.ami_map[local.ami_selection]
+  # We need to ignore ebs_block_device changes, because if the AMI changes, so does the snapshot_id.
+  # If they add a feature to block more specific changes (eg `ebs_block_devices[*].snapshot_id`), then
+  # that could be removed.
+  lifecycle { ignore_changes = [ ami, key_name, user_data, ebs_block_device ] }
+
+  # These device definitions are optional, but added for clarity.
+  root_block_device {
+    volume_type = "gp3"
+    volume_size = var.splunk_volume_sizes["customer_searchhead"]["/"]
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+  }
+
+  ebs_block_device {
+    # /opt/splunk
+    # Note: Not in AMI
+    device_name = "/dev/xvdf"
+    volume_type = "gp3"
+    volume_size = var.splunk_volume_sizes["customer_searchhead"]["/opt/splunk"]
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+  }
+
+  ebs_block_device {
+    # swap
+    device_name = "/dev/xvdm"
+    volume_type = "gp3"
+    volume_size = var.splunk_volume_sizes["customer_searchhead"]["swap"]
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    # Snapshot IDs need to be grabbed from the ami, or it will replace every time. It's ugly.
+    # This may prompt replacement when the AMI is updated.
+    # See:
+    #   https://github.com/hashicorp/terraform/issues/19958
+    #   https://github.com/terraform-providers/terraform-provider-aws/issues/13118
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdm"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /home
+    device_name = "/dev/xvdn"
+    volume_type = "gp3"
+    volume_size = var.splunk_volume_sizes["customer_searchhead"]["/home"]
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdn"].ebs.snapshot_id
+
+  }
+  ebs_block_device {
+    # /var
+    device_name = "/dev/xvdo"
+    volume_type = "gp3"
+    volume_size = var.splunk_volume_sizes["customer_searchhead"]["/var"]
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdo"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/tmp
+    device_name = "/dev/xvdp"
+    volume_type = "gp3"
+    volume_size = var.splunk_volume_sizes["customer_searchhead"]["/var/tmp"]
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdp"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/log
+    device_name = "/dev/xvdq"
+    volume_type = "gp3"
+    volume_size = var.splunk_volume_sizes["customer_searchhead"]["/var/log"]
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdq"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/log/audit
+    device_name = "/dev/xvdr"
+    volume_type = "gp3"
+    volume_size = var.splunk_volume_sizes["customer_searchhead"]["/var/log/audit"]
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdr"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /tmp
+    device_name = "/dev/xvds"
+    volume_type = "gp3"
+    volume_size = var.splunk_volume_sizes["customer_searchhead"]["/tmp"]
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvds"].ebs.snapshot_id
+  }
+
+  network_interface {
+    device_index = 0
+    network_interface_id = aws_network_interface.instance.id
+  }
+
+  user_data = data.template_cloudinit_config.cloud-init.rendered
+  tags = merge( var.standard_tags, var.tags, { Name = local.instance_name })
+  volume_tags = merge( var.standard_tags, var.tags, { Name = local.instance_name })
+}
+
+module "private_dns_record" {
+  source = "../../../submodules/dns/private_A_record"
+
+  name = local.instance_name
+  ip_addresses = [ aws_instance.instance.private_ip ]
+  dns_info = var.dns_info
+  reverse_enabled = var.reverse_enabled
+
+  providers = {
+    aws.c2 = aws.c2
+  }
+}
+
+data "template_file" "cloud-init" {
+  # Should these be in a common directory? I suspect they'd be reusable
+  template = file("${path.module}/cloud-init/cloud-init.tpl")
+
+  vars = {
+    hostname = local.instance_name
+    fqdn = "${local.instance_name}.${var.dns_info["private"]["zone"]}"
+    splunk_prefix = var.prefix
+    environment = var.environment
+    salt_master  = var.salt_master
+    proxy = var.proxy
+    aws_partition = var.aws_partition
+    aws_partition_alias = var.aws_partition_alias
+    aws_region = var.aws_region
+  }
+}
+
+# Render a multi-part cloud-init config making use of the part
+# above, and other source files
+data "template_cloudinit_config" "cloud-init" {
+  gzip          = true
+  base64_encode = true
+
+  # Main cloud-config configuration file.
+  part {
+    filename     = "init.cfg"
+    content_type = "text/cloud-config"
+    content      = data.template_file.cloud-init.rendered
+  }
+
+  # mount /dev/xvdf at /opt/splunk
+  part {
+    content_type = "text/cloud-boothook"
+    content      = file("${path.module}/cloud-init/opt_splunk.boothook")
+  }
+}
+
+## Searchhead
+#
+# Summary:
+#   Ingress:
+#     tcp/8000      - Splunk Web                 - vpc-access, Phantom
+#     tcp/8000      - Splunk Web                 - Entire VPC
+#     tcp/8089      - Splunk API                 - vpc-access, Phantom
+#     tcp/8089      - Splunk API + IDX Discovery - Entire VPC
+#     tcp/9997-9998 - Splunk Data                - Entire VPC
+#
+#   Ingress:
+#     tcp/8089      - Splunk Web                 - vpc-system-services (for salt inventory and portal lambda)
+#
+#   Egress:
+#     tcp/8089      - Splunk API + IDX Discovery - Entire VPC
+resource "aws_security_group" "searchhead_security_group" {
+  name = "${ var.prefix }_customer_searchhead_security_group"
+  description = "Security Group for Splunk Customer Searchhead Instance(s)"
+  vpc_id = var.vpc_id
+  tags = merge(var.standard_tags, var.tags)
+}
+
+# Ingress
+resource "aws_security_group_rule" "splunk-web-in" {
+  description       = "Web access"
+  type              = "ingress"
+  from_port         = 8000
+  to_port           = 8000
+  protocol          = "tcp"
+  cidr_blocks       = toset(concat(var.cidr_map["vpc-access"], 
+                                   var.cidr_map["vpc-private-services"], 
+                                   [ var.vpc_cidr ], 
+                      ))
+  security_group_id = aws_security_group.searchhead_security_group.id
+}
+
+resource "aws_security_group_rule" "splunk-api-in" {
+  description       = "Splunk API"
+  type              = "ingress"
+  from_port         = 8089
+  to_port           = 8089
+  protocol          = "tcp"
+  cidr_blocks       = toset(concat(var.cidr_map["vpc-access"], 
+                                   var.cidr_map["vpc-private-services"], 
+                                   var.cidr_map["vpc-splunk"], # MC
+                                   [ var.vpc_cidr ], 
+                                   var.cidr_map["vpc-system-services"], # for salt inventory and Portal lambda
+                      ))
+  security_group_id = aws_security_group.searchhead_security_group.id
+}
+
+# Egress
+resource "aws_security_group_rule" "splunk-api-out" {
+  description       = "Splunk API Outbound to talk to indexers"
+  type              = "egress"
+  from_port         = 8089
+  to_port           = 8089
+  protocol          = "tcp"
+  cidr_blocks       = [ var.vpc_cidr ]
+  security_group_id = aws_security_group.searchhead_security_group.id
+}
+
+resource "aws_security_group_rule" "splunk-data-out" {
+  description       = "Splunk Data Outbound to talk to own indexers"
+  type              = "egress"
+  from_port         = 9997
+  to_port           = 9998
+  protocol          = "tcp"
+  cidr_blocks       = [ var.vpc_cidr ]
+  security_group_id = aws_security_group.searchhead_security_group.id
+}
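
The ami_map and block_device_mappings lookups above come from the shared amis.tf, which this directory only symlinks (the one-line amis.tf diff near the top), so its contents are not shown in this change. A minimal sketch of the shape those locals would need, with the AMI owner and name filter purely hypothetical:

data "aws_ami" "minion" {
  most_recent = true
  owners      = ["self"]

  filter {
    name   = "name"
    values = ["msoc-minion-*"]   # placeholder naming scheme
  }
}

locals {
  ami_map = {
    minion = data.aws_ami.minion.id
  }

  # Keyed by device name so main.tf can read
  # local.block_device_mappings["minion"]["/dev/xvdm"].ebs.snapshot_id
  block_device_mappings = {
    minion = {
      for bdm in data.aws_ami.minion.block_device_mappings :
      bdm.device_name => bdm
    }
  }
}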

+ 7 - 0
base/splunk_servers/customer_searchhead/outputs.tf

@@ -0,0 +1,7 @@
+output instance_arn {
+  value = aws_instance.instance.arn
+}
+
+output instance_private_ip {
+  value = aws_instance.instance.private_ip
+}

+ 72 - 0
base/splunk_servers/customer_searchhead/vars.tf

@@ -0,0 +1,72 @@
+variable "splunk_customer_cidrs" {
+  description = "List of customer's cidrs that can access the SH"
+  type = list(string)
+}
+
+variable "instance_name" {
+  description = "[Optional] Override the Instance Name"
+  type = string
+  default = ""
+}
+
+variable "alb_name" {
+  description = "[Optional] Override the ALB Name"
+  type = string
+  default = ""
+}
+
+variable "prefix" {
+  description = "Prefix for Instance Names"
+  type = string
+}
+
+variable "azs" {
+  type = list(string)
+}
+
+variable "splunk_volume_sizes" {
+  type = map(map(number))
+}
+
+variable "public_subnets" { type = list(string) }
+variable "private_subnets" { type = list(string) }
+
+variable "vpc_id" {
+  type = string
+}
+
+variable "vpc_cidr" {
+  type = string
+}
+
+variable "tags" {
+  description = "Tags to add to the resource (in addition to global standard tags)"
+  type        = map
+  default     = { }
+}
+
+variable "instance_type" { 
+  type = string
+  default = "t3a.micro"
+}
+
+variable "reverse_enabled" { 
+  description = "Whether to create the reverse DNS entry."
+  type = bool
+  default = true
+}
+
+variable "trusted_ips" { type = list(string) }
+variable "proxy" { type = string }
+variable "salt_master" { type = string }
+
+variable "cidr_map" { type = map }
+variable "dns_info" { type = map }
+variable "standard_tags" { type = map }
+variable "environment" { type = string }
+variable "aws_region" { type = string }
+variable "aws_partition" { type = string }
+variable "aws_partition_alias" { type = string }
+variable "aws_account_id" { type = string }
+variable "common_services_account" { type = string }
+variable "instance_termination_protection" { type = bool }

+ 3 - 0
base/splunk_servers/customer_searchhead/version.tf

@@ -0,0 +1,3 @@
+terraform {
+  required_version = "~> 0.13"
+}

+ 84 - 0
base/splunk_servers/customer_searchhead/waf.tf

@@ -0,0 +1,84 @@
+# trussworks/wafv2/aws has a basic WAF with the AWS Managed Ruleset
+# See https://registry.terraform.io/modules/trussworks/wafv2/aws/latest
+#
+# Attempted to add some sane defaults so we can customize as needed
+resource "aws_wafv2_ip_set" "ipset" {
+  name = "blocked_ips"
+
+  scope              = "REGIONAL"
+  ip_address_version = "IPV4"
+
+  addresses = [
+  ]
+}
+
+module "wafv2" {
+  source = "trussworks/wafv2/aws"
+  version = "~> 2.0"
+
+  name   = local.alb_name
+  scope = "REGIONAL"
+
+  alb_arn       = aws_lb.searchhead-alb.arn
+  associate_alb = true
+
+  ip_sets_rule = [
+    {
+      name       = "blocked_ips"
+      action     = "block"
+      priority   = 1
+      ip_set_arn = aws_wafv2_ip_set.ipset.arn
+    }
+  ]
+
+  # A rate-based rule tracks the rate of requests for each originating IP address, and triggers the rule action when the rate exceeds a limit that you specify on the number of requests in any 5-minute time span
+  ip_rate_based_rule = {
+    name     = "Rate_Limit"
+    priority = 5
+    limit    = 900 # 900 requests per 5 minutes= 3 requests/second (sustained for 5 minutes)
+    action   = "block"
+  }
+
+  tags = merge(var.standard_tags, var.tags)
+}
+
+resource "aws_wafv2_web_acl_logging_configuration" "waf_logs" {
+  log_destination_configs = [ "arn:${var.aws_partition}:firehose:${var.aws_region}:${var.aws_account_id}:deliverystream/aws-waf-logs-splunk" ]
+  resource_arn            = module.wafv2.web_acl_id
+
+#  logging_filter {
+#    default_behavior = "KEEP"
+#
+#    filter {
+#      behavior = "DROP"
+#
+#      condition {
+#        action_condition {
+#          action = "COUNT"
+#        }
+#      }
+#
+#      condition {
+#        label_name_condition {
+#          label_name = "awswaf:111122223333:rulegroup:testRules:LabelNameZ"
+#        }
+#      }
+#
+#      requirement = "MEETS_ALL"
+#    }
+#
+#    filter {
+#      behavior = "KEEP"
+#
+#      condition {
+#        action_condition {
+#          action = "ALLOW"
+#        }
+#      }
+#
+#      requirement = "MEETS_ANY"
+#    }
+#  }
+}
+
+
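
The blocked_ips set ships empty, so the WAF starts out blocking nothing beyond the rate limit. Blocking an address later only means adding it to the set; the CIDR below is a documentation range used purely as an illustration:

resource "aws_wafv2_ip_set" "ipset" {
  name = "blocked_ips"

  scope              = "REGIONAL"
  ip_address_version = "IPV4"

  # 203.0.113.0/24 is an example/documentation range, not a real block decision
  addresses = [
    "203.0.113.0/24",
  ]
}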