
Adds search head and heavy forwarder modules

* Also simplifies security groups to use vpc-* names
* Also makes some minor tf11->tf12 syntax updates (see the sketch below)
Fred Damstra, 4 years ago
commit c0f32a7cbd
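
The tf11->tf12 updates all follow one pattern: a string that is nothing but a single interpolation loses its quotes and "${...}" wrapper. A minimal before/after sketch, taken from the shape of the changes below:

    # Terraform 0.11 style: every expression is a quoted interpolation
    template = "${file("${path.module}/cloud-init/cloud-init.tpl")}"

    # Terraform 0.12+ style: a lone interpolation becomes a plain expression
    template = file("${path.module}/cloud-init/cloud-init.tpl")

Interpolations embedded inside larger strings, like "${path.module}" above, keep their wrapper.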

+ 5 - 4
base/splunk_servers/cluster_master/main.tf

@@ -145,7 +145,7 @@ module "private_dns_record" {
 
 data "template_file" "cloud-init" {
   # Should these be in a common directory? I suspect they'd be reusable
-  template = "${file("${path.module}/cloud-init/cloud-init.tpl")}"
+  template = file("${path.module}/cloud-init/cloud-init.tpl")
 
   vars = {
     hostname = local.instance_name
@@ -168,7 +168,7 @@ data "template_cloudinit_config" "cloud-init" {
   part {
     filename     = "init.cfg"
     content_type = "text/cloud-config"
-    content      = "${data.template_file.cloud-init.rendered}"
+    content      = data.template_file.cloud-init.rendered
   }
 
   # Additional parts as needed
@@ -212,7 +212,8 @@ resource "aws_security_group_rule" "splunk-web-in" {
   from_port         = 8000
   to_port           = 8000
   protocol          = "tcp"
-  cidr_blocks       = toset(concat(var.cidr_map["bastions"], var.cidr_map["vpns"]))
+  #cidr_blocks       = toset(concat(var.cidr_map["bastions"], var.cidr_map["vpns"]))
+  cidr_blocks       = var.cidr_map["vpc-access"]
   security_group_id = aws_security_group.cluster_master_security_group.id
 }
 
@@ -222,7 +223,7 @@ resource "aws_security_group_rule" "splunk-api-in" {
   from_port         = 8089
   to_port           = 8089
   protocol          = "tcp"
-  cidr_blocks       = toset(concat(var.splunk_legacy_cidr, [ var.vpc_cidr ], var.cidr_map["bastions"], var.cidr_map["vpns"]))
+  cidr_blocks       = toset(concat(var.splunk_legacy_cidr, [ var.vpc_cidr ], var.cidr_map["vpc-access"]))
   security_group_id = aws_security_group.cluster_master_security_group.id
 }
 

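The rules above now take their CIDRs from var.cidr_map["vpc-access"] instead of concatenating the "bastions" and "vpns" entries. The map itself is defined outside this diff; a hypothetical caller-side sketch of the new vpc-* keys (example values only):

    cidr_map = {
      "vpc-access"           = ["10.1.0.0/24", "10.2.0.0/24"] # e.g. bastion + vpn ranges
      "vpc-private-services" = ["10.3.0.0/24"]
    }
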
+ 1 - 0
base/splunk_servers/heavy_forwarder/amis.tf

@@ -0,0 +1 @@
+../../amis.tf

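This one-line file is a relative symlink to the shared amis.tf rather than a copy. The shared file is not part of this diff, but the references to local.ami_map and local.block_device_mappings in main.tf below imply something along these lines (a sketch, assuming the shared file derives both locals from an aws_ami lookup):

    # Assumed shape of the shared ../../amis.tf; not shown in this commit.
    data "aws_ami" "minion" {
      most_recent = true
      owners      = ["self"] # assumption: AMIs are built in-account
      filter {
        name   = "name"
        values = ["*minion*"]
      }
    }

    locals {
      ami_map = {
        minion = data.aws_ami.minion.id
      }
      # Keyed by device name so callers can look up each volume's snapshot_id.
      block_device_mappings = {
        minion = { for bdm in data.aws_ami.minion.block_device_mappings : bdm.device_name => bdm }
      }
    }
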
+ 60 - 0
base/splunk_servers/heavy_forwarder/cloud-init/cloud-init.tpl

@@ -0,0 +1,60 @@
+#cloud-config
+preserve_hostname: false
+hostname: ${hostname}
+salt-master: ${salt_master}
+fqdn: ${fqdn}
+
+# Write files happens early
+write_files:
+- content: |
+    proxy=http://${proxy}:80
+  path: /etc/yum.conf
+  append: true
+- content: |
+    ${fqdn}
+  path: /etc/salt/minion_id
+- content: |
+    master: ${salt_master}
+  path: /etc/salt/minion
+- content: |
+    grains:
+      environment: ${ environment }
+      aws_partition: ${ aws_partition }
+      aws_partition_alias: ${ aws_partition_alias }
+  path: /etc/salt/minion.d/cloud_init_grains.conf
+
+#yum_repos:
+#  epel-release:
+#    baseurl: http://download.fedoraproject.org/pub/epel/7/$basearch
+#    enabled: false
+#    failovermethod: priority
+#    gpgcheck: true
+#    gpgkey: http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
+#    name: Extra Packages for Enterprise Linux 7 - Release
+
+packages:
+ - vim
+
+package_update: true # Always patch
+
+growpart:
+  mode: auto
+  devices: [ '/', '/var', '/var/log', '/var/log/audit', '/var/tmp', '/tmp', '/home' ]
+  ignore_growroot_disabled: false
+
+runcmd:
+ - /bin/systemctl restart salt-minion
+ - /bin/systemctl enable salt-minion
+ - /bin/systemctl start amazon-ssm-agent
+ - /bin/systemctl enable amazon-ssm-agent
+ - /usr/sbin/aide --update --verbose=0
+ - /bin/cp /var/lib/aide/aide.db.new.gz /var/lib/aide/aide.db.gz
+
+# Either final message or power state, but probably not both
+final_message: "The system is up after $UPTIME seconds"
+#power_state:
+#  delay: "+30"
+#  mode: reboot
+#  message: "System configured after $UPTIME seconds"
+#  timeout: 300
+#  condition: true

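Two interpolation syntaxes coexist in this template: ${hostname}, ${fqdn}, and friends are substituted at render time by Terraform, while $basearch and $UPTIME pass through untouched, since only "${...}" sequences are rendered. On 0.12+, the templatefile() built-in could render the same file without the template provider; a sketch using the same variables the data source below passes in (not what this commit does):

    content = templatefile("${path.module}/cloud-init/cloud-init.tpl", {
      hostname            = local.instance_name
      fqdn                = "${local.instance_name}.${var.dns_info["private"]["zone"]}"
      environment         = var.environment
      salt_master         = var.salt_master
      proxy               = var.proxy
      aws_partition       = var.aws_partition
      aws_partition_alias = var.aws_partition_alias
    })
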
+ 242 - 0
base/splunk_servers/heavy_forwarder/main.tf

@@ -0,0 +1,242 @@
+# Some instance variables
+locals {
+  ami_selection = "minion" # master, minion, ...
+  instance_name = "${ var.prefix }-splunk-hf"
+  is_moose = length(regexall("moose", var.prefix)) > 0
+}
+
+# Rather than pass in the aws security group, we just look it up. This will
+# probably be useful other places, as well.
+data "aws_security_group" "typical-host" {
+  name   = "typical-host"
+  vpc_id = var.vpc_id
+}
+
+# Use the default EBS key
+data "aws_kms_key" "ebs-key" {
+  key_id = "alias/ebs_root_encrypt_decrypt"
+}
+
+resource "aws_network_interface" "instance" {
+  subnet_id = var.subnets[0]
+  security_groups = [ data.aws_security_group.typical-host.id, aws_security_group.heavy_forwarder_security_group.id ]
+  description = local.instance_name
+  tags = merge(var.standard_tags, var.tags, { Name = local.instance_name })
+}
+
+resource "aws_instance" "instance" {
+  #availability_zone = var.azs[count.index % 2]
+  tenancy = "default"
+  ebs_optimized = true
+  disable_api_termination = var.instance_termination_protection
+  instance_initiated_shutdown_behavior = "stop"
+  instance_type = var.instance_type
+  key_name = "msoc-build"
+  monitoring = false
+  iam_instance_profile = "msoc-default-instance-profile"
+
+  ami = local.ami_map[local.ami_selection]
+  # We need to ignore ebs_block_device changes, because if the AMI changes, so does the snapshot_id.
+  # If they add a feature to block more specific changes (eg `ebs_block_devices[*].snapshot_id`), then
+  # that could be removed.
+  lifecycle { ignore_changes = [ ami, key_name, user_data, ebs_block_device ] }
+
+  # These device definitions are optional, but added for clarity.
+  root_block_device {
+      volume_type = "gp2"
+      #volume_size = "60"
+      delete_on_termination = true
+      encrypted = true
+      kms_key_id = data.aws_kms_key.ebs-key.arn
+  }
+
+  ebs_block_device {
+    # swap
+    device_name = "/dev/xvdm"
+    volume_size = 48
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    # Snapshot IDs need to be grabbed from the ami, or it will replace every time. It's ugly.
+    # This may prompt replacement when the AMI is updated.
+    # See:
+    #   https://github.com/hashicorp/terraform/issues/19958
+    #   https://github.com/terraform-providers/terraform-provider-aws/issues/13118
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdm"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /home
+    device_name = "/dev/xvdn"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdn"].ebs.snapshot_id
+
+  }
+  ebs_block_device {
+    # /var
+    device_name = "/dev/xvdo"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdo"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/tmp
+    device_name = "/dev/xvdp"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdp"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/log
+    device_name = "/dev/xvdq"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdq"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/log/audit
+    device_name = "/dev/xvdr"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdr"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /tmp
+    device_name = "/dev/xvds"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvds"].ebs.snapshot_id
+  }
+
+  network_interface {
+    device_index = 0
+    network_interface_id = aws_network_interface.instance.id
+  }
+
+  user_data = data.template_cloudinit_config.cloud-init.rendered
+  tags = merge( var.standard_tags, var.tags, { Name = local.instance_name })
+  volume_tags = merge( var.standard_tags, var.tags, { Name = local.instance_name })
+}
+
+module "private_dns_record" {
+  source = "../../../submodules/dns/private_A_record"
+
+  name = local.instance_name
+  ip_addresses = [ aws_instance.instance.private_ip ]
+  dns_info = var.dns_info
+  reverse_enabled = var.reverse_enabled
+
+  providers = {
+    aws.c2 = aws.c2
+  }
+}
+
+data "template_file" "cloud-init" {
+  # Should these be in a common directory? I suspect they'd be reusable
+  template = file("${path.module}/cloud-init/cloud-init.tpl")
+
+  vars = {
+    hostname = local.instance_name
+    fqdn = "${local.instance_name}.${var.dns_info["private"]["zone"]}"
+    environment = var.environment
+    salt_master  = var.salt_master
+    proxy = var.proxy
+    aws_partition = var.aws_partition
+    aws_partition_alias = var.aws_partition_alias
+  }
+}
+
+# Render a multi-part cloud-init config making use of the part
+# above, and other source files
+data "template_cloudinit_config" "cloud-init" {
+  gzip          = true
+  base64_encode = true
+
+  # Main cloud-config configuration file.
+  part {
+    filename     = "init.cfg"
+    content_type = "text/cloud-config"
+    content      = data.template_file.cloud-init.rendered
+  }
+
+  # Additional parts as needed
+  #part {
+  #  content_type = "text/x-shellscript"
+  #  content      = "ffbaz"
+  #}
+}
+
+## Heavy Forwarder
+#
+# Summary:
+#   Ingress:
+#     tcp/8000      - Splunk Web                 - vpc-access, legacy openvpn, legacy bastion
+#     tcp/8089      - Splunk API                 - vpc-access, legacy openvpn, legacy bastion
+#
+#   Egress:
+#     tcp/8089      - Splunk API + IDX Discovery - Entire VPC + var.splunk_legacy_cidr
+#     tcp/9997-9998 - Splunk Data                - Entire VPC + var.splunk_legacy_cidr
+resource "aws_security_group" "heavy_forwarder_security_group" {
+  name = "heavy_forwarder_security_group"
+  description = "Security Group for Splunk Heavy Forwarder Instance(s)"
+  vpc_id = var.vpc_id
+  tags = merge(var.standard_tags, var.tags)
+}
+
+# Ingress
+resource "aws_security_group_rule" "splunk-web-in" {
+  description       = "Web access"
+  type              = "ingress"
+  from_port         = 8000
+  to_port           = 8000
+  protocol          = "tcp"
+  # Leaving these commented, as we'll probably need to add to this rule
+  #cidr_blocks       = toset(concat(var.cidr_map["vpc-access"], var.cidr_map["vpc-private-services"]))
+  cidr_blocks       = var.cidr_map["vpc-access"]
+  security_group_id = aws_security_group.heavy_forwarder_security_group.id
+}
+
+resource "aws_security_group_rule" "splunk-api-in" {
+  description       = "Splunk API"
+  type              = "ingress"
+  from_port         = 8089
+  to_port           = 8089
+  protocol          = "tcp"
+  # Leaving these commented, as we'll probably need to add to this rule
+  #cidr_blocks       = toset(concat(var.cidr_map["vpc-access"], var.cidr_map["vpc-private-services"]))
+  cidr_blocks       = var.cidr_map["vpc-access"]
+  security_group_id = aws_security_group.heavy_forwarder_security_group.id
+}
+
+# Egress
+resource "aws_security_group_rule" "splunk-api-out" {
+  description       = "Splunk API Outbound to talk to indexers"
+  type              = "egress"
+  from_port         = 8089
+  to_port           = 8089
+  protocol          = "tcp"
+  cidr_blocks       = toset(concat([ var.vpc_cidr ], var.splunk_legacy_cidr))
+  security_group_id = aws_security_group.heavy_forwarder_security_group.id
+}
+
+resource "aws_security_group_rule" "splunk-data-out" {
+  description       = "Splunk Data Outbound to talk to indexers"
+  type              = "egress"
+  from_port         = 9997
+  to_port           = 9998
+  protocol          = "tcp"
+  cidr_blocks       = toset(concat([ var.vpc_cidr ], var.splunk_legacy_cidr))
+  security_group_id = aws_security_group.heavy_forwarder_security_group.id
+}

+ 7 - 0
base/splunk_servers/heavy_forwarder/outputs.tf

@@ -0,0 +1,7 @@
+output "instance_arn" {
+  value = aws_instance.instance.arn
+}
+
+output "instance_private_ip" {
+  value = aws_instance.instance.private_ip
+}

+ 57 - 0
base/splunk_servers/heavy_forwarder/vars.tf

@@ -0,0 +1,57 @@
+variable "prefix" {
+  description = "Prefix for Instance Names"
+  type = string
+}
+
+variable "splunk_legacy_cidr" {
+  description = "The legacy CIDR block(s)"
+  default = []
+  type = list(string)
+}
+
+variable "azs" {
+  type = list(string)
+}
+
+variable "subnets" {
+  type = list(string)
+}
+
+variable "vpc_id" {
+  type = string
+}
+
+variable "vpc_cidr" {
+  type = string
+}
+
+variable "tags" {
+  description = "Tags to add to the resource (in addition to global standard tags)"
+  type        = map
+  default     = { }
+}
+
+variable "instance_type" { 
+  type = string
+  default = "t3a.micro"
+}
+
+variable "reverse_enabled" { 
+  description = "Whether to create the reverse DNS entry."
+  type = bool
+  default = true
+}
+
+variable "trusted_ips" { type = list(string) }
+variable "proxy" { type = string }
+variable "salt_master" { type = string }
+
+variable "cidr_map" { type = map }
+variable "dns_info" { type = map }
+variable "standard_tags" { type = map }
+variable "environment" { type = string }
+variable "aws_region" { type = string }
+variable "aws_partition" { type = string }
+variable "aws_partition_alias" { type = string }
+variable "common_services_account" { type = string }
+variable "instance_termination_protection" { type = bool }

+ 3 - 0
base/splunk_servers/heavy_forwarder/version.tf

@@ -0,0 +1,3 @@
+terraform {
+  required_version = "~> 0.13"
+}

+ 1 - 1
base/splunk_servers/indexer_cluster/cloudinit.tf

@@ -1,6 +1,6 @@
 data "template_file" "cloud-init" {
   # Should these be in a common directory? I suspect they'd be reusable
-  template = "${file("${path.module}/cloud-init/cloud-init.tpl")}"
+  template = file("${path.module}/cloud-init/cloud-init.tpl")
 
   vars = {
     prefix = var.prefix

+ 1 - 1
base/splunk_servers/indexer_cluster/nlb-splunk-data.tf

@@ -25,7 +25,7 @@ resource "aws_lb" "nlb" {
   name               = "${var.prefix}-splunk-indexers-nlb"
   internal           = false
   load_balancer_type = "network"
-  #subnets            = "${data.terraform_remote_state.infra.public_subnets}"
+  #subnets            = data.terraform_remote_state.infra.public_subnets
 
   subnet_mapping {
     subnet_id = element(var.public_subnets,0)

+ 4 - 1
base/splunk_servers/indexer_cluster/security-group-indexers.tf

@@ -16,7 +16,7 @@
 #     tcp/8089      - Splunk API + IDX Discovery - (local.splunk_vpc_cidrs) Entire VPC + var.splunk_legacy_cidr
 locals {
   splunk_vpc_cidrs = toset(concat(var.splunk_legacy_cidr, [ var.vpc_cidr ]))
-  access_cidrs     = toset(concat(var.cidr_map["bastions"], var.cidr_map["vpns"]))
+  access_cidrs     = var.cidr_map["vpc-access"]
   data_sources     = toset(concat(tolist(local.splunk_vpc_cidrs), var.splunk_data_sources))
 }
 
@@ -66,6 +66,7 @@ resource "aws_security_group_rule" "splunk-api-in-access" {
   from_port         = 8089
   to_port           = 8089
   protocol          = "tcp"
+  # Note: This should not be data_sources, as we do not need to give remote sources access to indexer discovery
   cidr_blocks       = local.access_cidrs
   security_group_id = aws_security_group.indexer_security_group.id
 }
@@ -76,6 +77,7 @@ resource "aws_security_group_rule" "splunk-api-in-vpc" {
   from_port         = 8089
   to_port           = 8089
   protocol          = "tcp"
+  # Note: This should not be data_sources, as we do not need to give remote sources access to indexer discovery
   cidr_blocks       = local.splunk_vpc_cidrs
   security_group_id = aws_security_group.indexer_security_group.id
 }
@@ -87,6 +89,7 @@ resource "aws_security_group_rule" "splunk-api-in-moose" {
   from_port         = 8089
   to_port           = 8089
   protocol          = "tcp"
+  # Internal sources _do_ use indexer discovery, so moose needs 10/8 open in its entirety.
   cidr_blocks       = [ "10.0.0.0/8" ]
   security_group_id = aws_security_group.indexer_security_group.id
 }
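A worked example of how these indexer locals compose, with hypothetical values:

    # Given (example values only):
    #   var.cidr_map["vpc-access"] = ["10.1.0.0/24"]
    #   var.splunk_legacy_cidr     = ["192.168.10.0/24"]
    #   var.vpc_cidr               = "10.10.0.0/16"
    #   var.splunk_data_sources    = ["203.0.113.0/24"]
    # the locals evaluate to:
    #   access_cidrs     = ["10.1.0.0/24"]
    #   splunk_vpc_cidrs = toset(["192.168.10.0/24", "10.10.0.0/16"])
    #   data_sources     = toset(["192.168.10.0/24", "10.10.0.0/16", "203.0.113.0/24"])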

+ 1 - 0
base/splunk_servers/searchhead/amis.tf

@@ -0,0 +1 @@
+../../amis.tf

+ 60 - 0
base/splunk_servers/searchhead/cloud-init/cloud-init.tpl

@@ -0,0 +1,60 @@
+#cloud-config
+preserve_hostname: false
+hostname: ${hostname}
+salt-master: ${salt_master}
+fqdn: ${fqdn}
+
+# Write files happens early
+write_files:
+- content: |
+    proxy=http://${proxy}:80
+  path: /etc/yum.conf
+  append: true
+- content: |
+    ${fqdn}
+  path: /etc/salt/minion_id
+- content: |
+    master: ${salt_master}
+  path: /etc/salt/minion
+- content: |
+    grains:
+      environment: ${ environment }
+      aws_partition: ${ aws_partition }
+      aws_partition_alias: ${ aws_partition_alias }
+  path: /etc/salt/minion.d/cloud_init_grains.conf
+
+#yum_repos:
+#  epel-release:
+#    baseurl: http://download.fedoraproject.org/pub/epel/7/$basearch
+#    enabled: false
+#    failovermethod: priority
+#    gpgcheck: true
+#    gpgkey: http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
+#    name: Extra Packages for Enterprise Linux 7 - Release
+
+packages:
+ - vim
+
+package_update: true # Always patch
+
+growpart:
+  mode: auto
+  devices: [ '/', '/var', '/var/log', '/var/log/audit', '/var/tmp', '/tmp', '/home' ]
+  ignore_growroot_disabled: false
+
+runcmd:
+ - /bin/systemctl restart salt-minion
+ - /bin/systemctl enable salt-minion
+ - /bin/systemctl start amazon-ssm-agent
+ - /bin/systemctl enable amazon-ssm-agent
+ - /usr/sbin/aide --update --verbose=0
+ - /bin/cp /var/lib/aide/aide.db.new.gz /var/lib/aide/aide.db.gz
+
+# Either final message or power state, but probably not both
+final_message: "The system is up after $UPTIME seconds"
+#power_state:
+#  delay: "+30"
+#  mode: reboot
+#  message: "System configured after $UPTIME seconds"
+#  timeout: 300
+#  condition: true

+ 232 - 0
base/splunk_servers/searchhead/main.tf

@@ -0,0 +1,232 @@
+# Some instance variables
+locals {
+  ami_selection = "minion" # master, minion, ...
+  instance_name = "${ var.prefix }-splunk-sh"
+  is_moose = length(regexall("moose", var.prefix)) > 0
+}
+
+# Rather than pass in the aws security group, we just look it up. This will
+# probably be useful other places, as well.
+data "aws_security_group" "typical-host" {
+  name   = "typical-host"
+  vpc_id = var.vpc_id
+}
+
+# Use the default EBS key
+data "aws_kms_key" "ebs-key" {
+  key_id = "alias/ebs_root_encrypt_decrypt"
+}
+
+resource "aws_network_interface" "instance" {
+  subnet_id = var.subnets[0]
+  security_groups = [ data.aws_security_group.typical-host.id, aws_security_group.searchhead_security_group.id ]
+  description = local.instance_name
+  tags = merge(var.standard_tags, var.tags, { Name = local.instance_name })
+}
+
+resource "aws_instance" "instance" {
+  #availability_zone = var.azs[count.index % 2]
+  tenancy = "default"
+  ebs_optimized = true
+  disable_api_termination = var.instance_termination_protection
+  instance_initiated_shutdown_behavior = "stop"
+  instance_type = var.instance_type
+  key_name = "msoc-build"
+  monitoring = false
+  iam_instance_profile = "msoc-default-instance-profile"
+
+  ami = local.ami_map[local.ami_selection]
+  # We need to ignore ebs_block_device changes, because if the AMI changes, so does the snapshot_id.
+  # If they add a feature to block more specific changes (eg `ebs_block_devices[*].snapshot_id`), then
+  # that could be removed.
+  lifecycle { ignore_changes = [ ami, key_name, user_data, ebs_block_device ] }
+
+  # These device definitions are optional, but added for clarity.
+  root_block_device {
+      volume_type = "gp2"
+      #volume_size = "60"
+      delete_on_termination = true
+      encrypted = true
+      kms_key_id = data.aws_kms_key.ebs-key.arn
+  }
+
+  ebs_block_device {
+    # swap
+    device_name = "/dev/xvdm"
+    volume_size = 48
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    # Snapshot IDs need to be grabbed from the ami, or it will replace every time. It's ugly.
+    # This may prompt replacement when the AMI is updated.
+    # See:
+    #   https://github.com/hashicorp/terraform/issues/19958
+    #   https://github.com/terraform-providers/terraform-provider-aws/issues/13118
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdm"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /home
+    device_name = "/dev/xvdn"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdn"].ebs.snapshot_id
+
+  }
+  ebs_block_device {
+    # /var
+    device_name = "/dev/xvdo"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdo"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/tmp
+    device_name = "/dev/xvdp"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdp"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/log
+    device_name = "/dev/xvdq"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdq"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/log/audit
+    device_name = "/dev/xvdr"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdr"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /tmp
+    device_name = "/dev/xvds"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvds"].ebs.snapshot_id
+  }
+
+  network_interface {
+    device_index = 0
+    network_interface_id = aws_network_interface.instance.id
+  }
+
+  user_data = data.template_cloudinit_config.cloud-init.rendered
+  tags = merge( var.standard_tags, var.tags, { Name = local.instance_name })
+  volume_tags = merge( var.standard_tags, var.tags, { Name = local.instance_name })
+}
+
+module "private_dns_record" {
+  source = "../../../submodules/dns/private_A_record"
+
+  name = local.instance_name
+  ip_addresses = [ aws_instance.instance.private_ip ]
+  dns_info = var.dns_info
+  reverse_enabled = var.reverse_enabled
+
+  providers = {
+    aws.c2 = aws.c2
+  }
+}
+
+data "template_file" "cloud-init" {
+  # Should these be in a common directory? I suspect they'd be reusable
+  template = file("${path.module}/cloud-init/cloud-init.tpl")
+
+  vars = {
+    hostname = local.instance_name
+    fqdn = "${local.instance_name}.${var.dns_info["private"]["zone"]}"
+    environment = var.environment
+    salt_master  = var.salt_master
+    proxy = var.proxy
+    aws_partition = var.aws_partition
+    aws_partition_alias = var.aws_partition_alias
+  }
+}
+
+# Render a multi-part cloud-init config making use of the part
+# above, and other source files
+data "template_cloudinit_config" "cloud-init" {
+  gzip          = true
+  base64_encode = true
+
+  # Main cloud-config configuration file.
+  part {
+    filename     = "init.cfg"
+    content_type = "text/cloud-config"
+    content      = data.template_file.cloud-init.rendered
+  }
+
+  # Additional parts as needed
+  #part {
+  #  content_type = "text/x-shellscript"
+  #  content      = "ffbaz"
+  #}
+}
+
+## Searchhead
+#
+# Summary:
+#   Ingress:
+#     tcp/8000      - Splunk Web                 - vpc-access, legacy openvpn, legacy bastion, Phantom
+#     tcp/8089      - Splunk API                 - vpc-access, legacy openvpn, legacy bastion, Phantom
+#     tcp/8089      - Splunk API + IDX Discovery - Entire VPC + var.splunk_legacy_cidr 
+#
+#   Egress:
+#     tcp/8089      - Splunk API + IDX Discovery - Entire VPC + var.splunk_legacy_cidr
+resource "aws_security_group" "searchhead_security_group" {
+  name = "searchhead_security_group"
+  description = "Security Group for Splunk Searchhead Instance(s)"
+  vpc_id = var.vpc_id
+  tags = merge(var.standard_tags, var.tags)
+}
+
+# Ingress
+resource "aws_security_group_rule" "splunk-web-in" {
+  description       = "Web access"
+  type              = "ingress"
+  from_port         = 8000
+  to_port           = 8000
+  protocol          = "tcp"
+  cidr_blocks       = toset(concat(var.cidr_map["vpc-access"], var.cidr_map["vpc-private-services"]))
+  security_group_id = aws_security_group.searchhead_security_group.id
+}
+
+resource "aws_security_group_rule" "splunk-api-in" {
+  description       = "Splunk API"
+  type              = "ingress"
+  from_port         = 8089
+  to_port           = 8089
+  protocol          = "tcp"
+  cidr_blocks       = toset(concat(var.cidr_map["vpc-access"],
+                                   var.cidr_map["vpc-private-services"],
+                                   var.splunk_legacy_cidr,
+                                   [ var.vpc_cidr ]
+                      ))
+  security_group_id = aws_security_group.searchhead_security_group.id
+}
+
+# Egress
+resource "aws_security_group_rule" "splunk-api-out" {
+  description       = "Splunk API Outbound to talk to indexers"
+  type              = "egress"
+  from_port         = 8089
+  to_port           = 8089
+  protocol          = "tcp"
+  cidr_blocks       = toset(concat([ var.vpc_cidr ], var.splunk_legacy_cidr))
+  security_group_id = aws_security_group.searchhead_security_group.id
+}

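Both new modules render a single cloud-config part and leave a commented stub for more. If a shell-script part were ever needed, it would slot into template_cloudinit_config like this (a sketch; the script path is hypothetical):

    part {
      content_type = "text/x-shellscript"
      # Hypothetical bootstrap script; no such file exists in this commit.
      content      = file("${path.module}/scripts/post-boot.sh")
    }
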
+ 7 - 0
base/splunk_servers/searchhead/outputs.tf

@@ -0,0 +1,7 @@
+output "instance_arn" {
+  value = aws_instance.instance.arn
+}
+
+output "instance_private_ip" {
+  value = aws_instance.instance.private_ip
+}

+ 57 - 0
base/splunk_servers/searchhead/vars.tf

@@ -0,0 +1,57 @@
+variable "prefix" {
+  description = "Prefix for Instance Names"
+  type = string
+}
+
+variable "splunk_legacy_cidr" {
+  description = "The legacy CIDR block(s)"
+  default = []
+  type = list(string)
+}
+
+variable "azs" {
+  type = list(string)
+}
+
+variable "subnets" {
+  type = list(string)
+}
+
+variable "vpc_id" {
+  type = string
+}
+
+variable "vpc_cidr" {
+  type = string
+}
+
+variable "tags" {
+  description = "Tags to add to the resource (in addition to global standard tags)"
+  type        = map
+  default     = { }
+}
+
+variable "instance_type" { 
+  type = string
+  default = "t3a.micro"
+}
+
+variable "reverse_enabled" { 
+  description = "Whether to create the reverse DNS entry."
+  type = bool
+  default = true
+}
+
+variable "trusted_ips" { type = list(string) }
+variable "proxy" { type = string }
+variable "salt_master" { type = string }
+
+variable "cidr_map" { type = map }
+variable "dns_info" { type = map }
+variable "standard_tags" { type = map }
+variable "environment" { type = string }
+variable "aws_region" { type = string }
+variable "aws_partition" { type = string }
+variable "aws_partition_alias" { type = string }
+variable "common_services_account" { type = string }
+variable "instance_termination_protection" { type = bool }

+ 3 - 0
base/splunk_servers/searchhead/version.tf

@@ -0,0 +1,3 @@
+terraform {
+  required_version = "~> 0.13"
+}

+ 3 - 3
submodules/splunk/splunk_indexer_asg/outputs.tf

@@ -1,7 +1,7 @@
 output asg_id {
-  value =  [ "${aws_autoscaling_group.splunk_indexer_asg.id}" ]
+  value =  [ aws_autoscaling_group.splunk_indexer_asg.id ]
 }
 
 output asg_name {
-  value =  [ "${aws_autoscaling_group.splunk_indexer_asg.name}" ]
-}
+  value =  [ aws_autoscaling_group.splunk_indexer_asg.name ]
+}