Procházet zdrojové kódy

Merge pull request #90 from mdr-engineering/feature/ftd_MSOCI-1445_FixesForMooseInGC

Fixes for Moose/Splunk in GovCloud
Frederick Damstra před 4 roky
rodič
revize
c83959a7af

+ 80 - 0
base/splunk_servers/cluster_master/cloud-init/opt_splunk.boothook

@@ -0,0 +1,80 @@
+#!/bin/bash
+#
+#
+exec > /dev/console
+exec 2>&1
+
+declare -A EBSMAP
+
# Build a map of EBS NVMe disks from their AWS-API-name to their NVMe name.
# This fills EBSMAP (an associative array, like a python hash) keyed by the
# sdX/xvdX name you'd set in the AWS API, valued with the corresponding
# nvmeX device name.  The API-side name is read from bytes 3073-3104 of the
# nvme id-ctrl raw output (vendor-specific area on EBS volumes).
#
# One interesting side effect observed:  the id-ctrl output is different when
# volumes are attached at boot time (no /dev/) versus attached after the OS
# is started (includes /dev/), so the /dev/ prefix is stripped either way.
# Each volume is registered under both its sdX and xvdX spelling since the
# AWS API treats the two interchangeably.
function make_nve_ebs_map {
        for DEVICE in $( lsblk -d -o NAME,MODEL -n | grep -E "Elastic Block Store" | awk '{ print $1 }' ); do
                UNDERLYING=$( nvme id-ctrl --raw-binary "/dev/${DEVICE}" 2>/dev/null | cut -c 3073-3104 | tr -d ' ' | sed "s#/dev/##" )

                EBSMAP[$UNDERLYING]=$DEVICE
                # Also record the xvdX spelling (parameter expansion replaces
                # the first "sd", matching the old `echo | sed "s/sd/xvd/"`).
                UNDERLYING2=${UNDERLYING/sd/xvd}
                EBSMAP[$UNDERLYING2]=$DEVICE
        done
}
+
# Wait for an EBS volume to attach, then format (if needed), register in
# /etc/fstab, and mount it.
#
# Arguments:
#   $1 - filesystem label to create/use (e.g. opt_splunk)
#   $2 - AWS-API device name (e.g. xvdf)
#   $3 - mount point directory
# Returns: 0 on success, 1 if the volume never appears.
function do_the_mount
{
	VOL_LABEL=$1
	VOLUME=$2
	MOUNTPOINT=$3

	DEV=""
	DONE=0
	TRIES=0
	# Poll for up to 20 tries (~200s).  On NVMe instance types the volume
	# shows up under an nvmeXnY name, so also consult EBSMAP.
	while [[ $DONE -ne 1 ]] && [[ $TRIES -lt 20 ]]; do
		echo "Looking for $VOLUME to come attached"
		make_nve_ebs_map

		#echo "------- current nvme/ebs map -------"
		#for K in "${!EBSMAP[@]}"; do echo $K  = ${EBSMAP[$K]} ; done
		#echo "------- end current nvme/ebs map -------"

		if [[ -b "/dev/$VOLUME" ]]; then
			DEV="/dev/$VOLUME"
			DONE=1
		elif [[ -b "/dev/${EBSMAP[$VOLUME]}" ]]; then
			DEV="/dev/${EBSMAP[$VOLUME]}"
			DONE=1
		else
			sleep 10
			TRIES=$(( TRIES + 1 ))
		fi
	done

	# Bail out rather than running mkfs/mount against an empty device path.
	if [[ -z "$DEV" ]]; then
		echo "ERROR: volume $VOLUME never appeared after $TRIES tries; giving up" >&2
		return 1
	fi
	echo "Volume $VOLUME available at $DEV"

	if ! [[ -d "${MOUNTPOINT}" ]]; then
		echo "Creating mount directory ${MOUNTPOINT}"
		mkdir -p "${MOUNTPOINT}"
	fi

	# Only make a filesystem when no device already carries this label,
	# so data survives reboots / re-runs of this boothook.
	if ! blkid -l -t "LABEL=${VOL_LABEL}"; then
		echo "Making filesystem for LABEL=${VOL_LABEL} on ${DEV}"
		mkfs.xfs -L "${VOL_LABEL}" "${DEV}"
	fi

	if ! grep -E -q "LABEL=${VOL_LABEL}" /etc/fstab; then
		echo "Adding LABEL=${VOL_LABEL} to /etc/fstab"
		echo "LABEL=${VOL_LABEL}       ${MOUNTPOINT}    xfs    noatime,nofail  0 2" >> /etc/fstab
	fi

	if ! mountpoint "${MOUNTPOINT}" >/dev/null 2>&1; then
		echo "Mounting ${MOUNTPOINT}"
		mount "${MOUNTPOINT}"
	fi

}
+
+do_the_mount opt_splunk xvdf /opt/splunk

+ 24 - 14
base/splunk_servers/cluster_master/main.tf

@@ -43,17 +43,27 @@ resource "aws_instance" "instance" {
 
   # These device definitions are optional, but added for clarity.
   root_block_device {
-      volume_type = "gp2"
-      #volume_size = "60"
+    volume_type = "gp2"
+    volume_size = var.splunk_volume_sizes["cluster_master"]["/"]
       delete_on_termination = true
       encrypted = true
       kms_key_id = data.aws_kms_key.ebs-key.arn
   }
 
+  ebs_block_device {
+    # /opt/splunk
+    # Note: Not in AMI
+    device_name = "/dev/xvdf"
+    volume_size = var.splunk_volume_sizes["cluster_master"]["/opt/splunk"]
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+  }
+  
   ebs_block_device {
     # swap
     device_name = "/dev/xvdm"
-    volume_size = 48
+    volume_size = var.splunk_volume_sizes["cluster_master"]["swap"]
     delete_on_termination = true
     encrypted = true
     kms_key_id = data.aws_kms_key.ebs-key.arn
@@ -67,7 +77,7 @@ resource "aws_instance" "instance" {
   ebs_block_device {
     # /home
     device_name = "/dev/xvdn"
-    # volume_size = xx
+    volume_size = var.splunk_volume_sizes["cluster_master"]["/home"]
     delete_on_termination = true
     encrypted = true
     kms_key_id = data.aws_kms_key.ebs-key.arn
@@ -77,7 +87,7 @@ resource "aws_instance" "instance" {
   ebs_block_device {
     # /var
     device_name = "/dev/xvdo"
-    # volume_size = xx
+    volume_size = var.splunk_volume_sizes["cluster_master"]["/var"]
     delete_on_termination = true
     encrypted = true
     kms_key_id = data.aws_kms_key.ebs-key.arn
@@ -86,7 +96,7 @@ resource "aws_instance" "instance" {
   ebs_block_device {
     # /var/tmp
     device_name = "/dev/xvdp"
-    # volume_size = xx
+    volume_size = var.splunk_volume_sizes["cluster_master"]["/var/tmp"]
     delete_on_termination = true
     encrypted = true
     kms_key_id = data.aws_kms_key.ebs-key.arn
@@ -95,7 +105,7 @@ resource "aws_instance" "instance" {
   ebs_block_device {
     # /var/log
     device_name = "/dev/xvdq"
-    # volume_size = xx
+    volume_size = var.splunk_volume_sizes["cluster_master"]["/var/log"]
     delete_on_termination = true
     encrypted = true
     kms_key_id = data.aws_kms_key.ebs-key.arn
@@ -104,7 +114,7 @@ resource "aws_instance" "instance" {
   ebs_block_device {
     # /var/log/audit
     device_name = "/dev/xvdr"
-    # volume_size = xx
+    volume_size = var.splunk_volume_sizes["cluster_master"]["/var/log/audit"]
     delete_on_termination = true
     encrypted = true
     kms_key_id = data.aws_kms_key.ebs-key.arn
@@ -113,7 +123,7 @@ resource "aws_instance" "instance" {
   ebs_block_device {
     # /tmp
     device_name = "/dev/xvds"
-    # volume_size = xx
+    volume_size = var.splunk_volume_sizes["cluster_master"]["/tmp"]
     delete_on_termination = true
     encrypted = true
     kms_key_id = data.aws_kms_key.ebs-key.arn
@@ -171,11 +181,11 @@ data "template_cloudinit_config" "cloud-init" {
     content      = data.template_file.cloud-init.rendered
   }
 
-  # Additional parts as needed
-  #part {
-  #  content_type = "text/x-shellscript"
-  #  content      = "ffbaz"
-  #}
+  # mount /dev/xvdf at /opt/splunk
+  part {
+    content_type = "text/cloud-boothook"
+    content      = file("${path.module}/cloud-init/opt_splunk.boothook")
+  }
 }
 
 ## Cluster Master Security Group

+ 4 - 0
base/splunk_servers/cluster_master/vars.tf

@@ -9,6 +9,10 @@ variable "splunk_legacy_cidr" {
   type = list(string)
 }
 
# Nested map of EBS volume sizes: server role -> mount point -> size,
# e.g. splunk_volume_sizes["cluster_master"]["/opt/splunk"].
variable "splunk_volume_sizes" {
  type = map(map(number))
}
+
 variable "azs" {
   type = list(string)
 }

+ 80 - 0
base/splunk_servers/heavy_forwarder/cloud-init/opt_splunk.boothook

@@ -0,0 +1,80 @@
+#!/bin/bash
+#
+#
+exec > /dev/console
+exec 2>&1
+
+declare -A EBSMAP
+
# Build a map of EBS NVMe disks from their AWS-API-name to their NVMe name.
# This fills EBSMAP (an associative array, like a python hash) keyed by the
# sdX/xvdX name you'd set in the AWS API, valued with the corresponding
# nvmeX device name.  The API-side name is read from bytes 3073-3104 of the
# nvme id-ctrl raw output (vendor-specific area on EBS volumes).
#
# One interesting side effect observed:  the id-ctrl output is different when
# volumes are attached at boot time (no /dev/) versus attached after the OS
# is started (includes /dev/), so the /dev/ prefix is stripped either way.
# Each volume is registered under both its sdX and xvdX spelling since the
# AWS API treats the two interchangeably.
function make_nve_ebs_map {
        for DEVICE in $( lsblk -d -o NAME,MODEL -n | grep -E "Elastic Block Store" | awk '{ print $1 }' ); do
                UNDERLYING=$( nvme id-ctrl --raw-binary "/dev/${DEVICE}" 2>/dev/null | cut -c 3073-3104 | tr -d ' ' | sed "s#/dev/##" )

                EBSMAP[$UNDERLYING]=$DEVICE
                # Also record the xvdX spelling (parameter expansion replaces
                # the first "sd", matching the old `echo | sed "s/sd/xvd/"`).
                UNDERLYING2=${UNDERLYING/sd/xvd}
                EBSMAP[$UNDERLYING2]=$DEVICE
        done
}
+
# Wait for an EBS volume to attach, then format (if needed), register in
# /etc/fstab, and mount it.
#
# Arguments:
#   $1 - filesystem label to create/use (e.g. opt_splunk)
#   $2 - AWS-API device name (e.g. xvdf)
#   $3 - mount point directory
# Returns: 0 on success, 1 if the volume never appears.
function do_the_mount
{
	VOL_LABEL=$1
	VOLUME=$2
	MOUNTPOINT=$3

	DEV=""
	DONE=0
	TRIES=0
	# Poll for up to 20 tries (~200s).  On NVMe instance types the volume
	# shows up under an nvmeXnY name, so also consult EBSMAP.
	while [[ $DONE -ne 1 ]] && [[ $TRIES -lt 20 ]]; do
		echo "Looking for $VOLUME to come attached"
		make_nve_ebs_map

		#echo "------- current nvme/ebs map -------"
		#for K in "${!EBSMAP[@]}"; do echo $K  = ${EBSMAP[$K]} ; done
		#echo "------- end current nvme/ebs map -------"

		if [[ -b "/dev/$VOLUME" ]]; then
			DEV="/dev/$VOLUME"
			DONE=1
		elif [[ -b "/dev/${EBSMAP[$VOLUME]}" ]]; then
			DEV="/dev/${EBSMAP[$VOLUME]}"
			DONE=1
		else
			sleep 10
			TRIES=$(( TRIES + 1 ))
		fi
	done

	# Bail out rather than running mkfs/mount against an empty device path.
	if [[ -z "$DEV" ]]; then
		echo "ERROR: volume $VOLUME never appeared after $TRIES tries; giving up" >&2
		return 1
	fi
	echo "Volume $VOLUME available at $DEV"

	if ! [[ -d "${MOUNTPOINT}" ]]; then
		echo "Creating mount directory ${MOUNTPOINT}"
		mkdir -p "${MOUNTPOINT}"
	fi

	# Only make a filesystem when no device already carries this label,
	# so data survives reboots / re-runs of this boothook.
	if ! blkid -l -t "LABEL=${VOL_LABEL}"; then
		echo "Making filesystem for LABEL=${VOL_LABEL} on ${DEV}"
		mkfs.xfs -L "${VOL_LABEL}" "${DEV}"
	fi

	if ! grep -E -q "LABEL=${VOL_LABEL}" /etc/fstab; then
		echo "Adding LABEL=${VOL_LABEL} to /etc/fstab"
		echo "LABEL=${VOL_LABEL}       ${MOUNTPOINT}    xfs    noatime,nofail  0 2" >> /etc/fstab
	fi

	if ! mountpoint "${MOUNTPOINT}" >/dev/null 2>&1; then
		echo "Mounting ${MOUNTPOINT}"
		mount "${MOUNTPOINT}"
	fi

}
+
+do_the_mount opt_splunk xvdf /opt/splunk

+ 28 - 18
base/splunk_servers/heavy_forwarder/main.tf

@@ -1,7 +1,7 @@
 # Some instance variables
 locals {
   ami_selection = "minion" # master, minion, ...
-  instance_name = "${ var.prefix }-splunk-sh"
+  instance_name = "${ var.prefix }-splunk-hf"
   is_moose = length(regexall("moose", var.prefix)) > 0 ? true : false
 }
 
@@ -43,17 +43,27 @@ resource "aws_instance" "instance" {
 
   # These device definitions are optional, but added for clarity.
   root_block_device {
-      volume_type = "gp2"
-      #volume_size = "60"
-      delete_on_termination = true
-      encrypted = true
-      kms_key_id = data.aws_kms_key.ebs-key.arn
+    volume_type = "gp2"
+    volume_size = var.splunk_volume_sizes["heavy_forwarder"]["/"]
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+  }
+
+  ebs_block_device {
+    # /opt/splunk
+    # Note: Not in AMI
+    device_name = "/dev/xvdf"
+    volume_size = var.splunk_volume_sizes["heavy_forwarder"]["/opt/splunk"]
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
   }
 
   ebs_block_device {
     # swap
     device_name = "/dev/xvdm"
-    volume_size = 48
+    volume_size = var.splunk_volume_sizes["heavy_forwarder"]["swap"]
     delete_on_termination = true
     encrypted = true
     kms_key_id = data.aws_kms_key.ebs-key.arn
@@ -67,7 +77,7 @@ resource "aws_instance" "instance" {
   ebs_block_device {
     # /home
     device_name = "/dev/xvdn"
-    # volume_size = xx
+    volume_size = var.splunk_volume_sizes["heavy_forwarder"]["/home"]
     delete_on_termination = true
     encrypted = true
     kms_key_id = data.aws_kms_key.ebs-key.arn
@@ -77,7 +87,7 @@ resource "aws_instance" "instance" {
   ebs_block_device {
     # /var
     device_name = "/dev/xvdo"
-    # volume_size = xx
+    volume_size = var.splunk_volume_sizes["heavy_forwarder"]["/var"]
     delete_on_termination = true
     encrypted = true
     kms_key_id = data.aws_kms_key.ebs-key.arn
@@ -86,7 +96,7 @@ resource "aws_instance" "instance" {
   ebs_block_device {
     # /var/tmp
     device_name = "/dev/xvdp"
-    # volume_size = xx
+    volume_size = var.splunk_volume_sizes["heavy_forwarder"]["/var/tmp"]
     delete_on_termination = true
     encrypted = true
     kms_key_id = data.aws_kms_key.ebs-key.arn
@@ -95,7 +105,7 @@ resource "aws_instance" "instance" {
   ebs_block_device {
     # /var/log
     device_name = "/dev/xvdq"
-    # volume_size = xx
+    volume_size = var.splunk_volume_sizes["heavy_forwarder"]["/var/log"]
     delete_on_termination = true
     encrypted = true
     kms_key_id = data.aws_kms_key.ebs-key.arn
@@ -104,7 +114,7 @@ resource "aws_instance" "instance" {
   ebs_block_device {
     # /var/log/audit
     device_name = "/dev/xvdr"
-    # volume_size = xx
+    volume_size = var.splunk_volume_sizes["heavy_forwarder"]["/var/log/audit"]
     delete_on_termination = true
     encrypted = true
     kms_key_id = data.aws_kms_key.ebs-key.arn
@@ -113,7 +123,7 @@ resource "aws_instance" "instance" {
   ebs_block_device {
     # /tmp
     device_name = "/dev/xvds"
-    # volume_size = xx
+    volume_size = var.splunk_volume_sizes["heavy_forwarder"]["/tmp"]
     delete_on_termination = true
     encrypted = true
     kms_key_id = data.aws_kms_key.ebs-key.arn
@@ -171,11 +181,11 @@ data "template_cloudinit_config" "cloud-init" {
     content      = data.template_file.cloud-init.rendered
   }
 
-  # Additional parts as needed
-  #part {
-  #  content_type = "text/x-shellscript"
-  #  content      = "ffbaz"
-  #}
+  # mount /dev/xvdf at /opt/splunk
+  part {
+    content_type = "text/cloud-boothook"
+    content      = file("${path.module}/cloud-init/opt_splunk.boothook")
+  }
 }
 
 ## Heavy Forwarder

+ 4 - 0
base/splunk_servers/heavy_forwarder/vars.tf

@@ -17,6 +17,10 @@ variable "subnets" {
   type = list(string)
 }
 
# Nested map of EBS volume sizes: server role -> mount point -> size,
# e.g. splunk_volume_sizes["heavy_forwarder"]["/opt/splunk"].
variable "splunk_volume_sizes" {
  type = map(map(number))
}
+
 variable "vpc_id" {
   type = string
 }

+ 3 - 3
base/splunk_servers/indexer_cluster/asg.tf

@@ -4,7 +4,7 @@ module "indexer0" {
   asg_number                 = 0
   asg_name                   = "${local.asg_name}-0"
   launch_conf_name           = "${local.launch_config_name}-0"
-  volume_sizes               = var.volume_sizes
+  volume_sizes               = var.splunk_volume_sizes["indexer"]
   idx_instance_type          = var.instance_type
   user_data                  = data.template_cloudinit_config.cloud-init.rendered
   indexer_security_group_ids = [ data.aws_security_group.typical-host.id, aws_security_group.indexer_security_group.id ]
@@ -23,7 +23,7 @@ module "indexer1" {
   asg_number                 = 1
   asg_name                   = "${local.asg_name}-1"
   launch_conf_name           = "${local.launch_config_name}-1"
-  volume_sizes               = var.volume_sizes
+  volume_sizes               = var.splunk_volume_sizes["indexer"]
   idx_instance_type          = var.instance_type
   user_data                  = data.template_cloudinit_config.cloud-init.rendered
   indexer_security_group_ids = [ data.aws_security_group.typical-host.id, aws_security_group.indexer_security_group.id ]
@@ -42,7 +42,7 @@ module "indexer2" {
   asg_number                 = 2
   asg_name                   = "${local.asg_name}-2"
   launch_conf_name           = "${local.launch_config_name}-2"
-  volume_sizes               = var.volume_sizes
+  volume_sizes               = var.splunk_volume_sizes["indexer"]
   idx_instance_type          = var.instance_type
   user_data                  = data.template_cloudinit_config.cloud-init.rendered
   indexer_security_group_ids = [ data.aws_security_group.typical-host.id, aws_security_group.indexer_security_group.id ]

+ 3 - 3
base/splunk_servers/indexer_cluster/cloud-init/cloud-init.tpl

@@ -38,9 +38,9 @@ growpart:
   ignore_growroot_disabled: false
 
 bootcmd:
- - "INSTANCE_ID=`/usr/bin/curl -f --connect-timeout 1 --silent http://169.254.169.254/latest/meta-data/instance-id`"
- - "/bin/hostnamectl set-hostname ${prefix}-splunk-indexer-$INSTANCE_ID'.${zone}'"
- - "/bin/hostname > /etc/salt/minion_id"
+ - "INSTANCE_ID=`/usr/bin/curl -f --connect-timeout 1 --silent http://169.254.169.254/latest/meta-data/instance-id | tail -c 3`"
+ - "/bin/hostnamectl set-hostname ${prefix}-splunk-idx-$INSTANCE_ID'.${zone}'"
+ - "echo ${prefix}-splunk-idx-$INSTANCE_ID'.${zone}' > /etc/salt/minion_id"
 
 runcmd:
  - /bin/systemctl restart salt-minion

+ 2 - 2
base/splunk_servers/indexer_cluster/vars.tf

@@ -3,9 +3,9 @@ variable "prefix" {
   type = string
 }
 
-variable "volume_sizes" {
+variable "splunk_volume_sizes" {
   description = "Map of volume sizes"
-  type = map(number)
+  type = map(map(number))
 }
 
 variable "splunk_legacy_cidr" {

+ 80 - 0
base/splunk_servers/searchhead/cloud-init/opt_splunk.boothook

@@ -0,0 +1,80 @@
+#!/bin/bash
+#
+#
+exec > /dev/console
+exec 2>&1
+
+declare -A EBSMAP
+
# Build a map of EBS NVMe disks from their AWS-API-name to their NVMe name.
# This fills EBSMAP (an associative array, like a python hash) keyed by the
# sdX/xvdX name you'd set in the AWS API, valued with the corresponding
# nvmeX device name.  The API-side name is read from bytes 3073-3104 of the
# nvme id-ctrl raw output (vendor-specific area on EBS volumes).
#
# One interesting side effect observed:  the id-ctrl output is different when
# volumes are attached at boot time (no /dev/) versus attached after the OS
# is started (includes /dev/), so the /dev/ prefix is stripped either way.
# Each volume is registered under both its sdX and xvdX spelling since the
# AWS API treats the two interchangeably.
function make_nve_ebs_map {
        for DEVICE in $( lsblk -d -o NAME,MODEL -n | grep -E "Elastic Block Store" | awk '{ print $1 }' ); do
                UNDERLYING=$( nvme id-ctrl --raw-binary "/dev/${DEVICE}" 2>/dev/null | cut -c 3073-3104 | tr -d ' ' | sed "s#/dev/##" )

                EBSMAP[$UNDERLYING]=$DEVICE
                # Also record the xvdX spelling (parameter expansion replaces
                # the first "sd", matching the old `echo | sed "s/sd/xvd/"`).
                UNDERLYING2=${UNDERLYING/sd/xvd}
                EBSMAP[$UNDERLYING2]=$DEVICE
        done
}
+
# Wait for an EBS volume to attach, then format (if needed), register in
# /etc/fstab, and mount it.
#
# Arguments:
#   $1 - filesystem label to create/use (e.g. opt_splunk)
#   $2 - AWS-API device name (e.g. xvdf)
#   $3 - mount point directory
# Returns: 0 on success, 1 if the volume never appears.
function do_the_mount
{
	VOL_LABEL=$1
	VOLUME=$2
	MOUNTPOINT=$3

	DEV=""
	DONE=0
	TRIES=0
	# Poll for up to 20 tries (~200s).  On NVMe instance types the volume
	# shows up under an nvmeXnY name, so also consult EBSMAP.
	while [[ $DONE -ne 1 ]] && [[ $TRIES -lt 20 ]]; do
		echo "Looking for $VOLUME to come attached"
		make_nve_ebs_map

		#echo "------- current nvme/ebs map -------"
		#for K in "${!EBSMAP[@]}"; do echo $K  = ${EBSMAP[$K]} ; done
		#echo "------- end current nvme/ebs map -------"

		if [[ -b "/dev/$VOLUME" ]]; then
			DEV="/dev/$VOLUME"
			DONE=1
		elif [[ -b "/dev/${EBSMAP[$VOLUME]}" ]]; then
			DEV="/dev/${EBSMAP[$VOLUME]}"
			DONE=1
		else
			sleep 10
			TRIES=$(( TRIES + 1 ))
		fi
	done

	# Bail out rather than running mkfs/mount against an empty device path.
	if [[ -z "$DEV" ]]; then
		echo "ERROR: volume $VOLUME never appeared after $TRIES tries; giving up" >&2
		return 1
	fi
	echo "Volume $VOLUME available at $DEV"

	if ! [[ -d "${MOUNTPOINT}" ]]; then
		echo "Creating mount directory ${MOUNTPOINT}"
		mkdir -p "${MOUNTPOINT}"
	fi

	# Only make a filesystem when no device already carries this label,
	# so data survives reboots / re-runs of this boothook.
	if ! blkid -l -t "LABEL=${VOL_LABEL}"; then
		echo "Making filesystem for LABEL=${VOL_LABEL} on ${DEV}"
		mkfs.xfs -L "${VOL_LABEL}" "${DEV}"
	fi

	if ! grep -E -q "LABEL=${VOL_LABEL}" /etc/fstab; then
		echo "Adding LABEL=${VOL_LABEL} to /etc/fstab"
		echo "LABEL=${VOL_LABEL}       ${MOUNTPOINT}    xfs    noatime,nofail  0 2" >> /etc/fstab
	fi

	if ! mountpoint "${MOUNTPOINT}" >/dev/null 2>&1; then
		echo "Mounting ${MOUNTPOINT}"
		mount "${MOUNTPOINT}"
	fi

}
+
+do_the_mount opt_splunk xvdf /opt/splunk

+ 27 - 17
base/splunk_servers/searchhead/main.tf

@@ -43,17 +43,27 @@ resource "aws_instance" "instance" {
 
   # These device definitions are optional, but added for clarity.
   root_block_device {
-      volume_type = "gp2"
-      #volume_size = "60"
-      delete_on_termination = true
-      encrypted = true
-      kms_key_id = data.aws_kms_key.ebs-key.arn
+    volume_type = "gp2"
+    volume_size = var.splunk_volume_sizes["searchhead"]["/"]
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+  }
+
+  ebs_block_device {
+    # /opt/splunk
+    # Note: Not in AMI
+    device_name = "/dev/xvdf"
+    volume_size = var.splunk_volume_sizes["searchhead"]["/opt/splunk"]
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
   }
 
   ebs_block_device {
     # swap
     device_name = "/dev/xvdm"
-    volume_size = 48
+    volume_size = var.splunk_volume_sizes["searchhead"]["swap"]
     delete_on_termination = true
     encrypted = true
     kms_key_id = data.aws_kms_key.ebs-key.arn
@@ -67,7 +77,7 @@ resource "aws_instance" "instance" {
   ebs_block_device {
     # /home
     device_name = "/dev/xvdn"
-    # volume_size = xx
+    volume_size = var.splunk_volume_sizes["searchhead"]["/home"]
     delete_on_termination = true
     encrypted = true
     kms_key_id = data.aws_kms_key.ebs-key.arn
@@ -77,7 +87,7 @@ resource "aws_instance" "instance" {
   ebs_block_device {
     # /var
     device_name = "/dev/xvdo"
-    # volume_size = xx
+    volume_size = var.splunk_volume_sizes["searchhead"]["/var"]
     delete_on_termination = true
     encrypted = true
     kms_key_id = data.aws_kms_key.ebs-key.arn
@@ -86,7 +96,7 @@ resource "aws_instance" "instance" {
   ebs_block_device {
     # /var/tmp
     device_name = "/dev/xvdp"
-    # volume_size = xx
+    volume_size = var.splunk_volume_sizes["searchhead"]["/var/tmp"]
     delete_on_termination = true
     encrypted = true
     kms_key_id = data.aws_kms_key.ebs-key.arn
@@ -95,7 +105,7 @@ resource "aws_instance" "instance" {
   ebs_block_device {
     # /var/log
     device_name = "/dev/xvdq"
-    # volume_size = xx
+    volume_size = var.splunk_volume_sizes["searchhead"]["/var/log"]
     delete_on_termination = true
     encrypted = true
     kms_key_id = data.aws_kms_key.ebs-key.arn
@@ -104,7 +114,7 @@ resource "aws_instance" "instance" {
   ebs_block_device {
     # /var/log/audit
     device_name = "/dev/xvdr"
-    # volume_size = xx
+    volume_size = var.splunk_volume_sizes["searchhead"]["/var/log/audit"]
     delete_on_termination = true
     encrypted = true
     kms_key_id = data.aws_kms_key.ebs-key.arn
@@ -113,7 +123,7 @@ resource "aws_instance" "instance" {
   ebs_block_device {
     # /tmp
     device_name = "/dev/xvds"
-    # volume_size = xx
+    volume_size = var.splunk_volume_sizes["searchhead"]["/tmp"]
     delete_on_termination = true
     encrypted = true
     kms_key_id = data.aws_kms_key.ebs-key.arn
@@ -171,11 +181,11 @@ data "template_cloudinit_config" "cloud-init" {
     content      = data.template_file.cloud-init.rendered
   }
 
-  # Additional parts as needed
-  #part {
-  #  content_type = "text/x-shellscript"
-  #  content      = "ffbaz"
-  #}
+  # mount /dev/xvdf at /opt/splunk
+  part {
+    content_type = "text/cloud-boothook"
+    content      = file("${path.module}/cloud-init/opt_splunk.boothook")
+  }
 }
 
 ## Searchhead

+ 4 - 0
base/splunk_servers/searchhead/vars.tf

@@ -13,6 +13,10 @@ variable "azs" {
   type = list(string)
 }
 
# Nested map of EBS volume sizes: server role -> mount point -> size,
# e.g. splunk_volume_sizes["searchhead"]["/opt/splunk"].
variable "splunk_volume_sizes" {
  type = map(map(number))
}
+
 variable "subnets" {
   type = list(string)
 }

+ 13 - 1
submodules/splunk/splunk_indexer_asg/main.tf

@@ -53,7 +53,19 @@ resource "aws_launch_template" "splunk_indexer" {
         volume_size = var.volume_sizes["/"]
         delete_on_termination = true
         encrypted = true
-        #kms_key_id = data.aws_kms_key.ebs-key.arn
+        kms_key_id = data.aws_kms_key.ebs-key.arn
+      }
+    }
+
+    block_device_mappings {
+      device_name = "/dev/xvdf"
+
+      ebs {
+        volume_type = "gp2"
+        volume_size = var.volume_sizes["/opt/splunk"]
+        delete_on_termination = true
+        encrypted = true
+        kms_key_id = data.aws_kms_key.ebs-key.arn
       }
     }