#!/bin/bash
#
# Build (or repair) the Splunk "hot" volume group / logical volume and
# mount it at /opt/splunkdata/hot.
#
# Assumptions:
#
# 1] we're on aws
# 2] any ephemeral devices are ours to take
# 3] if no ephemeral devices are available then there's a list
#    of hard-coded block devices to use for the splunk hot VG
#
#--------------------------------------------------------------------

# Boot-time script: send all output to the console so failures are visible.
exec > /dev/console
exec 2>&1

HOT_VG_NAME="vg_splunkhot"
HOT_LV_NAME="lv_splunkhot"

# These are the *HARD-CODED* volumes that we will use when no
# ephemeral disks are available
HOT_EBS_VOLUMES="xvdg xvdh"

# IMDSv2 session token for the instance metadata service (90s TTL is
# plenty; we only make a couple of requests right below).
IMDS2_TOKEN=$( curl --silent --fail -X PUT --connect-timeout 1 --max-time 2 \
    'http://169.254.169.254/latest/api/token' \
    -H 'X-aws-ec2-metadata-token-ttl-seconds: 90' )

# FIX: this was "-silent", which curl parses as the bundled short options
# "-s -i -l -e nt".  The stray "-i" included the HTTP response headers in
# the output, corrupting any value captured via ${CURL}.
CURL="curl --fail --connect-timeout 1 --silent"

declare -A EBSMAP

# Yes heavy assumption we're on AWS.  Fetching the instance type doubles
# as a sanity check that the metadata service (and our token) works.
INSTANCE_TYPE=$( ${CURL} -H "X-aws-ec2-metadata-token: ${IMDS2_TOKEN}" \
    http://169.254.169.254/latest/meta-data/instance-type )
if [[ "$INSTANCE_TYPE" == "" ]]; then
    echo "Could not figure out instance type, giving up"
    exit 1
fi

# Build a map of EBS NVMe disks from their AWS-API-name to their NVMe name.
# This makes an associative array (like a python hash) of the
# sdX/xvdX name you'd set in AWS API to the corresponding nvmeX name.
# Thanks Fred for the awesome id-ctrl stuff I'd never seen before.
#
# One interesting side effect observed: the id-ctrl output is different when
# volumes are attached at boot time (no /dev/) versus attached after the OS
# is started (includes /dev/).
function make_nvme_ebs_map {
    for DEVICE in $( lsblk -d -o NAME,MODEL -n | grep -E "Elastic Block Store" | awk '{ print $1 }' ); do
        # Bytes 3073-3104 of the id-ctrl vendor data hold the attachment
        # name (e.g. "sdf" or "/dev/sdf"); strip spaces and any /dev/ prefix.
        UNDERLYING=$( nvme id-ctrl --raw-binary /dev/${DEVICE} 2>/dev/null | cut -c 3073-3104 | tr -d ' ' | sed "s#/dev/##" )
        EBSMAP[$UNDERLYING]=$DEVICE
        # Record the xvdX alias too, since the API name may be given either way.
        UNDERLYING2=$( echo $UNDERLYING | sed "s/sd/xvd/" )
        EBSMAP[$UNDERLYING2]=$DEVICE
    done
}

DEVS=""

# Look for ephemeral NVMe disks
EPHEMERAL_DISKS=$( lsblk -d -o NAME,SIZE,TYPE,MODEL,SERIAL | grep -E "Amazon EC2 NVMe Instance Storage" | awk '{ print "/dev/"$1 }' )

if [[ "${EPHEMERAL_DISKS}" != "" ]]; then
    # We have some ephemeral disks lets use them
    # This is the happy path
    DEVS="${EPHEMERAL_DISKS}"
else
    # Looking for the hard-coded volumes above to come attached.  They
    # could be attached immediately, or it could take a "couple of minutes"
    # as they are created and attached by terraform.
    # Checking for both the normal attachment and the NVMe form.
    for VOLUME in $HOT_EBS_VOLUMES; do
        DONE=0
        TRIES=0
        # Up to 20 tries x 10s sleep = ~200 seconds per volume.
        while [[ $DONE -ne 1 ]] && [[ $TRIES -lt 20 ]]; do
            echo "Looking for $VOLUME to come attached"
            # Re-scan each pass: the volume may have just appeared.
            make_nvme_ebs_map
            #echo "------- current nvme/ebs map -------"
            #for K in "${!EBSMAP[@]}"; do echo $K = ${EBSMAP[$K]} ; done
            #echo "------- end current nvme/ebs map -------"
            if [[ -b /dev/$VOLUME ]]; then
                DEVS="/dev/$VOLUME $DEVS"
                DONE=1
            elif [[ -b /dev/${EBSMAP[$VOLUME]} ]]; then
                DEVS="/dev/${EBSMAP[$VOLUME]} $DEVS"
                DONE=1
            else
                sleep 10
                TRIES=$(( TRIES + 1 ))
            fi
        done
    done
fi

if [[ "$DEVS" == "" ]]; then
    echo "Failed to enumerate possible devices, oops"
    exit 1
fi

DEVCOUNT=$( echo $DEVS | wc -w )

# See if the volume group already exists - if not let's make it
if ! vgs --noheadings ${HOT_VG_NAME} >/dev/null 2>&1; then
    echo "Making VG on devices ${DEVS}"
    # DEVS is intentionally unquoted: it is a whitespace-separated device list.
    vgcreate ${HOT_VG_NAME} ${DEVS}
fi

# See if the logical volume already exists - if not make it
# and also make the filesystem
if ! lvs --noheadings ${HOT_VG_NAME}/${HOT_LV_NAME} >/dev/null 2>&1; then
    echo "Making LV"
    lvcreate -l 100%FREE --stripes $DEVCOUNT --name ${HOT_LV_NAME} ${HOT_VG_NAME}
    mkfs -t ext4 /dev/${HOT_VG_NAME}/${HOT_LV_NAME}
fi

if ! grep -E -q "/dev/${HOT_VG_NAME}/${HOT_LV_NAME}" /etc/fstab; then
    echo "Adding to fstab"
    # FIX: use the variables here; the old line hard-coded the VG/LV names,
    # so renaming them at the top would have desynced this from the grep above.
    echo "/dev/${HOT_VG_NAME}/${HOT_LV_NAME} /opt/splunkdata/hot ext4 nofail,noatime 0 0" >> /etc/fstab
fi

if [[ ! -d /opt/splunkdata/hot ]]; then
    echo "Creating mount directories"
    mkdir -p /opt/splunkdata/hot
fi

if ! mountpoint /opt/splunkdata/hot >/dev/null 2>&1; then
    echo "Mounting it"
    mount /opt/splunkdata/hot
fi

# Looking for splunk user, trying to fix up ownerships in case they got lost.
# This commonly happens when an ephemeral storage box loses its ephemeral
# storage but everything else survives.
# (getent exits 0 iff the user exists; no need to pipe through grep.)
if getent passwd splunk >/dev/null; then
    echo "Changing ownership of /opt/splunkdata and /opt/splunkdata/hot"
    chown splunk:splunk /opt/splunkdata
    chown splunk:splunk /opt/splunkdata/hot
fi