Browse Source

Moves /opt/vmray onto its own dedicated partition

To be tagged v3.2.0
Fred Damstra [afs macbook] 3 years ago
parent
commit
e4cf11eb47

+ 80 - 0
base/vmray_instances/cloud-init/opt_vmray.boothook

@@ -0,0 +1,80 @@
+#!/bin/bash
+#
+#
+exec > /dev/console
+exec 2>&1
+
+declare -A EBSMAP
+
+# Build a map of EBS NVMe disks from their AWS-API-name to their NVMe name
+# this makes an associative array (like a python hash) of the
+# sdX/xvdX name you'd set in AWS API to the corresponding nvmeX name
+# Thanks Fred for the awesome id-ctrl stuff I'd never seen before
+#
+# One interesting side effect observed:  the id-ctrl output is different when
+# volumes are attached at boot time (no /dev/) versus attached after the OS
+# is started (includes /dev/)
+#
+# NOTE: the name keeps the original "nve" spelling (presumably "nvme" was
+# intended) so the existing caller still resolves.
+function make_nve_ebs_map {
+        for DEVICE in $( lsblk -d -o NAME,MODEL -n | grep -E "Elastic Block Store" | awk '{ print $1 }' ); do
+                # Bytes 3073-3104 of the raw id-ctrl output are the
+                # vendor-specific field where AWS reports the API device name
+                # (e.g. "sdf" or "/dev/sdf"); strip spaces and any /dev/ prefix.
+                UNDERLYING=$( nvme id-ctrl --raw-binary "/dev/${DEVICE}" 2>/dev/null | cut -c 3073-3104 | tr -d ' ' | sed "s#/dev/##" )
+
+                # Skip devices where id-ctrl produced no name: an empty key
+                # would be a fatal "bad array subscript" error in bash.
+                if [[ -z "${UNDERLYING}" ]]; then
+                        continue
+                fi
+
+                EBSMAP[$UNDERLYING]=$DEVICE
+                # Also register the xvdX spelling so either API name matches.
+                UNDERLYING2=$( echo "$UNDERLYING" | sed "s/sd/xvd/" )
+                EBSMAP[$UNDERLYING2]=$DEVICE
+        done
+}
+
+function do_the_mount
+{
+	VOL_LABEL=$1
+	VOLUME=$2
+	MOUNTPOINT=$3
+
+
+	DONE=0
+	TRIES=0
+	while [[ $DONE -ne 1 ]] && [[ $TRIES -lt 20 ]]; do
+		echo "Looking for $VOLUME to come attached"
+		make_nve_ebs_map
+
+		#echo "------- current nvme/ebs map -------"
+		#for K in "${!EBSMAP[@]}"; do echo $K  = ${EBSMAP[$K]} ; done
+		#echo "------- end current nvme/ebs map -------"
+
+		if [[ -b /dev/$VOLUME ]]; then
+			DEV="/dev/$VOLUME"
+			DONE=1
+		elif [[ -b /dev/${EBSMAP[$VOLUME]} ]]; then
+			DEV="/dev/${EBSMAP[$VOLUME]}"
+			DONE=1
+		else
+			sleep 10
+			TRIES=$(( $TRIES + 1 ))
+		fi
+
+		echo "Volume $VOLUME available at $DEV"
+	done
+
+	if ! [[ -d ${MOUNTPOINT} ]]; then
+		echo "Creating mount directory ${MOUNTPOINT}"
+		mkdir -p ${MOUNTPOINT}
+	fi
+
+	if ! blkid -l -t LABEL=${VOL_LABEL}; then
+		echo "Making filesystem for LABEL=${VOL_LABEL} on ${DEV}"
+		mkfs.xfs -L ${VOL_LABEL} ${DEV}
+	fi
+
+	if ! egrep -q "LABEL=${VOL_LABEL}" /etc/fstab; then
+		echo "Adding LABEL=${VOL_LABEL} to /etc/fstab"
+		echo "LABEL=${VOL_LABEL}       ${MOUNTPOINT}    xfs    noatime,nofail  0 2" >> /etc/fstab
+	fi
+
+	if ! mountpoint ${MOUNTPOINT} >/dev/null 2>&1; then
+		echo "Mounting ${MOUNTPOINT}"
+		mount ${MOUNTPOINT}
+	fi
+
+}
+
+do_the_mount opt_vmray xvdf /opt/vmray

+ 23 - 5
base/vmray_instances/server.tf

@@ -46,6 +46,23 @@ resource "aws_network_interface" "vmray-server-interface" {
   tags = merge(var.standard_tags, var.tags, { Name = "vmray-server" })
 }
 
+# Make /opt/vmray separate from the instance for greater margin of safety
+resource "aws_ebs_volume" "server_opt_vmray" {
+  # NOTE(review): assumes the server instance is placed in var.azs[0] —
+  # attachment requires the volume and instance to share an AZ; verify.
+  availability_zone = var.azs[0]
+  size = var.vmray_server_opt_vmray_size
+  type = "gp3"
+  encrypted = true
+  kms_key_id = data.aws_kms_key.ebs-key.arn
+
+  # Path/Device tags document where the cloud-init boothook mounts this volume.
+  tags = merge(var.standard_tags, var.tags, { Name = "vmray-server", Path = "/opt/vmray", Device = "/dev/xvdf" })
+}
+
+# Attach at /dev/xvdf; the opt_vmray.boothook looks for this device name.
+resource "aws_volume_attachment" "server_opt_vmray" {
+  device_name = "/dev/xvdf"
+  volume_id   = aws_ebs_volume.server_opt_vmray.id
+  instance_id = aws_instance.vmray-server-instance.id
+}
+
 resource "aws_instance" "vmray-server-instance" {
   tenancy = "default"
   ebs_optimized = true
@@ -201,11 +218,12 @@ data "template_cloudinit_config" "cloud-init-vmray-server" {
     )
   }
 
-  # Additional parts as needed
-  #part {
-  #  content_type = "text/x-shellscript"
-  #  content      = "ffbaz"
-  #}
+  # mount /dev/xvdf at /opt/vmray
+  part {
+    content_type = "text/cloud-boothook"
+    content      = file("${path.module}/cloud-init/opt_vmray.boothook")
+  }
+
 }
 
 module "private_dns_record_vmray_server" {

+ 12 - 0
base/vmray_instances/vars.tf

@@ -3,6 +3,18 @@ variable "vmray_worker_instance_count" {
   type        = number
 }
 
+# Size (in GiB, per aws_ebs_volume) of the dedicated /opt/vmray volume on
+# the server.
+variable "vmray_server_opt_vmray_size" {
+  description = "Size of /opt/vmray directory on the server."
+  type = number
+  default = 30 # I don't expect images here, so not much space should be required
+}
+
+# Size (in GiB) of the dedicated /opt/vmray volume on each worker node.
+variable "vmray_worker_opt_vmray_size" {
+  description = "Size of /opt/vmray directory on the worker nodes"
+  type = number
+  default = 300 # Stores images, which are ~ 25GB each
+}
+
 variable "tags" {
   description = "Tags to add to the resource (in addition to global standard tags)"
   type        = map

+ 35 - 5
base/vmray_instances/worker.tf

@@ -6,6 +6,25 @@ resource "aws_network_interface" "vmray-worker-interface" {
   tags = merge(var.standard_tags, var.tags, { Name = "vmray-worker" })
 }
 
+# Make /opt/vmray separate from the instance for greater margin of safety
+# NOTE(review): the worker instance resource below ALSO declares an inline
+# ebs_block_device at /dev/xvdf — both mechanisms target the same device
+# name, which will conflict. Confirm which is intended and drop the other.
+resource "aws_ebs_volume" "worker_opt_vmray" {
+  count = var.vmray_worker_instance_count
+  # Round-robin volumes across three AZs — presumably matching the workers'
+  # AZ placement; verify against the instance resource.
+  availability_zone = var.azs[count.index % 3]
+  size = var.vmray_worker_opt_vmray_size
+  type = "gp3"
+  encrypted = true
+  kms_key_id = data.aws_kms_key.ebs-key.arn
+
+  # Path/Device tags document where the cloud-init boothook mounts this volume.
+  tags = merge(var.standard_tags, var.tags, { Name = "vmray-worker-${count.index}", Path = "/opt/vmray", Device = "/dev/xvdf" })
+}
+
+# One attachment per worker, pairing volume N with instance N at /dev/xvdf.
+resource "aws_volume_attachment" "worker_opt_vmray" {
+  count = var.vmray_worker_instance_count
+  device_name = "/dev/xvdf"
+  volume_id   = aws_ebs_volume.worker_opt_vmray[count.index].id
+  instance_id = aws_instance.vmray-worker-instance[count.index].id
+}
+
 resource "aws_instance" "vmray-worker-instance" {
   count = var.vmray_worker_instance_count
   tenancy = "default"
@@ -31,6 +50,16 @@ resource "aws_instance" "vmray-worker-instance" {
       kms_key_id = data.aws_kms_key.ebs-key.arn
   }
 
+  ebs_block_device {
+    # /opt/vmray
+    # Note: Not in AMI
+    # NOTE(review): /dev/xvdf is also attached via the standalone
+    # aws_ebs_volume / aws_volume_attachment "worker_opt_vmray" resources in
+    # this same file; declaring both for one device name will conflict.
+    # Confirm which mechanism is intended and remove the other — note this
+    # inline volume is delete_on_termination, the standalone one is not.
+    device_name = "/dev/xvdf"
+    volume_size = var.vmray_worker_opt_vmray_size
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+  }
+
   ebs_block_device {
     # swap
     device_name = "/dev/xvdm"
@@ -143,11 +172,12 @@ data "template_cloudinit_config" "cloud-init-vmray-worker" {
     )
   }
 
-  # Additional parts as needed
-  #part {
-  #  content_type = "text/x-shellscript"
-  #  content      = "ffbaz"
-  #}
+  # mount /dev/xvdf at /opt/vmray
+  part {
+    content_type = "text/cloud-boothook"
+    content      = file("${path.module}/cloud-init/opt_vmray.boothook")
+  }
+
 }
 
 module "private_dns_record_vmray_worker" {