# Some instance variables
locals {
  ami_selection   = "minion" # master, minion, ...
  repo_drive_size = 30       # Consider changing volume type to SC1 if size is ever > 500GB
}

# Rather than pass in the AWS security group, we just look it up. This will
# probably be useful in other places as well.
data "aws_security_group" "typical-host" {
  name   = "typical-host"
  vpc_id = var.vpc_id
}

# Use the default EBS key
data "aws_kms_key" "ebs-key" {
  key_id = "alias/ebs_root_encrypt_decrypt"
}

resource "aws_network_interface" "instance" {
  subnet_id       = var.public_subnets[0]
  security_groups = [data.aws_security_group.typical-host.id, aws_security_group.repo_server_security_group.id]
  description     = var.instance_name
  tags            = merge(local.standard_tags, var.tags, { Name = var.instance_name })
}

resource "aws_eip" "instance" {
  vpc  = true
  tags = merge(local.standard_tags, var.tags, { Name = var.instance_name })
}

resource "aws_eip_association" "instance" {
  network_interface_id = aws_network_interface.instance.id
  allocation_id        = aws_eip.instance.id
}
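# The AMI id and its block device mappings are looked up through local.ami_map and
# local.block_device_mappings, which are defined elsewhere in this module. A minimal
# sketch of how they could be built from the aws_ami data source is shown below for
# reference only; the data source name and filter values are assumptions, so this
# stays commented out.
#
# data "aws_ami" "minion" {
#   owners      = ["self"]
#   most_recent = true
#
#   filter {
#     name   = "name"
#     values = ["minion-*"]
#   }
# }
#
# locals {
#   ami_map = {
#     minion = data.aws_ami.minion.id
#   }
#
#   # Key each AMI's block device mappings by device name so snapshot IDs can be
#   # looked up per device in the ebs_block_device blocks below.
#   block_device_mappings = {
#     minion = { for bdm in data.aws_ami.minion.block_device_mappings : bdm.device_name => bdm }
#   }
# }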
resource "aws_instance" "instance" {
  tenancy                              = "default"
  ebs_optimized                        = true
  disable_api_termination              = var.instance_termination_protection
  instance_initiated_shutdown_behavior = "stop"
  instance_type                        = "t3a.xlarge"
  key_name                             = "msoc-build"
  monitoring                           = false
  iam_instance_profile                 = "msoc-default-instance-profile"
  ami                                  = local.ami_map[local.ami_selection]

  # We need to ignore ebs_block_device changes, because if the AMI changes, so does the snapshot_id.
  # If they add a feature to block more specific changes (eg `ebs_block_devices[*].snapshot_id`), then
  # that could be removed.
  lifecycle {
    ignore_changes = [ami, key_name, user_data, ebs_block_device]
  }

  metadata_options {
    http_endpoint = "enabled"
    http_tokens   = "optional" # tfsec:ignore:aws-ec2-enforce-http-token-imds Breaks salt
  }

  # These device definitions are optional, but added for clarity.
  root_block_device {
    volume_type = "gp3"
    #volume_size = "60"
    delete_on_termination = true
    encrypted             = true
    kms_key_id            = data.aws_kms_key.ebs-key.arn
  }

  ebs_block_device {
    # swap
    device_name = "/dev/xvdm"
    #volume_size = 48
    delete_on_termination = true
    encrypted             = true
    kms_key_id            = data.aws_kms_key.ebs-key.arn
    # Snapshot IDs need to be pulled from the AMI, or Terraform will want to replace the volume
    # on every apply. It's ugly, and it may still prompt replacement when the AMI is updated.
    # See:
    #   https://github.com/hashicorp/terraform/issues/19958
    #   https://github.com/terraform-providers/terraform-provider-aws/issues/13118
    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdm"].ebs.snapshot_id
  }

  ebs_block_device {
    # /home
    device_name = "/dev/xvdn"
    # volume_size = xx
    delete_on_termination = true
    encrypted             = true
    kms_key_id            = data.aws_kms_key.ebs-key.arn
    snapshot_id           = local.block_device_mappings[local.ami_selection]["/dev/xvdn"].ebs.snapshot_id
  }

  ebs_block_device {
    # /var
    device_name = "/dev/xvdo"
    # volume_size = xx
    delete_on_termination = true
    encrypted             = true
    kms_key_id            = data.aws_kms_key.ebs-key.arn
    snapshot_id           = local.block_device_mappings[local.ami_selection]["/dev/xvdo"].ebs.snapshot_id
  }

  ebs_block_device {
    # /var/tmp
    device_name = "/dev/xvdp"
    # volume_size = xx
    delete_on_termination = true
    encrypted             = true
    kms_key_id            = data.aws_kms_key.ebs-key.arn
    snapshot_id           = local.block_device_mappings[local.ami_selection]["/dev/xvdp"].ebs.snapshot_id
  }

  ebs_block_device {
    # /var/log
    device_name = "/dev/xvdq"
    # volume_size = xx
    delete_on_termination = true
    encrypted             = true
    kms_key_id            = data.aws_kms_key.ebs-key.arn
    snapshot_id           = local.block_device_mappings[local.ami_selection]["/dev/xvdq"].ebs.snapshot_id
  }

  ebs_block_device {
    # /var/log/audit
    device_name = "/dev/xvdr"
    # volume_size = xx
    delete_on_termination = true
    encrypted             = true
    kms_key_id            = data.aws_kms_key.ebs-key.arn
    snapshot_id           = local.block_device_mappings[local.ami_selection]["/dev/xvdr"].ebs.snapshot_id
  }

  ebs_block_device {
    # /tmp
    device_name = "/dev/xvds"
    # volume_size = xx
    delete_on_termination = true
    encrypted             = true
    kms_key_id            = data.aws_kms_key.ebs-key.arn
    snapshot_id           = local.block_device_mappings[local.ami_selection]["/dev/xvds"].ebs.snapshot_id
  }

  network_interface {
    device_index         = 0
    network_interface_id = aws_network_interface.instance.id
  }

  user_data = data.template_cloudinit_config.cloud_init_config.rendered

  tags        = merge(local.standard_tags, var.tags, var.instance_tags, { Name = var.instance_name })
  volume_tags = merge(local.standard_tags, var.tags, { Name = var.instance_name })
}

module "private_dns_record" {
  source          = "../../submodules/dns/private_A_record"
  name            = "${var.instance_name}-server"
  ip_addresses    = [aws_instance.instance.private_ip]
  dns_info        = var.dns_info
  reverse_enabled = var.reverse_enabled

  providers = {
    aws.c2 = aws.c2
  }
}
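# Note: the template_cloudinit_config data source below comes from the hashicorp/template
# provider, which is now archived. If this is ever migrated, the hashicorp/cloudinit
# provider's cloudinit_config data source accepts the same gzip/base64_encode/part
# arguments; a sketch of the rename (not applied here, and the user_data reference
# would change to data.cloudinit_config.cloud_init_config.rendered):
#
# data "cloudinit_config" "cloud_init_config" {
#   gzip          = true
#   base64_encode = true
#   # ... same part blocks as below ...
# }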
# Render a multi-part cloud-init config making use of the part above,
# and other source files
data "template_cloudinit_config" "cloud_init_config" {
  gzip          = true
  base64_encode = true

  # Main cloud-config configuration file.
  part {
    filename     = "init.cfg"
    content_type = "text/cloud-config"
    content = templatefile("${path.module}/cloud-init/cloud-init.tpl",
      {
        hostname            = var.instance_name
        fqdn                = "${var.instance_name}.${var.dns_info["private"]["zone"]}"
        environment         = var.environment
        salt_master         = local.salt_master
        proxy               = local.proxy
        aws_partition       = var.aws_partition
        aws_partition_alias = var.aws_partition_alias
        aws_region          = var.aws_region
      }
    )
  }

  part {
    content_type = "text/cloud-boothook"
    content      = file("${path.module}/cloud-init/repo_server_volumes.boothook")
  }
}

resource "aws_security_group" "repo_server_security_group" {
  name        = "repo_server_security_group"
  description = "Security group for the repository server(s), port 80"
  vpc_id      = var.vpc_id
  tags        = merge(local.standard_tags, var.tags)
}

resource "aws_security_group_rule" "http-in" {
  description              = "Inbound repository requests from the internal ALB"
  type                     = "ingress"
  from_port                = 80
  to_port                  = 80
  protocol                 = "tcp"
  source_security_group_id = aws_security_group.alb_internal.id
  security_group_id        = aws_security_group.repo_server_security_group.id
}

resource "aws_security_group_rule" "http-in-external" {
  description              = "Inbound repository requests from the external ALB"
  type                     = "ingress"
  from_port                = 80
  to_port                  = 80
  protocol                 = "tcp"
  source_security_group_id = module.elb.security_group_id
  security_group_id        = aws_security_group.repo_server_security_group.id
}

# The repo server has an extra volume that is created separately, to keep it
# from being destroyed with the instance.
resource "aws_ebs_volume" "repo_server_drive" {
  availability_zone = aws_instance.instance.availability_zone
  size              = local.repo_drive_size
  type              = "gp3" # consider moving to sc1 if this is ever > 500GB
  encrypted         = true
  kms_key_id        = data.aws_kms_key.ebs-key.arn
  #snapshot_id = "${data.aws_ebs_snapshot.repo_snapshot.id}"

  tags = merge(local.standard_tags, var.tags, { Name = "${var.instance_name}-repo_volume-_var_www" })

  lifecycle {
    ignore_changes = [snapshot_id]
  }
}

resource "aws_volume_attachment" "repo_attachment" {
  device_name  = "/dev/xvdf"
  volume_id    = aws_ebs_volume.repo_server_drive.id
  instance_id  = aws_instance.instance.id
  force_detach = true
}
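# A hypothetical example of calling this module, for illustration only. The module
# name, source path, and all values shown are assumptions; the input names match the
# variables referenced above (vpc_id, public_subnets, instance_name, dns_info, etc.),
# and the aws.c2 provider alias has to be passed through for the private DNS record.
#
# module "repo_server" {
#   source = "../modules/repo_server"
#
#   vpc_id                          = var.vpc_id
#   public_subnets                  = var.public_subnets
#   instance_name                   = "repo-0"
#   instance_termination_protection = true
#   environment                     = var.environment
#   aws_partition                   = var.aws_partition
#   aws_partition_alias             = var.aws_partition_alias
#   aws_region                      = var.aws_region
#   dns_info                        = var.dns_info
#   reverse_enabled                 = true
#   tags                            = { Project = "repo" }
#   instance_tags                   = {}
#
#   providers = {
#     aws    = aws
#     aws.c2 = aws.c2
#   }
# }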