# Some instance variables
locals {
  ami_selection   = "minion" # master, minion, ...
  repo_drive_size = 10       # Consider changing volume type to SC1 if size is ever > 500GB
}

# Rather than pass in the aws security group, we just look it up. This will
# probably be useful other places, as well.
data "aws_security_group" "typical-host" {
  name   = "typical-host"
  vpc_id = var.vpc_id
}

# Use the default EBS key
data "aws_kms_key" "ebs-key" {
  key_id = "alias/ebs_root_encrypt_decrypt"
}

resource "aws_network_interface" "instance" {
  subnet_id = var.subnets[0]
  security_groups = [
    data.aws_security_group.typical-host.id,
    aws_security_group.repo_server_security_group.id,
  ]
  description = var.instance_name
  tags        = merge(var.standard_tags, var.tags, { Name = var.instance_name })
}

resource "aws_eip" "instance" {
  # NOTE(review): `vpc = true` is deprecated in AWS provider v4+ in favor of
  # `domain = "vpc"` — confirm the module's provider constraint before migrating.
  vpc  = true
  tags = merge(var.standard_tags, var.tags, { Name = var.instance_name })
}

resource "aws_eip_association" "instance" {
  network_interface_id = aws_network_interface.instance.id
  allocation_id        = aws_eip.instance.id
}

resource "aws_instance" "instance" {
  tenancy                              = "default"
  ebs_optimized                        = true
  disable_api_termination              = var.instance_termination_protection
  instance_initiated_shutdown_behavior = "stop"
  instance_type                        = var.instance_type
  key_name                             = "msoc-build"
  monitoring                           = false
  iam_instance_profile                 = "msoc-default-instance-profile"
  ami                                  = local.ami_map[local.ami_selection]

  # We need to ignore ebs_block_device changes, because if the AMI changes, so does the snapshot_id.
  # If they add a feature to block more specific changes (eg `ebs_block_devices[*].snapshot_id`), then
  # that could be removed.
  lifecycle {
    ignore_changes = [
      ami,
      key_name,
      user_data,
      ebs_block_device,
    ]
  }

  # These device definitions are optional, but added for clarity.
  root_block_device {
    volume_type = "gp2"
    #volume_size = "60"
    delete_on_termination = true
    encrypted             = true
    kms_key_id            = data.aws_kms_key.ebs-key.arn
  }

  # swap
  ebs_block_device {
    device_name = "/dev/xvdm"
    #volume_size = 48
    delete_on_termination = true
    encrypted             = true
    kms_key_id            = data.aws_kms_key.ebs-key.arn
    # Snapshot IDs need to be grabbed from the ami, or it will replace every time. It's ugly.
    # This may prompt replacement when the AMI is updated.
    # See:
    #   https://github.com/hashicorp/terraform/issues/19958
    #   https://github.com/terraform-providers/terraform-provider-aws/issues/13118
    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdm"].ebs.snapshot_id
  }

  # /home
  ebs_block_device {
    device_name = "/dev/xvdn"
    # volume_size = xx
    delete_on_termination = true
    encrypted             = true
    kms_key_id            = data.aws_kms_key.ebs-key.arn
    snapshot_id           = local.block_device_mappings[local.ami_selection]["/dev/xvdn"].ebs.snapshot_id
  }

  # /var
  ebs_block_device {
    device_name = "/dev/xvdo"
    # volume_size = xx
    delete_on_termination = true
    encrypted             = true
    kms_key_id            = data.aws_kms_key.ebs-key.arn
    snapshot_id           = local.block_device_mappings[local.ami_selection]["/dev/xvdo"].ebs.snapshot_id
  }

  # /var/tmp
  ebs_block_device {
    device_name = "/dev/xvdp"
    # volume_size = xx
    delete_on_termination = true
    encrypted             = true
    kms_key_id            = data.aws_kms_key.ebs-key.arn
    snapshot_id           = local.block_device_mappings[local.ami_selection]["/dev/xvdp"].ebs.snapshot_id
  }

  # /var/log
  ebs_block_device {
    device_name = "/dev/xvdq"
    # volume_size = xx
    delete_on_termination = true
    encrypted             = true
    kms_key_id            = data.aws_kms_key.ebs-key.arn
    snapshot_id           = local.block_device_mappings[local.ami_selection]["/dev/xvdq"].ebs.snapshot_id
  }

  # /var/log/audit
  ebs_block_device {
    device_name = "/dev/xvdr"
    # volume_size = xx
    delete_on_termination = true
    encrypted             = true
    kms_key_id            = data.aws_kms_key.ebs-key.arn
    snapshot_id           = local.block_device_mappings[local.ami_selection]["/dev/xvdr"].ebs.snapshot_id
  }

  # /tmp
  ebs_block_device {
    device_name = "/dev/xvds"
    # volume_size = xx
    delete_on_termination = true
    encrypted             = true
    kms_key_id            = data.aws_kms_key.ebs-key.arn
    snapshot_id           = local.block_device_mappings[local.ami_selection]["/dev/xvds"].ebs.snapshot_id
  }

  network_interface {
    device_index         = 0
    network_interface_id = aws_network_interface.instance.id
  }

  user_data = data.template_cloudinit_config.cloud_init_config.rendered

  tags        = merge(var.standard_tags, var.tags, { Name = var.instance_name })
  volume_tags = merge(var.standard_tags, var.tags, { Name = var.instance_name })
}

module "private_dns_record" {
  source          = "../../submodules/dns/private_A_record"
  name            = var.instance_name
  ip_addresses    = [aws_instance.instance.private_ip]
  dns_info        = var.dns_info
  reverse_enabled = var.reverse_enabled

  providers = {
    aws.c2 = aws.c2
  }
}

module "public_dns_record" {
  source       = "../../submodules/dns/public_A_record"
  name         = var.instance_name
  ip_addresses = [aws_eip.instance.public_ip]
  dns_info     = var.dns_info

  providers = {
    aws.mdr-common-services-commercial = aws.mdr-common-services-commercial
  }
}

# The cloud-init data is to prepare the instance for use.
# NOTE(review): the template_file data source is deprecated in favor of the
# built-in templatefile() function — consider migrating when touching this.
data "template_file" "cloud_init" {
  # Should these be in a common directory? I suspect they'd be reusable
  template = file("${path.module}/cloud-init/cloud-init.tpl")

  vars = {
    hostname            = var.instance_name
    fqdn                = "${var.instance_name}.${var.dns_info["private"]["zone"]}"
    environment         = var.environment
    salt_master         = var.salt_master
    proxy               = var.proxy
    aws_partition       = var.aws_partition
    aws_partition_alias = var.aws_partition_alias
    aws_region          = var.aws_region
  }
}

# Render a multi-part cloud-init config making use of the part
# above, and other source files
data "template_cloudinit_config" "cloud_init_config" {
  gzip          = true
  base64_encode = true

  # Main cloud-config configuration file.
part { filename = "init.cfg" content_type = "text/cloud-config" content = data.template_file.cloud_init.rendered } part { content_type = "text/cloud-boothook" content = file("${path.module}/cloud-init/repo_server_volumes.boothook") } } resource "aws_security_group" "repo_server_security_group" { name = "repo_server_security_group" description = "Security Group for the Repository Server(s)" vpc_id = var.vpc_id tags = merge(var.standard_tags, var.tags) } resource "aws_security_group_rule" "http-in" { description = "inbound repository requests" type = "ingress" from_port = 80 to_port = 80 protocol = "tcp" cidr_blocks = toset(concat([ "10.0.0.0/8" ], var.repo_server_whitelist)) security_group_id = aws_security_group.repo_server_security_group.id } resource "aws_security_group_rule" "http-in-external-c2-users" { # This deserves some explanation. Terraform "for_each" expects to be # getting as input a map of values to iterate over as part of the foreach. # The keys of the map are used to name each of these objects created. Looking # in the terraform plan output of a for_each you'll see things like: # # aws_security_group_rule.resource_name["key-value-from-foreach"] will be created # # Our c2_services_external_ips is a list of maps, not a map of maps. The for-expression # makes a new thing that is a map of maps, where the key value is the description with # blanks removed. # # We could have made the variable more natively-friendly to for_each but this seemed # like a better solution for what we were trying to accomplish. 
for_each = { for s in var.c2_services_external_ips : replace(s.description,"/\\s*/","") => s } description = "inbound repository requests - ${each.value.description}" type = "ingress" from_port = 80 to_port = 80 protocol = "tcp" cidr_blocks = each.value.cidr_blocks security_group_id = aws_security_group.repo_server_security_group.id } resource "aws_security_group_rule" "https-in" { description = "inbound repository requests" type = "ingress" from_port = 443 to_port = 443 protocol = "tcp" cidr_blocks = toset(concat([ "10.0.0.0/8" ], var.repo_server_whitelist)) security_group_id = aws_security_group.repo_server_security_group.id } resource "aws_security_group_rule" "https-in-external-c2-users" { for_each = { for s in var.c2_services_external_ips : replace(s.description,"/\\s*/","") => s } description = "inbound repository requests - ${each.value.description}" type = "ingress" from_port = 443 to_port = 443 protocol = "tcp" cidr_blocks = each.value.cidr_blocks security_group_id = aws_security_group.repo_server_security_group.id } # Repo server has an extra volume that is created separately, to keep it from being destroyed # with the instance. resource "aws_ebs_volume" "repo_server_drive" { availability_zone = aws_instance.instance.availability_zone size = local.repo_drive_size type = "gp2" # consider moving to sc1 if this is ever > 500GB #snapshot_id = "${data.aws_ebs_snapshot.repo_snapshot.id}" tags = merge(var.standard_tags, var.tags, { Name = "${var.instance_name}-repo_volume-_var_www" }) lifecycle { ignore_changes = [ snapshot_id ] } } resource "aws_volume_attachment" "repo_attachment" { device_name = "/dev/xvdf" volume_id = aws_ebs_volume.repo_server_drive.id instance_id = aws_instance.instance.id force_detach = true }