# Rather than pass in the aws security group, we just look it up. This will
# probably be useful other places, as well.
data "aws_security_group" "typical-host" {
  name   = "typical-host"
  vpc_id = var.vpc_id
}

# Use the default EBS key
data "aws_kms_key" "ebs-key" {
  key_id = "alias/ebs_root_encrypt_decrypt"
}

resource "aws_network_interface" "instance" {
  subnet_id = var.subnets[0]
  security_groups = [
    data.aws_security_group.typical-host.id,
    aws_security_group.salt_master_security_group.id,
  ]
  description = var.instance_name
  tags        = merge(var.standard_tags, var.tags, { Name = var.instance_name })
}

resource "aws_eip" "instance" {
  vpc  = true
  tags = merge(var.standard_tags, var.tags, { Name = var.instance_name })
}

resource "aws_eip_association" "instance" {
  network_interface_id = aws_network_interface.instance.id
  allocation_id        = aws_eip.instance.id
}

resource "aws_instance" "instance" {
  #availability_zone                  = var.azs[count.index % 2]
  tenancy                              = "default"
  ebs_optimized                        = true
  disable_api_termination              = var.instance_termination_protection
  instance_initiated_shutdown_behavior = "stop"
  instance_type                        = var.instance_type
  key_name                             = "msoc-build"
  monitoring                           = false
  iam_instance_profile                 = "salt-master-instance-profile"
  ami                                  = local.ami_map["master"]

  # We need to ignore ebs_block_device changes, because if the AMI changes, so
  # does the snapshot_id. If a feature is ever added to ignore more specific
  # changes (eg `ebs_block_device[*].snapshot_id`), this could be narrowed.
  lifecycle {
    ignore_changes = [ami, key_name, user_data, ebs_block_device]
  }

  # These device definitions are optional, but added for clarity.
  root_block_device {
    volume_type = "gp2"
    #volume_size          = "60"
    delete_on_termination = true
    encrypted             = true
    kms_key_id            = data.aws_kms_key.ebs-key.arn
  }

  ebs_block_device {
    # swap
    device_name           = "/dev/xvdm"
    volume_size           = 48
    delete_on_termination = true
    encrypted             = true
    kms_key_id            = data.aws_kms_key.ebs-key.arn
    # Snapshot IDs need to be grabbed from the AMI, or the volume will be
    # replaced on every apply. It's ugly, and it may still prompt replacement
    # when the AMI is updated. See:
    #   https://github.com/hashicorp/terraform/issues/19958
    #   https://github.com/terraform-providers/terraform-provider-aws/issues/13118
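    # For reference, a minimal sketch of how local.block_device_mappings could
    # be derived from the AMI. The data source name (data.aws_ami.master) is
    # an assumption; this module actually defines the local elsewhere.
    #
    #   locals {
    #     block_device_mappings = {
    #       master = {
    #         for bdm in data.aws_ami.master.block_device_mappings :
    #         bdm.device_name => bdm
    #       }
    #     }
    #   }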
    snapshot_id = local.block_device_mappings["master"]["/dev/xvdm"].ebs.snapshot_id
  }

  ebs_block_device {
    # /home
    device_name = "/dev/xvdn"
    #volume_size          = xx
    delete_on_termination = true
    encrypted             = true
    kms_key_id            = data.aws_kms_key.ebs-key.arn
    snapshot_id           = local.block_device_mappings["master"]["/dev/xvdn"].ebs.snapshot_id
  }

  ebs_block_device {
    # /var
    device_name = "/dev/xvdo"
    #volume_size          = xx
    delete_on_termination = true
    encrypted             = true
    kms_key_id            = data.aws_kms_key.ebs-key.arn
    snapshot_id           = local.block_device_mappings["master"]["/dev/xvdo"].ebs.snapshot_id
  }

  ebs_block_device {
    # /var/tmp
    device_name = "/dev/xvdp"
    #volume_size          = xx
    delete_on_termination = true
    encrypted             = true
    kms_key_id            = data.aws_kms_key.ebs-key.arn
    snapshot_id           = local.block_device_mappings["master"]["/dev/xvdp"].ebs.snapshot_id
  }

  ebs_block_device {
    # /var/log
    device_name = "/dev/xvdq"
    #volume_size          = xx
    delete_on_termination = true
    encrypted             = true
    kms_key_id            = data.aws_kms_key.ebs-key.arn
    snapshot_id           = local.block_device_mappings["master"]["/dev/xvdq"].ebs.snapshot_id
  }

  ebs_block_device {
    # /var/log/audit
    device_name = "/dev/xvdr"
    #volume_size          = xx
    delete_on_termination = true
    encrypted             = true
    kms_key_id            = data.aws_kms_key.ebs-key.arn
    snapshot_id           = local.block_device_mappings["master"]["/dev/xvdr"].ebs.snapshot_id
  }

  ebs_block_device {
    # /tmp
    device_name = "/dev/xvds"
    #volume_size          = xx
    delete_on_termination = true
    encrypted             = true
    kms_key_id            = data.aws_kms_key.ebs-key.arn
    snapshot_id           = local.block_device_mappings["master"]["/dev/xvds"].ebs.snapshot_id
  }

  network_interface {
    device_index         = 0
    network_interface_id = aws_network_interface.instance.id
  }

  user_data = data.template_cloudinit_config.salt_master_cloud_init_config.rendered

  tags = merge(var.standard_tags, var.tags, { Name = var.instance_name })
}

# Uncomment this when we are ready to make the change.
# module "private_dns_record" {
#   source          = "../../submodules/dns/private_A_record"
#   name            = var.instance_name
#   ip_addresses    = [aws_instance.instance.private_ip]
#   dns_info        = var.dns_info
#   reverse_enabled = var.reverse_enabled
#   providers = {
#     aws.c2 = aws.c2
#   }
# }
#
# module "public_dns_record" {
#   source       = "../../submodules/dns/public_A_record"
#   name         = var.instance_name
#   ip_addresses = [aws_eip.instance.public_ip]
#   dns_info     = var.dns_info
#   providers = {
#     aws.mdr-common-services-commercial = aws.mdr-common-services-commercial
#   }
# }

# The cloud-init data prepares the Salt Master for use. This includes secrets
# from AWS Secrets Manager, GitHub connectivity via SSH, and prepopulating the
# salt master private key. May history judge me kindly.
data "template_file" "salt_master_cloud_init" {
  # Should these be in a common directory? I suspect they'd be reusable.
  template = file("${path.module}/cloud-init/cloud_init_salt_master.tpl")

  vars = {
    hostname            = var.instance_name
    fqdn                = "${var.instance_name}.${var.dns_info["private"]["zone"]}"
    environment         = var.environment
    salt_master         = var.salt_master
    proxy               = var.proxy
    aws_partition       = var.aws_partition
    aws_partition_alias = var.aws_partition_alias
  }
}
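# Note: the hashicorp/template provider used above is archived. If this module
# is ever migrated, the built-in templatefile() function is the usual
# replacement; a minimal sketch of the equivalent (an assumption about a
# possible future change, not part of this module today):
#
#   locals {
#     salt_master_cloud_init = templatefile(
#       "${path.module}/cloud-init/cloud_init_salt_master.tpl",
#       {
#         hostname            = var.instance_name
#         fqdn                = "${var.instance_name}.${var.dns_info["private"]["zone"]}"
#         environment         = var.environment
#         salt_master         = var.salt_master
#         proxy               = var.proxy
#         aws_partition       = var.aws_partition
#         aws_partition_alias = var.aws_partition_alias
#       }
#     )
#   }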
# Render a multi-part cloud-init config making use of the part above, and
# other source files.
data "template_cloudinit_config" "salt_master_cloud_init_config" {
  gzip          = true
  base64_encode = true

  # Main cloud-config configuration file.
  part {
    filename     = "init.cfg"
    content_type = "text/cloud-config"
    content      = data.template_file.salt_master_cloud_init.rendered
  }

  # Additional parts as needed
  part {
    content_type = "text/x-shellscript"
    content      = file("${path.module}/cloud-init/provision_salt_master.sh")
  }
}

resource "aws_security_group" "salt_master_security_group" {
  name        = "salt_master_security_group"
  description = "Security Group for Salt Master(s)"
  vpc_id      = var.vpc_id
  tags        = merge(var.standard_tags, var.tags)
}

resource "aws_security_group_rule" "http-out" {
  description       = "For endpoints and troubleshooting"
  type              = "egress"
  from_port         = 80
  to_port           = 80
  protocol          = "tcp"
  cidr_blocks       = ["10.0.0.0/8"]
  security_group_id = aws_security_group.salt_master_security_group.id
}

resource "aws_security_group_rule" "https-out" {
  description       = "For endpoints and troubleshooting"
  type              = "egress"
  from_port         = 443
  to_port           = 443
  protocol          = "tcp"
  cidr_blocks       = ["10.0.0.0/8"]
  security_group_id = aws_security_group.salt_master_security_group.id
}

resource "aws_security_group_rule" "saltstack" {
  description       = "SaltStack"
  type              = "ingress"
  from_port         = 4505
  to_port           = 4506
  protocol          = "tcp"
  cidr_blocks       = ["10.0.0.0/8"]
  security_group_id = aws_security_group.salt_master_security_group.id
}

resource "aws_security_group_rule" "saltstack-afs-pop" {
  description       = "SaltStack - AFS POP"
  type              = "ingress"
  from_port         = 4505
  to_port           = 4506
  protocol          = "tcp"
  cidr_blocks       = var.afs_pop
  security_group_id = aws_security_group.salt_master_security_group.id
}

resource "aws_security_group_rule" "saltstack-afs-azure-pop" {
  description       = "SaltStack - AFS Azure POP"
  type              = "ingress"
  from_port         = 4505
  to_port           = 4506
  protocol          = "tcp"
  cidr_blocks       = var.afs_azure_pop
  security_group_id = aws_security_group.salt_master_security_group.id
}

resource "aws_security_group_rule" "saltstack-nga-pop" {
  description       = "SaltStack - NGA POP"
  type              = "ingress"
  from_port         = 4505
  to_port           = 4506
  protocol          = "tcp"
  cidr_blocks       = var.nga_pop
  security_group_id = aws_security_group.salt_master_security_group.id
}

resource "aws_security_group_rule" "saltstack-xdr-interconnects" {
  description       = "SaltStack - XDR Interconnects"
  type              = "ingress"
  from_port         = 4505
  to_port           = 4506
  protocol          = "tcp"
  cidr_blocks       = var.xdr_interconnect
  security_group_id = aws_security_group.salt_master_security_group.id
}

# TODO: make this better.
# For now, just allow 22 outbound anywhere.
resource "aws_security_group_rule" "saltstack-github" {
  description       = "SaltStack - Github Access"
  type              = "egress"
  from_port         = 22
  to_port           = 22
  protocol          = "tcp"
  cidr_blocks       = ["0.0.0.0/0"]
  security_group_id = aws_security_group.salt_master_security_group.id
}
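# One way to address the TODO above would be to restrict egress to GitHub's
# published git-over-SSH ranges (see https://api.github.com/meta). A sketch,
# with the CIDRs supplied via a hypothetical var.github_cidrs rather than
# hardcoded, since GitHub's ranges change over time:
#
#   resource "aws_security_group_rule" "saltstack-github" {
#     description       = "SaltStack - Github Access"
#     type              = "egress"
#     from_port         = 22
#     to_port           = 22
#     protocol          = "tcp"
#     cidr_blocks       = var.github_cidrs
#     security_group_id = aws_security_group.salt_master_security_group.id
#   }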