123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202 |
# Primary ENI for each VMRay worker, spread round-robin across the
# available private subnets so workers are distributed over AZs.
resource "aws_network_interface" "vmray-worker-interface" {
  count = local.instance_count
  # Cycle through however many subnets were supplied instead of assuming
  # exactly three; this also prevents an out-of-range index when fewer
  # than three private subnets are configured.
  subnet_id       = var.private_subnets[count.index % length(var.private_subnets)]
  security_groups = [data.aws_security_group.typical-host.id, aws_security_group.vmray_worker_sg.id]
  description     = "vmray-worker"
  tags            = merge(local.standard_tags, var.tags, { Name = "vmray-worker" })
}
- # Make /opt/vmray separate from the instance for greater margin of safety
- #resource "aws_ebs_volume" "worker_opt_vmray" {
- # count = local.instance_count
- # availability_zone = var.azs[count.index % 3]
- # size = var.vmray_worker_opt_vmray_size
- # type = "gp3"
- # encrypted = true
- # kms_key_id = data.aws_kms_key.ebs-key.arn
- #
- # tags = merge(local.standard_tags, var.tags, { Name = "vmray-worker-${count.index}" })
- #}
- #resource "aws_volume_attachment" "worker_opt_vmray" {
- # count = local.instance_count
- # device_name = "/dev/xvdf"
- # volume_id = aws_ebs_volume.worker_opt_vmray[count.index].id
- # instance_id = aws_instance.vmray-worker-instance[count.index].id
- #}
# VMRay worker EC2 instances — one per worker, each attached to the
# matching ENI declared above and bootstrapped via the cloud-init config
# rendered below.
# NOTE(review): c5n.metal is presumably chosen because VMRay runs its own
# hypervisor and needs bare metal (no nested virtualization) — confirm.
resource "aws_instance" "vmray-worker-instance" {
  count                                = local.instance_count
  tenancy                              = "default"
  ebs_optimized                        = true
  # Termination protection is toggled per environment via a variable.
  disable_api_termination              = var.instance_termination_protection
  instance_initiated_shutdown_behavior = "stop"
  instance_type                        = "c5n.metal"
  key_name                             = "msoc-build"
  monitoring                           = false
  iam_instance_profile                 = module.instance_profile.profile_id
  ami                                  = data.aws_ami.ubuntu2004.image_id

  # We need to ignore ebs_block_device changes, because if the AMI changes, so does the snapshot_id.
  # If they add a feature to block more specific changes (eg `ebs_block_devices[*].snapshot_id`), then
  # that could be removed.
  # ami, key_name and user_data are also ignored so that AMI refreshes, key
  # rotation, or cloud-init edits do not force replacement of running workers.
  lifecycle { ignore_changes = [ami, key_name, user_data, ebs_block_device] }

  metadata_options {
    http_endpoint = "enabled"
    # tfsec:ignore:aws-ec2-enforce-http-token-imds Saltstack doesn't use s3 sources appropriately
    http_tokens = "optional"
  }

  # Root volume: larger in prod (300 GB) than in other environments (100 GB).
  root_block_device {
    volume_type           = "gp3"
    volume_size           = var.environment == "prod" ? 300 : 100
    delete_on_termination = true
    encrypted             = true
    kms_key_id            = data.aws_kms_key.ebs-key.arn
  }

  # Attached separately
  #ebs_block_device {
  #  # /opt/vmray
  #  # Note: Not in AMI
  #  device_name           = "/dev/xvdf"
  #  volume_size           = var.vmray_worker_opt_vmray_size
  #  delete_on_termination = true
  #  encrypted             = true
  #  kms_key_id            = data.aws_kms_key.ebs-key.arn
  #}

  ebs_block_device {
    # swap
    device_name = "/dev/xvdm"
    #volume_size = 48
    volume_type           = "gp3"
    delete_on_termination = true
    encrypted             = true
    kms_key_id            = data.aws_kms_key.ebs-key.arn
    # Snapshot IDs need to be grabbed from the ami, or it will replace every time. It's ugly.
    # This may prompt replacement when the AMI is updated.
    # See:
    #   https://github.com/hashicorp/terraform/issues/19958
    #   https://github.com/terraform-providers/terraform-provider-aws/issues/13118
    snapshot_id = local.block_device_mappings["/dev/xvdm"].ebs.snapshot_id
  }

  ebs_block_device {
    # /home
    device_name = "/dev/xvdn"
    # volume_size = xx
    volume_type           = "gp3"
    delete_on_termination = true
    encrypted             = true
    kms_key_id            = data.aws_kms_key.ebs-key.arn
    snapshot_id           = local.block_device_mappings["/dev/xvdn"].ebs.snapshot_id
  }

  # ebs_block_device {
  #   # /var
  #   device_name = "/dev/xvdo"
  #   # volume_size = xx
  #   volume_type           = "gp3"
  #   delete_on_termination = true
  #   encrypted             = true
  #   kms_key_id            = data.aws_kms_key.ebs-key.arn
  #   snapshot_id           = local.block_device_mappings["/dev/xvdo"].ebs.snapshot_id
  # }

  # ebs_block_device {
  #   # /var/tmp
  #   device_name = "/dev/xvdp"
  #   # volume_size = xx
  #   volume_type           = "gp3"
  #   delete_on_termination = true
  #   encrypted             = true
  #   kms_key_id            = data.aws_kms_key.ebs-key.arn
  #   snapshot_id           = local.block_device_mappings["/dev/xvdp"].ebs.snapshot_id
  # }

  ebs_block_device {
    # /var/log
    device_name = "/dev/xvdq"
    # volume_size = xx
    volume_type           = "gp3"
    delete_on_termination = true
    encrypted             = true
    kms_key_id            = data.aws_kms_key.ebs-key.arn
    snapshot_id           = local.block_device_mappings["/dev/xvdq"].ebs.snapshot_id
  }

  ebs_block_device {
    # /var/log/audit
    device_name = "/dev/xvdr"
    # volume_size = xx
    volume_type           = "gp3"
    delete_on_termination = true
    encrypted             = true
    kms_key_id            = data.aws_kms_key.ebs-key.arn
    snapshot_id           = local.block_device_mappings["/dev/xvdr"].ebs.snapshot_id
  }

  # ebs_block_device {
  #   # /tmp
  #   device_name = "/dev/xvds"
  #   volume_size           = 100
  #   volume_type           = "gp3"
  #   delete_on_termination = true
  #   encrypted             = true
  #   kms_key_id            = data.aws_kms_key.ebs-key.arn
  #   snapshot_id           = local.block_device_mappings["/dev/xvds"].ebs.snapshot_id
  # }

  # Attach the pre-created per-worker ENI as the primary interface.
  network_interface {
    device_index         = 0
    network_interface_id = aws_network_interface.vmray-worker-interface[count.index].id
  }

  user_data   = data.template_cloudinit_config.cloud-init-vmray-worker[count.index].rendered
  tags        = merge(local.standard_tags, var.tags, { Name = "vmray-worker-${count.index}" })
  volume_tags = merge(local.standard_tags, var.tags, { Name = "vmray-worker-${count.index}" })
}
# Render a multi-part cloud-init config making use of the part
# above, and other source files
# NOTE(review): the template provider (template_cloudinit_config) is
# deprecated upstream; hashicorp/cloudinit's cloudinit_config data source
# is the drop-in successor. Renaming would change the state address
# referenced by the instance's user_data, so it is left as-is here.
data "template_cloudinit_config" "cloud-init-vmray-worker" {
  count         = local.instance_count
  gzip          = true
  base64_encode = true

  # Main cloud-config configuration file.
  part {
    filename     = "init.cfg"
    content_type = "text/cloud-config"
    # Per-worker hostname/FQDN plus environment- and partition-specific
    # values consumed by the cloud-init template.
    content = templatefile("${path.module}/cloud-init/cloud-init.tpl",
      {
        hostname            = "vmray-worker-${count.index}"
        fqdn                = "vmray-worker-${count.index}.${var.dns_info["private"]["zone"]}"
        environment         = var.environment
        salt_master         = local.salt_master
        proxy               = local.proxy
        aws_partition       = var.aws_partition
        aws_partition_alias = var.aws_partition_alias
        aws_region          = var.aws_region
        #ua_key = local.secret_ubuntu["ua_key"] # This is gathered in server.tf
      }
    )
  }

  # mount /dev/xvdf at /opt/vmray
  part {
    content_type = "text/cloud-boothook"
    content      = file("${path.module}/cloud-init/opt_vmray.boothook")
  }
}
# Private A record (with matching reverse PTR) for each worker instance.
module "private_dns_record_vmray_worker" {
  source = "../../submodules/dns/private_A_record"
  count  = local.instance_count

  name            = "vmray-worker-${count.index}"
  ip_addresses    = [aws_instance.vmray-worker-instance[count.index].private_ip]
  dns_info        = var.dns_info
  reverse_enabled = true

  # DNS zones live in the C2 account, so pass the aliased provider through.
  providers = {
    aws.c2 = aws.c2
  }
}
|