Bladeren bron

Merge pull request #296 from mdr-engineering/feature/ftd_MSOCI-1274_VMRayTweaks

Tweaks for VMRay Before Prod Deployment
Frederick Damstra 3 jaren geleden
bovenliggende
commit
559ceb3a75

+ 141 - 0
base/vmray_instances/alb.tf

@@ -0,0 +1,141 @@
+#----------------------------------------------------------------------------
+# INTERNAL LB
+#----------------------------------------------------------------------------
+# Internal application load balancer fronting the VMRay server, with access
+# logging to the per-environment ELB log bucket.
+resource "aws_alb" "vmray_internal" {
+  name               = "vmray-alb-internal-${var.environment}"
+  load_balancer_type = "application"
+  internal           = true
+  # NOTE(review): this is an *internal* LB but it is placed in
+  # var.public_subnets -- confirm those are the intended subnets.
+  subnets            = var.public_subnets
+  security_groups    = [aws_security_group.vmray_alb_internal.id]
+
+  access_logs {
+    enabled = true
+    bucket  = "xdr-elb-${var.environment}"
+  }
+
+  # 20-minute idle timeout for long-running VMRay sessions.
+  idle_timeout = 1200
+
+  tags = merge(var.standard_tags, var.tags, { Name = "vmray-alb-internal-${var.environment}" })
+}
+
+# Target group for the VMRay server (HTTPS end-to-end).
+resource "aws_alb_target_group" "vmray_internal" {
+  # Include the environment in the name: every other named resource in this
+  # file is suffixed with var.environment, and a bare "vmray-alb-targets"
+  # collides across environments sharing an account/region.
+  # NOTE: renaming a target group forces replacement.
+  name                 = "vmray-alb-targets-${var.environment}"
+  port                 = 443
+  protocol             = "HTTPS"
+  vpc_id               = var.vpc_id
+
+  # Accept 302 as healthy since the app may redirect "/" to a login page.
+  health_check {
+    protocol = "HTTPS"
+    port     = "443"
+    path     = "/"
+    matcher  = "200,302"
+    timeout  = "4"
+    interval = "5"
+    unhealthy_threshold = 2
+    healthy_threshold   = 2
+  }
+
+  #stickiness {
+  #  type    = "lb_cookie"
+  #  enabled = false 
+  #}
+
+  tags = merge(var.standard_tags, var.tags)
+}
+
+# Register the VMRay server instance with the internal target group on 443.
+resource "aws_lb_target_group_attachment" "vmray_internal" {
+  port             = 443
+  target_id        = aws_instance.vmray-server-instance.id
+  target_group_arn = aws_alb_target_group.vmray_internal.arn
+}
+
+# HTTPS listener: terminates TLS with the private-zone ACM certificate and
+# forwards everything to the VMRay target group.
+resource "aws_alb_listener" "vmray_https_internal" {
+  load_balancer_arn = aws_alb.vmray_internal.arn
+  port              = 443
+  protocol          = "HTTPS"
+  certificate_arn   = aws_acm_certificate.cert_private.arn
+  # PFS, TLS1.2, most "restrictive" predefined policy (took awhile to find).
+  ssl_policy        = "ELBSecurityPolicy-FS-1-2-Res-2019-08"
+
+  default_action {
+    type             = "forward"
+    target_group_arn = aws_alb_target_group.vmray_internal.arn
+  }
+}
+
+# HTTP listener: permanently redirects all plain-HTTP traffic to HTTPS.
+resource "aws_lb_listener" "vmray_listener_http" {
+  load_balancer_arn = aws_alb.vmray_internal.arn
+  port              = 80
+  protocol          = "HTTP"
+
+  default_action {
+    type = "redirect"
+
+    redirect {
+      status_code = "HTTP_301"
+      protocol    = "HTTPS"
+      port        = "443"
+    }
+  }
+}
+
+# #########################
+# # DNS Entry
+# CNAME "vmray" in the private zone, pointing at the internal ALB.
+module "private_dns_record" {
+  source = "../../submodules/dns/private_CNAME_record"
+
+  name             = "vmray"
+  dns_info         = var.dns_info
+  target_dns_names = [aws_alb.vmray_internal.dns_name]
+
+  providers = {
+    aws.c2 = aws.c2
+  }
+}
+
+#----------------------------------------------------------------------------
+# ALB Security Group
+#----------------------------------------------------------------------------
+resource "aws_security_group" "vmray_alb_internal" {
+  vpc_id      = var.vpc_id
+  name        = "vmray-alb-sg-internal"
+  # Fixed copy/paste error: this SG fronts the VMRay ALB, not Phantom.
+  # NOTE: changing a security group's description forces replacement.
+  description = "ALB for VMRay"
+  tags        = merge(var.standard_tags, var.tags)
+}
+
+#----------------------------------------------------------------------------
+# INGRESS
+#----------------------------------------------------------------------------
+resource "aws_security_group_rule" "http_from_local" {
+  # Fixed description: the allowed CIDRs come from cidr_map["vpc-access"],
+  # not the public internet.
+  description = "HTTP inbound from vpc-access CIDRs"
+  type = "ingress"
+  from_port = "80"
+  to_port = "80"
+  protocol = "tcp"
+  cidr_blocks = var.cidr_map["vpc-access"]
+  security_group_id = aws_security_group.vmray_alb_internal.id
+}
+
+resource "aws_security_group_rule" "https_from_local" {
+  # Fixed description: the allowed CIDRs come from cidr_map["vpc-access"],
+  # not the public internet.
+  description = "HTTPS inbound from vpc-access CIDRs"
+  type = "ingress"
+  from_port = "443"
+  to_port = "443"
+  protocol = "tcp"
+  cidr_blocks = var.cidr_map["vpc-access"]
+  security_group_id = aws_security_group.vmray_alb_internal.id
+}
+
+#----------------------------------------------------------------------------
+# EGRESS
+#----------------------------------------------------------------------------
+resource "aws_security_group_rule" "vmray_alb_to_server" {
+  description = "HTTPS to the Server"
+  type = "egress"
+  from_port = "443"
+  to_port = "443"
+  protocol = "tcp"
+  # For egress rules the AWS provider uses source_security_group_id as the
+  # DESTINATION security group, so this permits 443 out to the VMRay server.
+  source_security_group_id = aws_security_group.vmray_server_sg.id
+  security_group_id = aws_security_group.vmray_alb_internal.id
+}

+ 8 - 0
base/vmray_instances/ami.tf

@@ -17,3 +17,11 @@ data "aws_ami" "ubuntu2004" {
     values = [ "MSOC_Ubuntu_2004_Minion*" ]
   }
 }
+
+locals {
+  # Index the AMI's block device mappings by device name so a specific
+  # device (e.g. "/dev/xvdm") can be looked up when declaring EBS volumes.
+  block_device_mappings = {
+    for mapping in data.aws_ami.ubuntu2004.block_device_mappings :
+    mapping.device_name => mapping
+  }
+}

+ 37 - 0
base/vmray_instances/certificate.tf

@@ -0,0 +1,37 @@
+#----------------------------------------------------------------------------
+# Private DNS Certificate
+# (cert is for the *private* zone name; validation records go in public DNS)
+#----------------------------------------------------------------------------
+resource "aws_acm_certificate" "cert_private" {
+  domain_name       = "vmray.${var.dns_info["private"]["zone"]}"
+  validation_method = "DNS"
+
+  # Issue the replacement cert before destroying the old one so the ALB
+  # listener referencing it is never left without a certificate.
+  lifecycle {
+    create_before_destroy = true
+  }
+
+  tags = merge(var.standard_tags, var.tags)
+}
+
+# Waits until ACM has validated the certificate via the DNS records below;
+# downstream resources can depend on this instead of the raw certificate.
+resource "aws_acm_certificate_validation" "cert_private" {
+  certificate_arn         = aws_acm_certificate.cert_private.arn
+  validation_record_fqdns = [for record in aws_route53_record.cert_validation_private: record.fqdn]
+}
+
+# One validation CNAME per domain_validation_option, keyed by domain name.
+resource "aws_route53_record" "cert_validation_private" {
+  provider = aws.mdr-common-services-commercial
+
+  for_each = {
+    for dvo in aws_acm_certificate.cert_private.domain_validation_options : dvo.domain_name => {
+      name   = dvo.resource_record_name
+      record = dvo.resource_record_value
+      type   = dvo.resource_record_type
+    }
+  }
+
+  allow_overwrite = true
+  name            = each.value.name
+  records         = [each.value.record]
+  ttl             = 60
+  type            = each.value.type
+  zone_id         = var.dns_info["public"]["zone_id"] # private zones still use public DNS for validation
+}

+ 4 - 3
base/vmray_instances/cloud-init/cloud-init.tpl

@@ -93,9 +93,10 @@ runcmd:
  - /usr/share/ubuntu-scap-security-guides/cis-hardening/Canonical_Ubuntu_20.04_CIS-harden.sh lvl2_server
  - apt update 
  - apt upgrade -y
- - apt install -y firewalld
- - /bin/systemctl start firewalld
- - /bin/systemctl enable firewalld
+ # VMRay is incompatible with firewalld, but this should be enabled for other ubuntu systems
+ #- apt install -y firewalld
+ #- /bin/systemctl start firewalld
+ #- /bin/systemctl enable firewalld
  - /bin/systemctl restart salt-minion
  - /bin/systemctl enable salt-minion
  - /bin/systemctl start snap.amazon-ssm-agent.amazon-ssm-agent.service

+ 2 - 1
base/vmray_instances/security-groups.tf

@@ -16,7 +16,8 @@ resource "aws_security_group_rule" "vmray_server_https_in" {
   from_port         = 443
   to_port           = 443
   protocol          = "tcp"
-  cidr_blocks       = var.cidr_map["vpc-access"]
+  #cidr_blocks       = var.cidr_map["vpc-access"]
+  source_security_group_id = aws_security_group.vmray_alb_internal.id
   security_group_id = aws_security_group.vmray_server_sg.id
 }
 

+ 76 - 0
base/vmray_instances/server.tf

@@ -70,6 +70,82 @@ resource "aws_instance" "vmray-server-instance" {
       kms_key_id = data.aws_kms_key.ebs-key.arn
   }
 
+  # One EBS volume per mount point. Device names must exist as keys in
+  # local.block_device_mappings (built from the AMI in ami.tf), otherwise
+  # the snapshot_id lookups below fail at plan time.
+  ebs_block_device {
+    # swap
+    device_name = "/dev/xvdm"
+    #volume_size = 48
+    # volume_size omitted -- presumably sized from the snapshot; confirm.
+    volume_type = "gp3"
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    # Snapshot IDs need to be grabbed from the ami, or it will replace every time. It's ugly.
+    # This may prompt replacement when the AMI is updated.
+    # See:
+    #   https://github.com/hashicorp/terraform/issues/19958
+    #   https://github.com/terraform-providers/terraform-provider-aws/issues/13118
+    snapshot_id = local.block_device_mappings["/dev/xvdm"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /home
+    device_name = "/dev/xvdn"
+    # volume_size = xx
+    volume_type = "gp3"
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings["/dev/xvdn"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var
+    device_name = "/dev/xvdo"
+    # volume_size = xx
+    volume_type = "gp3"
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings["/dev/xvdo"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/tmp
+    device_name = "/dev/xvdp"
+    # volume_size = xx
+    volume_type = "gp3"
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings["/dev/xvdp"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/log
+    device_name = "/dev/xvdq"
+    # volume_size = xx
+    volume_type = "gp3"
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings["/dev/xvdq"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/log/audit
+    device_name = "/dev/xvdr"
+    # volume_size = xx
+    volume_type = "gp3"
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings["/dev/xvdr"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /tmp
+    device_name = "/dev/xvds"
+    volume_size = 8 # vmray requires >= 5
+    volume_type = "gp3"
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings["/dev/xvds"].ebs.snapshot_id
+  }
+
   network_interface {
     device_index = 0
     network_interface_id = aws_network_interface.vmray-server-interface.id

+ 76 - 0
base/vmray_instances/worker.tf

@@ -31,6 +31,82 @@ resource "aws_instance" "vmray-worker-instance" {
       kms_key_id = data.aws_kms_key.ebs-key.arn
   }
 
+  # One EBS volume per mount point (mirrors the server instance layout).
+  # Device names must exist as keys in local.block_device_mappings (built
+  # from the AMI in ami.tf), otherwise the lookups fail at plan time.
+  ebs_block_device {
+    # swap
+    device_name = "/dev/xvdm"
+    #volume_size = 48
+    # volume_size omitted -- presumably sized from the snapshot; confirm.
+    volume_type = "gp3"
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    # Snapshot IDs need to be grabbed from the ami, or it will replace every time. It's ugly.
+    # This may prompt replacement when the AMI is updated.
+    # See:
+    #   https://github.com/hashicorp/terraform/issues/19958
+    #   https://github.com/terraform-providers/terraform-provider-aws/issues/13118
+    snapshot_id = local.block_device_mappings["/dev/xvdm"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /home
+    device_name = "/dev/xvdn"
+    # volume_size = xx
+    volume_type = "gp3"
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings["/dev/xvdn"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var
+    device_name = "/dev/xvdo"
+    # volume_size = xx
+    volume_type = "gp3"
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings["/dev/xvdo"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/tmp
+    device_name = "/dev/xvdp"
+    # volume_size = xx
+    volume_type = "gp3"
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings["/dev/xvdp"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/log
+    device_name = "/dev/xvdq"
+    # volume_size = xx
+    volume_type = "gp3"
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings["/dev/xvdq"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/log/audit
+    device_name = "/dev/xvdr"
+    # volume_size = xx
+    volume_type = "gp3"
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings["/dev/xvdr"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /tmp
+    device_name = "/dev/xvds"
+    volume_size = 8 # vmray requires >= 5
+    volume_type = "gp3"
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings["/dev/xvds"].ebs.snapshot_id
+  }
+
   network_interface {
     device_index = 0
     network_interface_id = aws_network_interface.vmray-worker-interface[count.index].id