
Updates the GitHub Load Balancer Architecture

* NLB (with static IPs)
* Port 22 goes directly to the server
* Ports 80 and 443 go to an ALB
* The ALB forwards to the server

WAF is not yet enabled. Enabling it should just be a matter of changing
waf_enabled from false to true, and then sorting through all the false
positives, because git isn't going to like being behind a WAF.
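
For reference, flipping it on in base/github/elb.tf should look roughly
like the sketch below; the SizeRestrictions_BODY exclusion is an
assumption, taken from the commented example in the diff (large git push
bodies commonly trip that rule):

  waf_enabled                                 = true
  excluded_rules_AWSManagedRulesCommonRuleSet = ["SizeRestrictions_BODY"]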

To be tagged v4.2.4
Fred Damstra [afs macbook] 3 years ago
commit 8e0498865b

+ 60 - 56
base/github/backup_server.tf

@@ -1,49 +1,53 @@
 # Some instance variables
 locals {
-  ami_selection       = "minion" # master, minion, ...
+  ami_selection = "minion" # master, minion, ...
 }
 
 resource "aws_network_interface" "ghe-backup-interface" {
-  subnet_id = var.private_subnets[0]
-  security_groups = [ data.aws_security_group.typical-host.id, aws_security_group.ghe_backup_server.id ]
-  description = "ghe-backup"
-  tags = merge(var.standard_tags, var.tags, { Name = "ghe-backup" })
+  subnet_id       = var.private_subnets[0]
+  security_groups = [data.aws_security_group.typical-host.id, aws_security_group.ghe_backup_server.id]
+  description     = "ghe-backup"
+  tags            = merge(var.standard_tags, var.tags, { Name = "ghe-backup" })
 }
 
 resource "aws_instance" "ghe-backup-instance" {
-  tenancy = "default"
-  ebs_optimized = true
-  disable_api_termination = var.instance_termination_protection
+  tenancy                              = "default"
+  ebs_optimized                        = true
+  disable_api_termination              = var.instance_termination_protection
   instance_initiated_shutdown_behavior = "stop"
-  instance_type = var.backup_instance_type
-  key_name = "msoc-build"
-  monitoring = false
-  iam_instance_profile = "msoc-default-instance-profile"
+  instance_type                        = var.backup_instance_type
+  key_name                             = "msoc-build"
+  monitoring                           = false
+  iam_instance_profile                 = "msoc-default-instance-profile"
+
+  metadata_options {
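+    # Enforce IMDSv2: instance metadata requests must use a session token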
+    http_tokens = "required"
+  }
 
   ami = local.ami_map[local.ami_selection]
   # We need to ignore ebs_block_device changes, because if the AMI changes, so does the snapshot_id.
   # If they add a feature to block more specific changes (eg `ebs_block_devices[*].snapshot_id`), then
   # that could be removed.
-  lifecycle { ignore_changes = [ ami, key_name, user_data, ebs_block_device ] }
+  lifecycle { ignore_changes = [ami, key_name, user_data, ebs_block_device] }
   #lifecycle { ignore_changes = [ ami, key_name, user_data ] }
 
   # These device definitions are optional, but added for clarity.
   root_block_device {
-      volume_type = "gp3"
-      #volume_size = "60"
-      delete_on_termination = true
-      encrypted = true
-      kms_key_id = data.aws_kms_key.ebs-key.arn
+    volume_type = "gp3"
+    #volume_size = "60"
+    delete_on_termination = true
+    encrypted             = true
+    kms_key_id            = data.aws_kms_key.ebs-key.arn
   }
 
   ebs_block_device {
     # swap
-    device_name = "/dev/xvdm"
-    volume_type = "gp3"
-    volume_size = 8
+    device_name           = "/dev/xvdm"
+    volume_type           = "gp3"
+    volume_size           = 8
     delete_on_termination = true
-    encrypted = true
-    kms_key_id = data.aws_kms_key.ebs-key.arn
+    encrypted             = true
+    kms_key_id            = data.aws_kms_key.ebs-key.arn
     # Snapshot IDs need to be grabbed from the ami, or it will replace every time. It's ugly.
     # This may prompt replacement when the AMI is updated.
     # See:
@@ -57,9 +61,9 @@ resource "aws_instance" "ghe-backup-instance" {
     volume_type = "gp3"
     # volume_size = xx
     delete_on_termination = true
-    encrypted = true
-    kms_key_id = data.aws_kms_key.ebs-key.arn
-    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdn"].ebs.snapshot_id
+    encrypted             = true
+    kms_key_id            = data.aws_kms_key.ebs-key.arn
+    snapshot_id           = local.block_device_mappings[local.ami_selection]["/dev/xvdn"].ebs.snapshot_id
 
   }
   ebs_block_device {
@@ -68,9 +72,9 @@ resource "aws_instance" "ghe-backup-instance" {
     volume_type = "gp3"
     # volume_size = xx
     delete_on_termination = true
-    encrypted = true
-    kms_key_id = data.aws_kms_key.ebs-key.arn
-    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdo"].ebs.snapshot_id
+    encrypted             = true
+    kms_key_id            = data.aws_kms_key.ebs-key.arn
+    snapshot_id           = local.block_device_mappings[local.ami_selection]["/dev/xvdo"].ebs.snapshot_id
   }
   ebs_block_device {
     # /var/tmp
@@ -78,9 +82,9 @@ resource "aws_instance" "ghe-backup-instance" {
     volume_type = "gp3"
     # volume_size = xx
     delete_on_termination = true
-    encrypted = true
-    kms_key_id = data.aws_kms_key.ebs-key.arn
-    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdp"].ebs.snapshot_id
+    encrypted             = true
+    kms_key_id            = data.aws_kms_key.ebs-key.arn
+    snapshot_id           = local.block_device_mappings[local.ami_selection]["/dev/xvdp"].ebs.snapshot_id
   }
   ebs_block_device {
     # /var/log
@@ -88,9 +92,9 @@ resource "aws_instance" "ghe-backup-instance" {
     volume_type = "gp3"
     # volume_size = xx
     delete_on_termination = true
-    encrypted = true
-    kms_key_id = data.aws_kms_key.ebs-key.arn
-    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdq"].ebs.snapshot_id
+    encrypted             = true
+    kms_key_id            = data.aws_kms_key.ebs-key.arn
+    snapshot_id           = local.block_device_mappings[local.ami_selection]["/dev/xvdq"].ebs.snapshot_id
   }
   ebs_block_device {
     # /var/log/audit
@@ -98,9 +102,9 @@ resource "aws_instance" "ghe-backup-instance" {
     volume_type = "gp3"
     # volume_size = xx
     delete_on_termination = true
-    encrypted = true
-    kms_key_id = data.aws_kms_key.ebs-key.arn
-    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdr"].ebs.snapshot_id
+    encrypted             = true
+    kms_key_id            = data.aws_kms_key.ebs-key.arn
+    snapshot_id           = local.block_device_mappings[local.ami_selection]["/dev/xvdr"].ebs.snapshot_id
   }
   ebs_block_device {
     # /tmp
@@ -108,19 +112,19 @@ resource "aws_instance" "ghe-backup-instance" {
     volume_type = "gp3"
     # volume_size = xx
     delete_on_termination = true
-    encrypted = true
-    kms_key_id = data.aws_kms_key.ebs-key.arn
-    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvds"].ebs.snapshot_id
+    encrypted             = true
+    kms_key_id            = data.aws_kms_key.ebs-key.arn
+    snapshot_id           = local.block_device_mappings[local.ami_selection]["/dev/xvds"].ebs.snapshot_id
   }
 
   network_interface {
-    device_index = 0
+    device_index         = 0
     network_interface_id = aws_network_interface.ghe-backup-interface.id
   }
 
-  user_data = data.template_cloudinit_config.cloud-init.rendered
-  tags = merge( var.standard_tags, var.tags, var.backup_instance_tags, { Name = "ghe-backup" })
-  volume_tags = merge( var.standard_tags, var.tags, { Name = "ghe-backup" })
+  user_data   = data.template_cloudinit_config.cloud-init.rendered
+  tags        = merge(var.standard_tags, var.tags, var.backup_instance_tags, { Name = "ghe-backup" })
+  volume_tags = merge(var.standard_tags, var.tags, { Name = "ghe-backup" })
 }
 
 # Render a multi-part cloud-init config making use of the part
@@ -133,16 +137,16 @@ data "template_cloudinit_config" "cloud-init" {
   part {
     filename     = "init.cfg"
     content_type = "text/cloud-config"
-    content      = templatefile("${path.module}/cloud-init/cloud-init.tpl",
+    content = templatefile("${path.module}/cloud-init/cloud-init.tpl",
       {
-        hostname = "ghe-backup"
-        fqdn = "ghe-backup.${var.dns_info["private"]["zone"]}"
-        environment = var.environment
-        salt_master  = var.salt_master
-        proxy = var.proxy
-        aws_partition = var.aws_partition
+        hostname            = "ghe-backup"
+        fqdn                = "ghe-backup.${var.dns_info["private"]["zone"]}"
+        environment         = var.environment
+        salt_master         = var.salt_master
+        proxy               = var.proxy
+        aws_partition       = var.aws_partition
         aws_partition_alias = var.aws_partition_alias
-        aws_region = var.aws_region
+        aws_region          = var.aws_region
       }
     )
   }
@@ -157,9 +161,9 @@ data "template_cloudinit_config" "cloud-init" {
 module "private_dns_record_ghe_backup" {
   source = "../../submodules/dns/private_A_record"
 
-  name = "ghe-backup"
-  ip_addresses = [ aws_instance.ghe-backup-instance.private_ip ]
-  dns_info = var.dns_info
+  name            = "ghe-backup"
+  ip_addresses    = [aws_instance.ghe-backup-instance.private_ip]
+  dns_info        = var.dns_info
   reverse_enabled = var.reverse_enabled
 
   providers = {

+ 222 - 0
base/github/elb.tf

@@ -0,0 +1,222 @@
+# Architecture:
+# 1. DNS points to an NLB
+# 2. NLB:22 forwards to instance:22
+# 3. NLB:443 forwards to an ALB, which forwards to the instance
+# 4. NLB:80 forwards to the same ALB, which forwards to the instance.
+#
+# The module "static_nlb_to_alb" takes care of #3, but the rest
+# we have to handle here.
+#
+# tfsec:ignore:aws-elb-alb-not-public Purposefully public
+module "elb" {
+  source = "../../submodules/load_balancer/static_nlb_to_alb"
+
+  name                      = "github"
+  subject_alternative_names = ["*.github.${var.dns_info["public"]["zone"]}"]
+  target_ids                = aws_instance.ghe[*].id
+  listener_port             = 443
+  target_port               = 443
+  target_protocol           = "HTTPS"
+  target_security_group     = aws_security_group.ghe_server.id
+  allow_from_any            = true
+  redirect_80               = false # GitHub handles port 80, and needs it for LetsEncrypt
+
+  # WAF variables
+  waf_enabled = false # TODO: Turn this on
+  #excluded_rules_AWSManagedRulesCommonRuleSet = [ "SizeRestrictions_BODY" ]
+  #excluded_rules_AWSManagedRulesAmazonIpReputationList = []
+  #excluded_rules_AWSManagedRulesKnownBadInputsRuleSet = []
+  #excluded_rules_AWSManagedRulesSQLiRuleSet = []
+  #excluded_rules_AWSManagedRulesLinuxRuleSet = []
+  #excluded_rules_AWSManagedRulesUnixRuleSet = []
+  #additional_blocked_ips = []
+  #allowed_ips = []
+  #admin_ips = []
+
+  # Optional Variables
+  healthcheck_port     = 443
+  healthcheck_protocol = "HTTPS"
+  healthcheck_path     = "/status"
+  healthcheck_matcher  = "200"
+  stickiness           = false
+
+  # Inherited Variables 
+  tags           = merge(var.standard_tags, var.tags)
+  dns_info       = var.dns_info
+  public_subnets = var.public_subnets
+  environment    = var.environment
+  aws_partition  = var.aws_partition
+  aws_region     = var.aws_region
+  aws_account_id = var.aws_account_id
+  vpc_id         = var.vpc_id
+
+  providers = {
+    aws.mdr-common-services-commercial = aws.mdr-common-services-commercial
+    aws.c2                             = aws.c2
+  }
+}
+
+# Github Needs a Wildcard Record
+module "public_dns_record_wildcard" {
+  source = "../../submodules/dns/public_ALIAS_record"
+
+  name            = "*.github.${var.dns_info["public"]["zone"]}"
+  target_dns_name = module.elb.nlb.dns_name
+  target_zone_id  = module.elb.nlb.zone_id
+  dns_info        = var.dns_info
+
+  providers = {
+    aws.mdr-common-services-commercial = aws.mdr-common-services-commercial
+  }
+}
+
+#################################
+# Add port 80 to the ALB and NLB
+#
+# GHE uses LetsEncrypt, which needs access on port 80.
+
+# ALB side
+resource "aws_lb_target_group" "github_alb_80" {
+  name_prefix = "gita80"
+  port        = 80
+  protocol    = "HTTP"
+  vpc_id      = var.vpc_id
+
+  health_check {
+    protocol = "HTTPS"
+    port     = 443
+    path     = "/status"
+    matcher  = "200"
+    timeout  = "4"
+    interval = "5"
+  }
+
+  lifecycle {
+    create_before_destroy = true
+  }
+
+  tags = merge(var.standard_tags, var.tags)
+}
+
+resource "aws_lb_target_group_attachment" "github_alb_80" {
+  for_each = toset(aws_instance.ghe[*].id)
+
+  target_group_arn = aws_lb_target_group.github_alb_80.arn
+  target_id        = each.value
+  port             = 80
+}
+
+resource "aws_lb_listener" "github_alb_80" {
+  load_balancer_arn = module.elb.alb_id
+  port              = "80" # tfsec:ignore:aws-elb-http-not-used HTTP only used for letsencrypt and redirect
+  protocol          = "HTTP"
+
+  default_action {
+    type             = "forward"
+    target_group_arn = aws_lb_target_group.github_alb_80.arn
+  }
+
+  lifecycle {
+    create_before_destroy = true
+  }
+
+  tags = merge(var.standard_tags, var.tags)
+}
+
+resource "aws_security_group_rule" "github_alb_80" {
+  description       = "Github - Allow 80 from any"
+  type              = "ingress"
+  from_port         = 80
+  to_port           = 80
+  protocol          = "tcp"
+  cidr_blocks       = ["0.0.0.0/0"] # tfsec:ignore:aws-vpc-no-public-ingress-sgr Intentionally Open
+  security_group_id = module.elb.security_group_id
+}
+
+resource "aws_security_group_rule" "github_alb_80_out" {
+  description              = "Github - Allow 80 to the instances"
+  type                     = "egress"
+  from_port                = 80
+  to_port                  = 80
+  protocol                 = "tcp"
+  source_security_group_id = aws_security_group.ghe_server.id
+  security_group_id        = module.elb.security_group_id
+}
+
+# NLB Side
+resource "aws_lb_target_group" "github_nlb_80" {
+  name_prefix = "gitn80"
+  target_type = "alb"
+  port        = 80
+  protocol    = "TCP"
+  vpc_id      = var.vpc_id
+
+  lifecycle {
+    create_before_destroy = true
+  }
+
+  tags = merge(var.standard_tags, var.tags)
+}
+
+resource "aws_lb_target_group_attachment" "github_nlb_80" {
+  target_group_arn = aws_lb_target_group.github_nlb_80.arn
+  target_id        = module.elb.alb_id
+  port             = 80
+}
+
+resource "aws_lb_listener" "github_nlb_80" {
+  load_balancer_arn = module.elb.nlb_id
+  port              = "80"
+  protocol          = "TCP" # tfsec:ignore:aws-elb-http-not-used HTTP only for letsencrypt and redirects
+
+  default_action {
+    type             = "forward"
+    target_group_arn = aws_lb_target_group.github_nlb_80.arn
+  }
+
+  lifecycle {
+    create_before_destroy = true
+  }
+
+  tags = merge(var.standard_tags, var.tags)
+}
+
+##########################
+# Add port 22 to the NLB
+resource "aws_lb_target_group" "github_ssh" {
+  name_prefix = "gitssh"
+  port        = 22
+  protocol    = "TCP"
+  vpc_id      = var.vpc_id
+
+  lifecycle {
+    create_before_destroy = true
+  }
+
+  tags = merge(var.standard_tags, var.tags)
+}
+
+resource "aws_lb_target_group_attachment" "github_ssh" {
+  for_each = toset(aws_instance.ghe[*].id)
+
+  target_group_arn = aws_lb_target_group.github_ssh.arn
+  target_id        = each.value
+  port             = 22
+}
+
+resource "aws_lb_listener" "github_ssh" {
+  load_balancer_arn = module.elb.nlb_id
+  port              = "22"
+  protocol          = "TCP"
+
+  default_action {
+    type             = "forward"
+    target_group_arn = aws_lb_target_group.github_ssh.arn
+  }
+
+  lifecycle {
+    create_before_destroy = true
+  }
+
+  tags = merge(var.standard_tags, var.tags)
+}

+ 46 - 122
base/github/elbclassic.tf

@@ -1,145 +1,69 @@
-module "public_dns_record" {
-  source = "../../submodules/dns/public_ALIAS_record"
-
-  name = "github.${var.dns_info["public"]["zone"]}"
-  target_dns_name = aws_elb.external.dns_name
-  target_zone_id  = aws_elb.external.zone_id
-  dns_info = var.dns_info
-
-  providers = {
-    aws.mdr-common-services-commercial = aws.mdr-common-services-commercial
-  }
-}
-
-module "public_dns_record_wildcard" {
-  source = "../../submodules/dns/public_ALIAS_record"
-
-  name = "*.github.${var.dns_info["public"]["zone"]}"
-  target_dns_name = aws_elb.external.dns_name
-  target_zone_id  = aws_elb.external.zone_id
-  dns_info = var.dns_info
-
-  providers = {
-    aws.mdr-common-services-commercial = aws.mdr-common-services-commercial
-  }
-}
-
-# If ever this gets converted to an ALB, consider adding the waf module.
-resource "aws_elb" "external" {
-    name_prefix = "gheext"
-    subnets     = var.public_subnets
-    security_groups  = [ aws_security_group.ghe_elb_external.id ]
-
-    listener {
-        instance_port      = 443
-        instance_protocol  = "HTTPS"
-        lb_port            = 443
-        lb_protocol        = "HTTPS"
-        ssl_certificate_id = aws_acm_certificate.cert_public.arn
-    }
-
-    listener {
-        instance_port      = 80
-        instance_protocol  = "HTTP"
-        lb_port            = 80
-        lb_protocol        = "HTTP"
-    }
-
-    listener {
-        instance_port     = 23
-        instance_protocol = "TCP"
-        lb_port           = 22
-        lb_protocol       = "TCP"
-    }
-
-    health_check {
-        healthy_threshold   = 2
-        unhealthy_threshold = 2
-        timeout             = 3
-        target              = "HTTPS:443/status"
-        interval            = 30
-    }
-}
-
-resource "aws_proxy_protocol_policy" "external_proxy_protocol" {
-    load_balancer  = aws_elb.external.name
-    instance_ports = [ "23", "444" ]
-}
-
-# Create a new load balancer attachment
-resource "aws_elb_attachment" "external_attachment" {
-    count    = var.instance_count
-    elb      = aws_elb.external.id
-    instance = aws_instance.ghe[count.index].id
-}
-
-
 #--------------------------------------------------------------
 # Internal ELB
 #--------------------------------------------------------------
 resource "aws_route53_record" "github_internal" {
-  zone_id = var.dns_info["private"]["zone_id"]
-  name    = "github.${var.dns_info["private"]["zone"]}"
-  type    = "CNAME"
-  records = [aws_elb.internal.dns_name]
-  ttl = "60"
+  zone_id  = var.dns_info["private"]["zone_id"]
+  name     = "github.${var.dns_info["private"]["zone"]}"
+  type     = "CNAME"
+  records  = [aws_elb.internal.dns_name]
+  ttl      = "60"
   provider = aws.c2
 }
 
 resource "aws_route53_record" "github_internal_wildcard" {
-  zone_id = var.dns_info["private"]["zone_id"]
-  name    = "*.github.${var.dns_info["private"]["zone"]}"
-  type    = "CNAME"
-  records = [aws_elb.internal.dns_name]
-  ttl = "60"
+  zone_id  = var.dns_info["private"]["zone_id"]
+  name     = "*.github.${var.dns_info["private"]["zone"]}"
+  type     = "CNAME"
+  records  = [aws_elb.internal.dns_name]
+  ttl      = "60"
   provider = aws.c2
 }
 
 resource "aws_elb" "internal" {
-    name_prefix = "gheint"
-    internal    = true
-    subnets     = var.private_subnets
-    security_groups  = [ aws_security_group.ghe_elb_internal.id ]
-
-    listener {
-        instance_port      = 443
-        instance_protocol  = "HTTPS"
-        lb_port            = 443
-        lb_protocol        = "HTTPS"
-        ssl_certificate_id = aws_acm_certificate.cert.arn
-    }
+  name_prefix     = "gheint"
+  internal        = true
+  subnets         = var.private_subnets
+  security_groups = [aws_security_group.ghe_elb_internal.id]
+
+  listener {
+    instance_port      = 443
+    instance_protocol  = "HTTPS"
+    lb_port            = 443
+    lb_protocol        = "HTTPS"
+    ssl_certificate_id = aws_acm_certificate.cert.arn
+  }
 
-    listener {
-        instance_port      = 8444
-        instance_protocol  = "TCP"
-        lb_port            = 8443
-        lb_protocol        = "TCP"
-    }
+  listener {
+    instance_port     = 8444
+    instance_protocol = "TCP"
+    lb_port           = 8443
+    lb_protocol       = "TCP"
+  }
 
-    listener {
-        instance_port     = 23
-        instance_protocol = "TCP"
-        lb_port           = 22
-        lb_protocol       = "TCP"
-    }
+  listener {
+    instance_port     = 23
+    instance_protocol = "TCP"
+    lb_port           = 22
+    lb_protocol       = "TCP"
+  }
 
-    health_check {
-        healthy_threshold   = 2
-        unhealthy_threshold = 2
-        timeout             = 3
-        target              = "HTTPS:443/status"
-        interval            = 30
-    }
+  health_check {
+    healthy_threshold   = 2
+    unhealthy_threshold = 2
+    timeout             = 3
+    target              = "HTTPS:443/status"
+    interval            = 30
+  }
 }
 
 resource "aws_proxy_protocol_policy" "internal_proxy_protocol" {
-     load_balancer  = aws_elb.internal.name
-     instance_ports = [ "23", "444", "8444"]
+  load_balancer  = aws_elb.internal.name
+  instance_ports = ["23", "444", "8444"]
 }
 
 # Create a new load balancer attachment
 resource "aws_elb_attachment" "internal_attachment" {
-    count    = var.instance_count
-    elb      = aws_elb.internal.id
-    instance = aws_instance.ghe[count.index].id
+  count    = var.instance_count
+  elb      = aws_elb.internal.id
+  instance = aws_instance.ghe[count.index].id
 }

+ 5 - 1
base/github/github_servers.tf

@@ -24,7 +24,11 @@ resource "aws_instance" "ghe" {
   instance_initiated_shutdown_behavior = "stop"
   key_name                             = "msoc-build"
   monitoring                           = false
-  iam_instance_profile                 = "github_instance_profile"
+  iam_instance_profile                 = module.instance_profile.profile_id
+
+  metadata_options {
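+    # Enforce IMDSv2: instance metadata requests must use a session token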
+    http_tokens = "required"
+  }
 
   # single space to disable default module behavior
   root_block_device {

+ 6 - 44
base/github/instance_profile.tf

@@ -3,49 +3,11 @@
 #
 # Includes policies for GitHub Enterprise:
 #  * Same policies as the default instance profile
-resource "aws_iam_instance_profile" "github_instance_profile" {
-  name = "xdr-github-instance-profile"
-  path = "/instance/"
-  role = aws_iam_role.github_instance_role.name
-}
-
-resource "aws_iam_role" "github_instance_role" {
-  name               = "xdr-github-instance-role"
-  path               = "/instance/"
-  assume_role_policy = <<EOF
-{
-    "Version": "2012-10-17",
-    "Statement": [
-      {
-        "Sid": "",
-        "Effect": "Allow",
-        "Principal": {
-          "Service": [
-            "ec2.amazonaws.com",
-            "ssm.amazonaws.com"
-            ]
-        },
-        "Action": "sts:AssumeRole"
-      }
-    ]
-  }
-EOF
-}
-
-# These 3 are the default profile attachments:
-resource "aws_iam_role_policy_attachment" "github_instance_AmazonEC2RoleforSSM" {
-  role       = aws_iam_role.github_instance_role.name
-  policy_arn = "arn:${var.aws_partition}:iam::aws:policy/service-role/AmazonEC2RoleforSSM"
-}
-
-resource "aws_iam_role_policy_attachment" "github_instance_default_policy_attach" {
-  role       = aws_iam_role.github_instance_role.name
-  policy_arn = "arn:${var.aws_partition}:iam::${var.aws_account_id}:policy/launchroles/default_instance_tag_read"
-}
-
-resource "aws_iam_role_policy_attachment" "github_instance_cloudwatch_policy_attach" {
-  role       = aws_iam_role.github_instance_role.name
-  policy_arn = "arn:${var.aws_partition}:iam::${var.aws_account_id}:policy/cloudwatch_events"
+module "instance_profile" {
+  source         = "../../submodules/iam/base_instance_profile"
+  prefix         = "xdr-github"
+  aws_partition  = var.aws_partition
+  aws_account_id = var.aws_account_id
 }
 
 # GitHub Enterprise Specific Policy
@@ -116,6 +78,6 @@ data "aws_iam_policy_document" "github_instance_policy_doc" {
 }
 
 resource "aws_iam_role_policy_attachment" "github_instance_policy_attach" {
-  role       = aws_iam_role.github_instance_role.name
+  role       = module.instance_profile.role_id
   policy_arn = aws_iam_policy.github_instance_policy.arn
 }

+ 55 - 76
base/github/securitygroup-server.tf

@@ -1,7 +1,6 @@
 # SG Summary - Server
 #
-#   22 - From vpc-access
-#   23 - From Load Balancers
+#   22 - From anywhere
 #   122 - From vpc-access, ghe-backup
 #   443-444 - From Load Balancers, vpc-access
 #   8443 - From vpc-access, GHE-Backup
@@ -9,7 +8,7 @@
 #   
 resource "aws_security_group" "ghe_server" {
   name_prefix = "ghe_server"
-  tags = merge( var.standard_tags, var.tags, { Name = "github-enterprise-server" } )
+  tags        = merge(var.standard_tags, var.tags, { Name = "github-enterprise-server" })
   vpc_id      = var.vpc_id
   description = "GitHub Enterprise Servers and Backup Servers"
 }
@@ -17,39 +16,19 @@ resource "aws_security_group" "ghe_server" {
 #-----------------------------------------------------------------
 # Inbound access
 #-----------------------------------------------------------------
-resource "aws_security_group_rule" "ghe_server_inbound_ssh_cidr" {
-  security_group_id        = aws_security_group.ghe_server.id
-  type                     = "ingress"
-  cidr_blocks              = var.cidr_map["vpc-access"]
-  from_port                = 22
-  to_port                  = 22
-  protocol                 = "tcp"
-  description              = "Inbound ssh (for git)"
-}
-
-resource "aws_security_group_rule" "ghe_server_inbound_https_external_elb_23" {
-  security_group_id        = aws_security_group.ghe_server.id
-  source_security_group_id = aws_security_group.ghe_elb_external.id
-  type                     = "ingress"
-  from_port                = 23
-  to_port                  = 23
-  protocol                 = "tcp"
-  description              = "Inbound tcp/23 (ssh-proxy) from external ELBs"
-}
-
-resource "aws_security_group_rule" "ghe_server_inbound_https_internal_elb_23" {
-  security_group_id        = aws_security_group.ghe_server.id
-  source_security_group_id = aws_security_group.ghe_elb_internal.id
-  type                     = "ingress"
-  from_port                = 23
-  to_port                  = 23
-  protocol                 = "tcp"
-  description              = "Inbound tcp/23 (ssh-proxy) from internal ELBs"
+resource "aws_security_group_rule" "ghe_server_inbound_22" {
+  security_group_id = aws_security_group.ghe_server.id
+  type              = "ingress"
+  from_port         = 22
+  to_port           = 22
+  protocol          = "tcp"
+  description       = "Inbound tcp/22 (ssh) from external IPs (through NLB)"
+  cidr_blocks       = ["0.0.0.0/0"] # tfsec:ignore:aws-vpc-no-public-ingress-sgr Intentionally Open
 }
 
 resource "aws_security_group_rule" "ghe_server_inbound_external_elb_80" {
   security_group_id        = aws_security_group.ghe_server.id
-  source_security_group_id = aws_security_group.ghe_elb_external.id
+  source_security_group_id = module.elb.security_group_id
   type                     = "ingress"
   from_port                = 80
   to_port                  = 80
@@ -58,13 +37,13 @@ resource "aws_security_group_rule" "ghe_server_inbound_external_elb_80" {
 }
 
 resource "aws_security_group_rule" "ghe_server_inbound_mgmt_ssh_cidr" {
-  security_group_id        = aws_security_group.ghe_server.id
-  type                     = "ingress"
-  cidr_blocks              = var.cidr_map["vpc-access"]
-  from_port                = 122
-  to_port                  = 122
-  protocol                 = "tcp"
-  description              = "Inbound ssh (for mgmt)"
+  security_group_id = aws_security_group.ghe_server.id
+  type              = "ingress"
+  cidr_blocks       = var.cidr_map["vpc-access"]
+  from_port         = 122
+  to_port           = 122
+  protocol          = "tcp"
+  description       = "Inbound ssh (for mgmt)"
 }
 
 resource "aws_security_group_rule" "ghe_server_inbound_mgmt_ssh_sgs" {
@@ -88,18 +67,18 @@ resource "aws_security_group_rule" "ghe_server_inbound_mgmt_ssh_backup_sgs" {
 }
 
 resource "aws_security_group_rule" "ghe_server_inbound_https_cidr" {
-  security_group_id        = aws_security_group.ghe_server.id
-  type                     = "ingress"
-  cidr_blocks              = var.cidr_map["vpc-access"]
-  from_port                = 443
-  to_port                  = 444
-  protocol                 = "tcp"
-  description              = "Inbound https"
+  security_group_id = aws_security_group.ghe_server.id
+  type              = "ingress"
+  cidr_blocks       = var.cidr_map["vpc-access"]
+  from_port         = 443
+  to_port           = 444
+  protocol          = "tcp"
+  description       = "Inbound https"
 }
 
 resource "aws_security_group_rule" "ghe_server_inbound_https_external_elb" {
   security_group_id        = aws_security_group.ghe_server.id
-  source_security_group_id = aws_security_group.ghe_elb_external.id
+  source_security_group_id = module.elb.security_group_id
   type                     = "ingress"
   from_port                = 443
   to_port                  = 444
@@ -118,13 +97,13 @@ resource "aws_security_group_rule" "ghe_server_inbound_https_internal_elb" {
 }
 
 resource "aws_security_group_rule" "ghe_server_inbound_mgmt_https_cidr" {
-  security_group_id        = aws_security_group.ghe_server.id
-  type                     = "ingress"
-  cidr_blocks              = var.cidr_map["vpc-access"]
-  from_port                = 8443
-  to_port                  = 8444
-  protocol                 = "tcp"
-  description              = "Inbound https (for mgmt)"
+  security_group_id = aws_security_group.ghe_server.id
+  type              = "ingress"
+  cidr_blocks       = var.cidr_map["vpc-access"]
+  from_port         = 8443
+  to_port           = 8444
+  protocol          = "tcp"
+  description       = "Inbound https (for mgmt)"
 }
 
 resource "aws_security_group_rule" "ghe_server_inbound_mgmt_https_sgs" {
@@ -159,7 +138,7 @@ resource "aws_security_group_rule" "ghe_server_inbound_https_internal_elb_8444"
 
 resource "aws_security_group_rule" "ghe_server_inbound_https_external_elb_8444" {
   security_group_id        = aws_security_group.ghe_server.id
-  source_security_group_id = aws_security_group.ghe_elb_external.id
+  source_security_group_id = module.elb.security_group_id
   type                     = "ingress"
   from_port                = 8443
   to_port                  = 8444
@@ -171,33 +150,33 @@ resource "aws_security_group_rule" "ghe_server_inbound_https_external_elb_8444"
 # Outbound access
 #-----------------------------------------------------------------
 resource "aws_security_group_rule" "ghe_server_outbound_http" {
-  security_group_id        = aws_security_group.ghe_server.id
-  type                     = "egress"
-  cidr_blocks              = [ "0.0.0.0/0" ]
-  from_port                = 80
-  to_port                  = 80
-  protocol                 = "tcp"
-  description              = "Outbound http for letsencrypt"
+  security_group_id = aws_security_group.ghe_server.id
+  type              = "egress"
+  cidr_blocks       = ["0.0.0.0/0"] # tfsec:ignore:aws-vpc-no-public-egress-sgr Purposefully accessible
+  from_port         = 80
+  to_port           = 80
+  protocol          = "tcp"
+  description       = "Outbound http for letsencrypt"
 }
 
 resource "aws_security_group_rule" "ghe_server_outbound_https" {
-  security_group_id        = aws_security_group.ghe_server.id
-  type                     = "egress"
-  cidr_blocks              = [ "0.0.0.0/0" ]
-  from_port                = 443
-  to_port                  = 443
-  protocol                 = "tcp"
-  description              = "Outbound https for letsencrypt"
+  security_group_id = aws_security_group.ghe_server.id
+  type              = "egress"
+  cidr_blocks       = ["0.0.0.0/0"] # tfsec:ignore:aws-vpc-no-public-egress-sgr Purposefully accessible
+  from_port         = 443
+  to_port           = 443
+  protocol          = "tcp"
+  description       = "Outbound https for letsencrypt"
 }
 
 resource "aws_security_group_rule" "ghe_server_outbound_syslog" {
-  security_group_id        = aws_security_group.ghe_server.id
-  type                     = "egress"
-  cidr_blocks              = var.cidr_map["vpc-splunk"]
-  from_port                = 1514
-  to_port                  = 1514
-  protocol                 = "tcp"
-  description              = "Outbound syslog"
+  security_group_id = aws_security_group.ghe_server.id
+  type              = "egress"
+  cidr_blocks       = var.cidr_map["vpc-splunk"]
+  from_port         = 1514
+  to_port           = 1514
+  protocol          = "tcp"
+  description       = "Outbound syslog"
 }
 
 

+ 26 - 94
base/github/securitygroups-load-balancers.tf

@@ -20,8 +20,8 @@ locals {
   ]
   salt_masters = [
     "18.253.198.129/32", # Salt Master Prod - proxy
-    "18.253.73.251/32", # salt master prod
-    "18.252.61.81/32", # Salt master dev - proxy
+    "18.253.73.251/32",  # salt master prod
+    "18.252.61.81/32",   # Salt master dev - proxy
     "18.253.226.199/32", # salt aster dev
   ]
   # Locking down sources on 2021-12-10 due to log4j vulnerability
@@ -29,75 +29,7 @@ locals {
   #allowed_sources = concat(var.trusted_ips, local.zscalar_cidrs)
   #allowed_sources = concat(local.zscalar_cidrs, var.trusted_ips, local.salt_masters)
   # Restored access on 2021-12-14
-  allowed_sources = [ "0.0.0.0/0" ]
-}
-
-resource "aws_security_group" "ghe_elb_external" {
-  name_prefix = "ghe_elb_external"
-  tags = merge( var.standard_tags, var.tags, { Name = "github-external-lb" } )
-  vpc_id      = var.vpc_id
-  description = "External ELB for GitHub Enterprise Server"
-}
-
-resource "aws_security_group_rule" "ghe_elb_external_inbound_https_22_cidr" {
-  security_group_id        = aws_security_group.ghe_elb_external.id
-  type                     = "ingress"
-  cidr_blocks              = local.allowed_sources
-  from_port                = 22
-  to_port                  = 22
-  protocol                 = "tcp"
-  description              = "Inbound git"
-}
-
-resource "aws_security_group_rule" "ghe_elb_external_inbound_http_cidr" {
-  security_group_id        = aws_security_group.ghe_elb_external.id
-  type                     = "ingress"
-  cidr_blocks              = local.allowed_sources
-  from_port                = 80
-  to_port                  = 80
-  protocol                 = "tcp"
-  description              = "Inbound http to ELB"
-}
-
-resource "aws_security_group_rule" "ghe_elb_external_inbound_https_cidr" {
-  security_group_id        = aws_security_group.ghe_elb_external.id
-  type                     = "ingress"
-  cidr_blocks              = local.allowed_sources
-  from_port                = 443
-  to_port                  = 444
-  protocol                 = "tcp"
-  description              = "Inbound https to ELB"
-}
-
-# Let the ELB talk to the github server(s)
-resource "aws_security_group_rule" "ghe_elb_external_outbound_ssh" {
-  security_group_id        = aws_security_group.ghe_elb_external.id
-  type                     = "egress"
-  source_security_group_id = aws_security_group.ghe_server.id
-  from_port                = 23
-  to_port                  = 23
-  protocol                 = "tcp"
-  description              = "Outbound ssh (PROXY) from ELB to GH servers"
-}
-
-resource "aws_security_group_rule" "ghe_elb_external_outbound_http" {
-  security_group_id        = aws_security_group.ghe_elb_external.id
-  type                     = "egress"
-  source_security_group_id = aws_security_group.ghe_server.id
-  from_port                = 80
-  to_port                  = 80
-  protocol                 = "tcp"
-  description              = "Outbound HTTP from ELB to GH servers for LetsEncrypt on GHE"
-}
-
-resource "aws_security_group_rule" "ghe_elb_external_outbound_https" {
-  security_group_id        = aws_security_group.ghe_elb_external.id
-  type                     = "egress"
-  source_security_group_id = aws_security_group.ghe_server.id
-  from_port                = 443
-  to_port                  = 443
-  protocol                 = "tcp"
-  description              = "Outbound https from ELB to GH servers"
+  allowed_sources = ["0.0.0.0/0"]
 }
 
 #----------------------------------------------------------------
@@ -105,38 +37,38 @@ resource "aws_security_group_rule" "ghe_elb_external_outbound_https" {
 #----------------------------------------------------------------
 resource "aws_security_group" "ghe_elb_internal" {
   name_prefix = "ghe_elb_internal"
-  tags = merge( var.standard_tags, var.tags, { Name = "github-internal-lb" } )
+  tags        = merge(var.standard_tags, var.tags, { Name = "github-internal-lb" })
   vpc_id      = var.vpc_id
   description = "Internal ELB for GitHub Enterprise Server"
 }
 
 resource "aws_security_group_rule" "ghe_elb_internal_inbound_https_cidr" {
-  security_group_id        = aws_security_group.ghe_elb_internal.id
-  type                     = "ingress"
-  cidr_blocks              = [ "10.0.0.0/8" ]
-  from_port                = 443
-  to_port                  = 443
-  protocol                 = "tcp"
-  description              = "Inbound https"
+  security_group_id = aws_security_group.ghe_elb_internal.id
+  type              = "ingress"
+  cidr_blocks       = ["10.0.0.0/8"]
+  from_port         = 443
+  to_port           = 443
+  protocol          = "tcp"
+  description       = "Inbound https"
 }
 
 resource "aws_security_group_rule" "ghe_elb_internal_inbound_https_8443_cidr" {
-  security_group_id        = aws_security_group.ghe_elb_internal.id
-  type                     = "ingress"
-  cidr_blocks              = [ "10.0.0.0/8" ]
-  from_port                = 8443
-  to_port                  = 8443
-  protocol                 = "tcp"
-  description              = "Inbound https"
+  security_group_id = aws_security_group.ghe_elb_internal.id
+  type              = "ingress"
+  cidr_blocks       = ["10.0.0.0/8"]
+  from_port         = 8443
+  to_port           = 8443
+  protocol          = "tcp"
+  description       = "Inbound https"
 }
 resource "aws_security_group_rule" "ghe_elb_internal_inbound_https_22_cidr" {
-  security_group_id        = aws_security_group.ghe_elb_internal.id
-  type                     = "ingress"
-  cidr_blocks              = [ "10.0.0.0/8" ]
-  from_port                = 22
-  to_port                  = 22
-  protocol                 = "tcp"
-  description              = "Inbound git"
+  security_group_id = aws_security_group.ghe_elb_internal.id
+  type              = "ingress"
+  cidr_blocks       = ["10.0.0.0/8"]
+  from_port         = 22
+  to_port           = 22
+  protocol          = "tcp"
+  description       = "Inbound git"
 }
 
 # Let the ELB talk to the github server(s)
@@ -164,7 +96,7 @@ resource "aws_security_group_rule" "ghe_elb_internal_outbound_23_https" {
   security_group_id        = aws_security_group.ghe_elb_internal.id
   type                     = "egress"
   source_security_group_id = aws_security_group.ghe_server.id
-  from_port                = 23 
+  from_port                = 23
   to_port                  = 23
   protocol                 = "tcp"
   description              = "Outbound https from ELB to GH Servers"

+ 3 - 3
base/splunk_servers/indexer_cluster/asg.tf

@@ -17,7 +17,7 @@ module "indexer0" {
   tags                       = merge(var.standard_tags, var.tags, var.instance_tags[0], { Name = "${local.asg_name}-0" })
 
   # 2022-04-22: FTD - Debugging dying indexers in test
-  #suspended_processes = var.environment == "test" ? ["Terminate"] : []
+  suspended_processes = var.environment == "test" ? ["Terminate"] : []
 }
 
 module "indexer1" {
@@ -39,7 +39,7 @@ module "indexer1" {
   tags                       = merge(var.standard_tags, var.tags, var.instance_tags[1], { Name = "${local.asg_name}-1" })
 
   # 2022-04-22: FTD - Debugging dying indexers in test
-  #suspended_processes = var.environment == "test" ? ["Terminate"] : []
+  suspended_processes = var.environment == "test" ? ["Terminate"] : []
 }
 
 module "indexer2" {
@@ -61,5 +61,5 @@ module "indexer2" {
   tags                       = merge(var.standard_tags, var.tags, var.instance_tags[2], { Name = "${local.asg_name}-2" })
 
   # 2022-04-22: FTD - Debugging dying indexers in test
-  #suspended_processes = var.environment == "test" ? ["Terminate"] : []
+  suspended_processes = var.environment == "test" ? ["Terminate"] : []
 }

+ 19 - 0
submodules/load_balancer/static_nlb_to_alb/elb.tf → submodules/load_balancer/static_nlb_to_alb/alb.tf

@@ -63,3 +63,22 @@ resource "aws_lb_listener" "https_external" {
     type             = "forward"
   }
 }
+
+# If primary port is 443, redirect 80 to 443
+resource "aws_lb_listener" "portal_https_redirect" {
+  count = var.redirect_80 ? 1 : 0
+
+  load_balancer_arn = aws_lb.external.arn
+  port              = "80"
+  protocol          = "HTTP"
+
+  default_action {
+    type = "redirect"
+
+    redirect {
+      port        = var.listener_port
+      protocol    = "HTTPS"
+      status_code = "HTTP_301"
+    }
+  }
+}

+ 2 - 0
submodules/load_balancer/static_nlb_to_alb/certificate.tf

@@ -5,6 +5,8 @@ resource "aws_acm_certificate" "cert_public" {
   domain_name       = "${var.name}.${var.dns_info["public"]["zone"]}"
   validation_method = "DNS"
 
+  subject_alternative_names = var.subject_alternative_names
+
   lifecycle {
     create_before_destroy = true
   }

+ 39 - 0
submodules/load_balancer/static_nlb_to_alb/nlb.tf

@@ -5,6 +5,10 @@ resource "aws_eip" "static" {
   #checkov:skip=CKV2_AWS_19:These EIPs are attached to the NLB
   vpc = true
 
+  lifecycle {
+    prevent_destroy = true # Even if everything else goes away, we want to keep these.
+  }
+
   tags = merge(var.tags, { Name = "${var.name}-nlb-external-${var.environment}" })
 }
 
@@ -61,3 +65,38 @@ resource "aws_lb_target_group_attachment" "static" {
   target_id        = aws_lb.external.id
   port             = var.listener_port
 }
+
+# Redirect
+resource "aws_lb_listener" "static-redirect" {
+  count = var.redirect_80 ? 1 : 0
+
+  load_balancer_arn = aws_lb.static.arn
+  port              = 80
+  protocol          = "TCP"
+  default_action {
+    type             = "forward"
+    target_group_arn = aws_lb_target_group.static-redirect[0].arn
+  }
+
+  tags = merge(var.tags, { Name = "${var.name}-nlb-external-${var.environment}" })
+}
+
+resource "aws_lb_target_group" "static-redirect" {
+  count = var.redirect_80 ? 1 : 0
+
+  name_prefix = substr("${var.name}-static", 0, 6)
+  port        = 80
+  protocol    = "TCP"
+  target_type = "alb"
+  vpc_id      = var.vpc_id
+
+  tags = merge(var.tags, { Name = "${var.name}-nlb-external-${var.environment}" })
+}
+
+resource "aws_lb_target_group_attachment" "static-redirect" {
+  count = var.redirect_80 ? 1 : 0
+
+  target_group_arn = aws_lb_target_group.static-redirect[0].arn
+  target_id        = aws_lb.external.id
+  port             = 80
+}

+ 12 - 0
submodules/load_balancer/static_nlb_to_alb/outputs.tf

@@ -2,14 +2,26 @@ output "security_group_id" {
   value = aws_security_group.lb_server_external.id
 }
 
+output "alb" {
+  value = aws_lb.external
+}
+
 output "alb_id" {
   value = aws_lb.external.id
 }
 
+output "nlb" {
+  value = aws_lb.static
+}
+
 output "nlb_id" {
   value = aws_lb.static.id
 }
 
+output "nlb_name" {
+  value = aws_lb.static.name
+}
+
 output "static_ips" {
   value = aws_eip.static[*].public_ip
 }

+ 13 - 1
submodules/load_balancer/static_nlb_to_alb/security-groups.tf

@@ -19,7 +19,19 @@ resource "aws_security_group_rule" "allow_from_any" {
   from_port         = var.listener_port
   to_port           = var.listener_port
   protocol          = "tcp"
-  cidr_blocks       = ["0.0.0.0/0"]
+  cidr_blocks       = ["0.0.0.0/0"] # tfsec:ignore:aws-vpc-no-public-ingress-sgr Intentionally Open
+  security_group_id = aws_security_group.lb_server_external.id
+}
+
+resource "aws_security_group_rule" "allow_http_rediret" {
+  count = var.redirect_80 ? 1 : 0
+
+  description       = "${var.name} - Allow from Any"
+  type              = "ingress"
+  from_port         = 80
+  to_port           = 80
+  protocol          = "tcp"
+  cidr_blocks       = ["0.0.0.0/0"] # tfsec:ignore:aws-vpc-no-public-ingress-sgr Intentionally Open
   security_group_id = aws_security_group.lb_server_external.id
 }
 

+ 12 - 0
submodules/load_balancer/static_nlb_to_alb/vars.tf

@@ -3,6 +3,18 @@ variable "name" {
   type        = string
 }
 
+variable "subject_alternative_names" {
+  description = "List of alternative names for the certificate."
+  type        = list(string)
+  default     = []
+}
+
+variable "redirect_80" {
+  description = "True sets up a redirect from 80 to listener port"
+  type        = bool
+  default     = false
+}
+
 variable "target_ids" {
   description = "List of targets to assign to the ALB"
   type        = set(string)

+ 1 - 1
submodules/load_balancer/static_nlb_to_alb/waf.tf

@@ -9,7 +9,7 @@ module "waf" {
   admin_ips              = var.admin_ips #concat(var.zscalar_ips, var.admin_ips)
 
   resource_arn = aws_lb.external.arn
-  fqdns        = module.public_dns_record.forward # first entry in list will be the WAF name
+  fqdns        = concat(module.public_dns_record.forward, var.subject_alternative_names) # first entry in list will be the WAF name
 
   # Passthrough
   excluded_rules_AWSManagedRulesCommonRuleSet          = var.excluded_rules_AWSManagedRulesCommonRuleSet