Browse Source

Merge pull request #166 from mdr-engineering/feature/ftd_MSOCI-1515_NessusInitialDraft

Modules to Deploy Nessus Security Center and Scanners
Frederick Damstra 4 years ago
parent
commit
e50bae3f31

+ 1 - 0
base/nessus/instance_nessus_scanner/amis.tf

@@ -0,0 +1 @@
+../../amis.tf

+ 73 - 0
base/nessus/instance_nessus_scanner/cloud-init/cloud-init.tpl

@@ -0,0 +1,73 @@
+#cloud-config
+preserve_hostname: false
+hostname: ${hostname}
+salt-master: ${salt_master}
+fqdn: ${fqdn}
+
+# Write files happens early
+write_files:
+- content: |
+    proxy=http://${proxy}:80
+  path: /etc/yum.conf
+  append: true
+- content: |
+    [global]
+    proxy=${proxy}
+  path: /etc/pip.conf
+- content: |
+    export HTTPS_PROXY=http://${proxy}:80
+    export HTTP_PROXY=http://${proxy}:80
+    export NO_PROXY=localhost,127.0.0.1,169.254.169.254,pvt.xdrtest.accenturefederalcyber.com,pvt.xdr.accenturefederalcyber.com,reposerver.msoc.defpoint.local,jenkins.msoc.defpoint.local,pod1search-splunk-sh.msoc.defpoint.local,s3.amazonaws.com,ssm.${ aws_region }.amazonaws.com,ec2messages.${ aws_region }.amazonaws.com,ec2.${ aws_region }.amazonaws.com,ssmmessages.${ aws_region }.amazonaws.com,iratemoses.mdr.defpoint.com,jira.mdr.defpoint.com,reposerver.pvt.xdr.accenturefederalcyber.com,jenkins.pvt.xdr.accenturefederalcyber.com,pod1search-splunk-sh.pvt.xdr.accenturefederalcyber.com,reposerver.pvt.xdrtest.accenturefederalcyber.com,jenkins.pvt.xdrtest.accenturefederalcyber.com,pod1search-splunk-sh.pvt.xdrtest.accenturefederalcyber.com,iratemoses.xdr.accenturefederalcyber.com,jira.xdr.accenturefederalcyber.com,iratemoses.xdrtest.accenturefederalcyber.com,jira.xdrtest.accenturefederalcyber.com
+    export https_proxy=$HTTPS_PROXY
+    export http_proxy=$HTTP_PROXY
+    export no_proxy=$NO_PROXY
+  path: /etc/profile.d/proxy.sh
+- content: |
+    ${fqdn}
+  path: /etc/salt/minion_id
+- content: |
+    master: ${salt_master}
+  path: /etc/salt/minion
+- content: |
+    grains:
+      environment: ${ environment }
+      aws_partition: ${ aws_partition }
+      aws_partition_alias: ${ aws_partition_alias }
+      aws_region: ${ aws_region }
+  path: /etc/salt/minion.d/cloud_init_grains.conf
+
+#yum_repos:
+#  epel-release:
+#    baseurl: http://download.fedoraproject.org/pub/epel/7/$basearch
+#    enabled: false
+#    failovermethod: priority
+#    gpgcheck: true
+#    gpgkey: http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
+#    name: Extra Packages for Enterprise Linux 7 - Release
+
+packages:
+ - vim
+
+package_update: true # Always patch
+
+growpart:
+  mode: auto
+  devices: [ '/', '/var', '/var/log', '/var/log/audit', '/var/tmp', '/tmp', '/home' ]
+  ignore_growroot_disabled: false
+
+runcmd:
+ - /bin/systemctl restart salt-minion
+ - /bin/systemctl enable salt-minion
+ - /bin/systemctl start amazon-ssm-agent
+ - /bin/systemctl enable amazon-ssm-agent
+ - /usr/sbin/aide --update --verbose=0
+ - /bin/cp /var/lib/aide/aide.db.new.gz /var/lib/aide/aide.db.gz
+
+# Either final message or power state, but probably not both
+final_message: "The system is up after $UPTIME seconds"
+#power_state:
+#  delay: "+30"
+#  mode: reboot
+#  message: "System configured after $UPTIME seconds"
+#  timeout: 300
+#  condition: true

+ 188 - 0
base/nessus/instance_nessus_scanner/main.tf

@@ -0,0 +1,188 @@
+# Some instance variables
+locals {
+  ami_selection = "minion" # master, minion, ...
+}
+
+#data "aws_security_group" "typical-host" {
+#  name   = "typical-host"
+#  vpc_id = var.vpc_id
+#}
+
+# Use the default EBS key
+data "aws_kms_key" "ebs-key" {
+  key_id = "alias/ebs_root_encrypt_decrypt"
+}
+
+# One ENI per scanner, spread round-robin across the private subnets.
+resource "aws_network_interface" "nessus-scanner-interface" {
+  count = var.nessus_scanner_count
+  # Wrap on the actual subnet count rather than a hard-coded 3: with a
+  # literal modulus, supplying fewer than 3 subnets raises an
+  # index-out-of-range error, and supplying more silently ignores them.
+  subnet_id = var.private_subnets[count.index % length(var.private_subnets)]
+  #security_groups = [ data.aws_security_group.typical-host.id, aws_security_group.nessus_scanner.id ]
+  security_groups = [ aws_security_group.nessus_scanner.id ]
+  description = "nessus-scanner-${count.index}"
+  tags = merge(var.standard_tags, var.tags, { Name = "nessus-scanner-${count.index}" })
+}
+
+resource "aws_instance" "nessus-scanner-instance" {
+  count = var.nessus_scanner_count
+  tenancy = "default"
+  ebs_optimized = true
+  disable_api_termination = var.instance_termination_protection
+  instance_initiated_shutdown_behavior = "stop"
+  instance_type = var.instance_type
+  key_name = "msoc-build"
+  monitoring = false
+  iam_instance_profile = "msoc-default-instance-profile"
+
+  ami = local.ami_map[local.ami_selection]
+  # We need to ignore ebs_block_device changes, because if the AMI changes, so does the snapshot_id.
+  # If they add a feature to block more specific changes (eg `ebs_block_devices[*].snapshot_id`), then
+  # that could be removed.
+  #lifecycle { ignore_changes = [ ami, key_name, user_data, ebs_block_device ] }
+  lifecycle { ignore_changes = [ ami, key_name, user_data ] }
+
+  # These device definitions are optional, but added for clarity.
+  root_block_device {
+      volume_type = "gp3"
+      volume_size = "40"
+      delete_on_termination = true
+      encrypted = true
+      kms_key_id = data.aws_kms_key.ebs-key.arn
+  }
+
+  ebs_block_device {
+    # swap
+    device_name = "/dev/xvdm"
+    volume_type = "gp3"
+    volume_size = 8
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    # Snapshot IDs need to be grabbed from the ami, or it will replace every time. It's ugly.
+    # This may prompt replacement when the AMI is updated.
+    # See:
+    #   https://github.com/hashicorp/terraform/issues/19958
+    #   https://github.com/terraform-providers/terraform-provider-aws/issues/13118
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdm"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /home
+    device_name = "/dev/xvdn"
+    volume_type = "gp3"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdn"].ebs.snapshot_id
+
+  }
+  ebs_block_device {
+    # /var
+    device_name = "/dev/xvdo"
+    volume_type = "gp3"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdo"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/tmp
+    device_name = "/dev/xvdp"
+    volume_type = "gp3"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdp"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/log
+    device_name = "/dev/xvdq"
+    volume_type = "gp3"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdq"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/log/audit
+    device_name = "/dev/xvdr"
+    volume_type = "gp3"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdr"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /tmp
+    device_name = "/dev/xvds"
+    volume_type = "gp3"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvds"].ebs.snapshot_id
+  }
+
+  network_interface {
+    device_index = 0
+    network_interface_id = aws_network_interface.nessus-scanner-interface[count.index].id
+  }
+
+  user_data = data.template_cloudinit_config.cloud-init[count.index].rendered
+  tags = merge( var.standard_tags, var.tags, { Name = "nessus-scanner-${count.index}" })
+  volume_tags = merge( var.standard_tags, var.tags, { Name = "nessus-scanner-${count.index}" })
+}
+
+data "template_file" "cloud-init" {
+  count = var.nessus_scanner_count
+  template = file("${path.module}/cloud-init/cloud-init.tpl")
+
+  vars = {
+    hostname = "nessus-scanner-${count.index}"
+    fqdn = "nessus-scanner-${count.index}.${var.dns_info["private"]["zone"]}"
+    environment = var.environment
+    salt_master  = var.salt_master
+    proxy = var.proxy
+    aws_partition = var.aws_partition
+    aws_partition_alias = var.aws_partition_alias
+    aws_region = var.aws_region
+  }
+}
+
+# Render a multi-part cloud-init config making use of the part
+# above, and other source files
+data "template_cloudinit_config" "cloud-init" {
+  count = var.nessus_scanner_count
+  gzip          = true
+  base64_encode = true
+
+  # Main cloud-config configuration file.
+  part {
+    filename     = "init.cfg"
+    content_type = "text/cloud-config"
+    content      = data.template_file.cloud-init[count.index].rendered
+  }
+
+  # Additional parts as needed
+  #part {
+  #  content_type = "text/x-shellscript"
+  #  content      = "ffbaz"
+  #}
+}
+
+module "private_dns_record_nessus-scanner" {
+  count = var.nessus_scanner_count
+  source = "../../../submodules/dns/private_A_record"
+
+  name = "nessus-scanner-${count.index}"
+  ip_addresses = [ aws_instance.nessus-scanner-instance[count.index].private_ip ]
+  dns_info = var.dns_info
+  reverse_enabled = var.reverse_enabled
+
+  providers = {
+    aws.c2 = aws.c2
+  }
+}

+ 27 - 0
base/nessus/instance_nessus_scanner/outputs.tf

@@ -0,0 +1,27 @@
+#output backup_instance_arn {
+#  value = aws_instance.ghe-backup-instance.arn
+#}
+#
+#output backup_instance_private_ip {
+#  value = aws_instance.ghe-backup-instance.private_ip
+#}
+#
+#output instance_arn {
+#  value = [ for instance in aws_instance.ghe[*]: instance.arn ]
+#}
+#
+#output instance_private_ip {
+#  value = [ for instance in aws_instance.ghe[*]: instance.private_ip ]
+#}
+#
+#output public_url {
+#  value = "github.${var.dns_info["public"]["zone"]}"
+#}
+#
+#output private_url {
+#  value = "github.${var.dns_info["private"]["zone"]}"
+#}
+#
+#output efs_id {
+#  value = aws_efs_file_system.ghe_backup_data.id
+#}

+ 71 - 0
base/nessus/instance_nessus_scanner/securitygroup-server.tf

@@ -0,0 +1,71 @@
+# SG Summary - Scanner
+#     Ingress:
+#       22 - SSH from the access and private-services VPCs
+#       443 - management access from the access and private-services VPCs
+#       tcp/8834-8835 - Security Center communicating with the scanners
+#       all - self-referencing rule so scanners can scan themselves/peers
+#     Egress:
+#       all ports/protocols to 10.0.0.0/8 (scan targets)
+resource "aws_security_group" "nessus_scanner" {
+  name_prefix = "nessus_scanner"
+  tags = merge( var.standard_tags, var.tags, { Name = "nessus_scanner" } )
+  vpc_id      = var.vpc_id
+  description = "Nessus Security Scanner"
+}
+
+#-----------------------------------------------------------------
+# Inbound access
+#-----------------------------------------------------------------
+# SSH from the access and private-services VPCs (sync between scanners /
+# administration). toset() de-duplicates any overlap of the two CIDR lists.
+resource "aws_security_group_rule" "nessus_scanner_inbound_22" {
+  security_group_id        = aws_security_group.nessus_scanner.id
+  type                     = "ingress"
+  cidr_blocks              = toset(concat(var.cidr_map["vpc-access"], var.cidr_map["vpc-private-services"]))
+  from_port                = 22
+  to_port                  = 22
+  protocol                 = "tcp"
+  # Was mislabeled "Inbound 443 (from access)" - copy/paste from the 443 rule.
+  description              = "Inbound 22 (from access)"
+}
+
+resource "aws_security_group_rule" "nessus_scanner_inbound_443" {
+  # HTTPS management access from the access and private-services VPCs;
+  # toset() de-duplicates any overlap between the two CIDR lists.
+  type              = "ingress"
+  protocol          = "tcp"
+  from_port         = 443
+  to_port           = 443
+  cidr_blocks       = toset(concat(var.cidr_map["vpc-access"], var.cidr_map["vpc-private-services"]))
+  description       = "Inbound 443 (from access)"
+  security_group_id = aws_security_group.nessus_scanner.id
+}
+
+# Ports 8834-8835 are the scanner-communication ports called out in the
+# file-header summary ("Communicating With Nessus"); Security Center
+# reaches the scanners on these.
+resource "aws_security_group_rule" "nessus_scanner_inbound_nessus" {
+  security_group_id        = aws_security_group.nessus_scanner.id
+  type                     = "ingress"
+  cidr_blocks              = toset(concat(var.cidr_map["vpc-access"], var.cidr_map["vpc-private-services"]))
+  from_port                = 8834
+  to_port                  = 8835
+  protocol                 = "tcp"
+  description              = "Inbound Nessus"
+}
+
+# Self-referencing rule: allows any scanner in this SG to scan itself and
+# its peers on any port/protocol.
+# NOTE(review): the canonical "all traffic" spelling in the AWS provider is
+# protocol = "-1" with from_port/to_port = 0; confirm that "all" with -1
+# ports is accepted and normalized cleanly by a plan/apply.
+resource "aws_security_group_rule" "nessus_scanner_inbound_scan_ourselves" {
+  security_group_id        = aws_security_group.nessus_scanner.id
+  source_security_group_id = aws_security_group.nessus_scanner.id
+  type                     = "ingress"
+  from_port                = -1
+  to_port                  = -1
+  protocol                 = "all"
+  description              = "Inbound Scanning of Ourselves"
+}
+
+#-----------------------------------------------------------------
+# Outbound access
+#-----------------------------------------------------------------
+# Scanners may reach any 10.0.0.0/8 target on any port/protocol - required
+# for port/vulnerability scanning of internal hosts.
+# NOTE(review): the canonical "all traffic" spelling in the AWS provider is
+# protocol = "-1" with from_port/to_port = 0; confirm that "all" with -1
+# ports is accepted and normalized cleanly by a plan/apply.
+resource "aws_security_group_rule" "nessus_scanner_outbound_all_ports" {
+  security_group_id        = aws_security_group.nessus_scanner.id
+  type                     = "egress"
+  cidr_blocks              = [ "10.0.0.0/8" ]
+  from_port                = -1
+  to_port                  = -1
+  protocol                 = "all"
+  description              = "Outbound to All Ports"
+}

+ 52 - 0
base/nessus/instance_nessus_scanner/vars.tf

@@ -0,0 +1,52 @@
+variable "nessus_scanner_count" {
+  description = "Number of Nessus scanner instances (and matching ENIs/DNS records) to create."
+  type        = number
+}
+
+variable "public_subnets" {
+  type = list(string)
+}
+
+variable "private_subnets" {
+  description = "Private subnet IDs the scanner ENIs are spread across."
+  type        = list(string)
+}
+
+variable "azs" {
+  type = list(string)
+}
+
+variable "vpc_id" {
+  type = string
+}
+
+variable "tags" {
+  description = "Tags to add to the resource (in addition to global standard tags)"
+  # Bare `map` is legacy 0.11 shorthand; map(any) is the equivalent modern
+  # spelling and avoids the deprecation warning under 0.13.
+  type        = map(any)
+  default     = {}
+}
+
+variable "instance_type" {
+  type    = string
+  default = "t3a.micro"
+}
+
+variable "reverse_enabled" {
+  description = "Whether to create the reverse DNS entry."
+  type        = bool
+  default     = true
+}
+
+variable "trusted_ips" { type = list(string) }
+variable "proxy" { type = string }
+variable "salt_master" { type = string }
+
+variable "cidr_map" { type = map(any) }
+variable "dns_info" { type = map(any) }
+variable "standard_tags" { type = map(any) }
+variable "environment" { type = string }
+variable "aws_region" { type = string }
+variable "ses_region" { type = string }
+variable "aws_partition" { type = string }
+variable "aws_partition_alias" { type = string }
+variable "aws_account_id" { type = string }
+variable "common_services_account" { type = string }
+variable "instance_termination_protection" { type = bool }

+ 3 - 0
base/nessus/instance_nessus_scanner/version.tf

@@ -0,0 +1,3 @@
+terraform {
+  required_version = "~> 0.13"
+}

+ 1 - 0
base/nessus/instance_security_center/amis.tf

@@ -0,0 +1 @@
+../../amis.tf

+ 33 - 0
base/nessus/instance_security_center/certificate.tf

@@ -0,0 +1,33 @@
+#----------------------------------------------------------------------------
+# DNS Certificate
+#----------------------------------------------------------------------------
+resource "aws_acm_certificate" "cert_private" {
+  domain_name       = "security-center.${var.dns_info["private"]["zone"]}"
+  validation_method = "DNS"
+
+  tags = merge(var.standard_tags, var.tags)
+}
+
+resource "aws_acm_certificate_validation" "cert_private" {
+  certificate_arn         = aws_acm_certificate.cert_private.arn
+  validation_record_fqdns = [for record in aws_route53_record.cert_validation_private: record.fqdn]
+}
+
+# DNS validation records for the certificate above. They are created in the
+# PUBLIC zone (in the common-services account) even though the certificate
+# covers a private-zone name: ACM validation lookups must resolve from the
+# public Internet.
+# NOTE(review): this only works if dns_info["private"]["zone"] is a
+# subdomain delegated under dns_info["public"]["zone"] - confirm, otherwise
+# validation will hang forever.
+resource "aws_route53_record" "cert_validation_private" {
+  provider = aws.mdr-common-services-commercial
+
+  # One record per domain_validation_option, keyed by domain name (the
+  # for_each-over-dvo pattern required by AWS provider v3).
+  for_each = {
+    for dvo in aws_acm_certificate.cert_private.domain_validation_options : dvo.domain_name => {
+      name   = dvo.resource_record_name
+      record = dvo.resource_record_value
+      type   = dvo.resource_record_type
+    }
+  }
+
+  allow_overwrite = true
+  name            = each.value.name
+  records         = [each.value.record]
+  ttl             = 60
+  type            = each.value.type
+  zone_id         = var.dns_info["public"]["zone_id"]
+}

+ 73 - 0
base/nessus/instance_security_center/cloud-init/cloud-init.tpl

@@ -0,0 +1,73 @@
+#cloud-config
+preserve_hostname: false
+hostname: ${hostname}
+salt-master: ${salt_master}
+fqdn: ${fqdn}
+
+# Write files happens early
+write_files:
+- content: |
+    proxy=http://${proxy}:80
+  path: /etc/yum.conf
+  append: true
+- content: |
+    [global]
+    proxy=${proxy}
+  path: /etc/pip.conf
+- content: |
+    export HTTPS_PROXY=http://${proxy}:80
+    export HTTP_PROXY=http://${proxy}:80
+    export NO_PROXY=localhost,127.0.0.1,169.254.169.254,pvt.xdrtest.accenturefederalcyber.com,pvt.xdr.accenturefederalcyber.com,reposerver.msoc.defpoint.local,jenkins.msoc.defpoint.local,pod1search-splunk-sh.msoc.defpoint.local,s3.amazonaws.com,ssm.${ aws_region }.amazonaws.com,ec2messages.${ aws_region }.amazonaws.com,ec2.${ aws_region }.amazonaws.com,ssmmessages.${ aws_region }.amazonaws.com,iratemoses.mdr.defpoint.com,jira.mdr.defpoint.com,reposerver.pvt.xdr.accenturefederalcyber.com,jenkins.pvt.xdr.accenturefederalcyber.com,pod1search-splunk-sh.pvt.xdr.accenturefederalcyber.com,reposerver.pvt.xdrtest.accenturefederalcyber.com,jenkins.pvt.xdrtest.accenturefederalcyber.com,pod1search-splunk-sh.pvt.xdrtest.accenturefederalcyber.com,iratemoses.xdr.accenturefederalcyber.com,jira.xdr.accenturefederalcyber.com,iratemoses.xdrtest.accenturefederalcyber.com,jira.xdrtest.accenturefederalcyber.com
+    export https_proxy=$HTTPS_PROXY
+    export http_proxy=$HTTP_PROXY
+    export no_proxy=$NO_PROXY
+  path: /etc/profile.d/proxy.sh
+- content: |
+    ${fqdn}
+  path: /etc/salt/minion_id
+- content: |
+    master: ${salt_master}
+  path: /etc/salt/minion
+- content: |
+    grains:
+      environment: ${ environment }
+      aws_partition: ${ aws_partition }
+      aws_partition_alias: ${ aws_partition_alias }
+      aws_region: ${ aws_region }
+  path: /etc/salt/minion.d/cloud_init_grains.conf
+
+#yum_repos:
+#  epel-release:
+#    baseurl: http://download.fedoraproject.org/pub/epel/7/$basearch
+#    enabled: false
+#    failovermethod: priority
+#    gpgcheck: true
+#    gpgkey: http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
+#    name: Extra Packages for Enterprise Linux 7 - Release
+
+packages:
+ - vim
+
+package_update: true # Always patch
+
+growpart:
+  mode: auto
+  devices: [ '/', '/var', '/var/log', '/var/log/audit', '/var/tmp', '/tmp', '/home' ]
+  ignore_growroot_disabled: false
+
+runcmd:
+ - /bin/systemctl restart salt-minion
+ - /bin/systemctl enable salt-minion
+ - /bin/systemctl start amazon-ssm-agent
+ - /bin/systemctl enable amazon-ssm-agent
+ - /usr/sbin/aide --update --verbose=0
+ - /bin/cp /var/lib/aide/aide.db.new.gz /var/lib/aide/aide.db.gz
+
+# Either final message or power state, but probably not both
+final_message: "The system is up after $UPTIME seconds"
+#power_state:
+#  delay: "+30"
+#  mode: reboot
+#  message: "System configured after $UPTIME seconds"
+#  timeout: 300
+#  condition: true

+ 141 - 0
base/nessus/instance_security_center/elb.tf

@@ -0,0 +1,141 @@
+#----------------------------------------------------------------------------
+# INTERNAL LB
+#----------------------------------------------------------------------------
+resource "aws_alb" "security_center_internal" {
+  name               = "nessus-sc-alb-${var.environment}"
+  security_groups    = [ aws_security_group.security_center_alb_server_internal.id ]
+  internal           = true 
+  subnets            = var.private_subnets
+  load_balancer_type = "application"
+
+  access_logs {
+    bucket  = "xdr-elb-${ var.environment }"
+    enabled = true
+  }
+
+  idle_timeout = 1200
+
+  tags = merge(var.standard_tags, var.tags, { Name = "security-center-alb-internal-${var.environment}" })
+}
+
+# Target group for the Security Center instance (attached below).
+resource "aws_alb_target_group" "security_center_internal" {
+  # Environment-suffixed (matching the ALB name "nessus-sc-alb-...") so two
+  # environments can share one AWS account without a TG name collision.
+  # Renaming forces replacement, which is safe pre-merge.
+  name                 = "security-center-tg-${var.environment}"
+  port                 = 443
+  protocol             = "HTTPS"
+  vpc_id               = var.vpc_id
+
+  health_check {
+    protocol = "HTTPS"
+    port     = "443"
+    path     = "/"
+    matcher  = "200,302"  # 302 allowed: unauthenticated requests redirect to login
+    timeout  = "4"
+    interval = "5"
+    unhealthy_threshold = 2
+    healthy_threshold   = 2
+  }
+
+  #stickiness {
+  #  type    = "lb_cookie"
+  #  enabled = false 
+  #}
+
+  tags = merge(var.standard_tags, var.tags)
+}
+
+resource "aws_lb_target_group_attachment" "security_center_internal" {
+  target_group_arn = aws_alb_target_group.security_center_internal.arn
+  target_id        = aws_instance.security-center-instance.id
+  port             = 443
+}
+
+# Create a new alb listener
+resource "aws_alb_listener" "security_center_https_internal" {
+  load_balancer_arn = aws_alb.security_center_internal.arn
+  port              = "443"
+  protocol          = "HTTPS"
+  ssl_policy        = "ELBSecurityPolicy-FS-1-2-Res-2019-08" # PFS, TLS1.2, most "restrictive" policy (took awhile to find that)
+  certificate_arn   = aws_acm_certificate.cert_private.arn
+
+  default_action {
+    target_group_arn = aws_alb_target_group.security_center_internal.arn
+    type             = "forward"
+  }
+}
+
+resource "aws_lb_listener" "security_center_listener_http" {
+  load_balancer_arn = aws_alb.security_center_internal.arn
+  port              = "80"
+  protocol          = "HTTP"
+
+  default_action {
+    type             = "redirect"
+
+    redirect {
+      port        = "443"
+      protocol    = "HTTPS"
+      status_code = "HTTP_301"
+    }
+  }
+}
+
+# #########################
+# # DNS Entry
+module "private_dns_record" {
+  source = "../../../submodules/dns/private_CNAME_record"
+
+  name = "security-center"
+  target_dns_names = [ aws_alb.security_center_internal.dns_name ]
+  dns_info = var.dns_info
+
+  providers = {
+    aws.c2 = aws.c2
+  }
+}
+
+#----------------------------------------------------------------------------
+# ALB Security Group
+#----------------------------------------------------------------------------
+resource "aws_security_group" "security_center_alb_server_internal" {
+  vpc_id      = var.vpc_id
+  name        = "security-center-alb-sg-internal"
+  description = "ALB for Security Center"
+  tags = merge(var.standard_tags, var.tags)
+}
+
+#----------------------------------------------------------------------------
+# INGRESS
+#----------------------------------------------------------------------------
+resource "aws_security_group_rule" "http_from_access" {
+  # The ALB is internal (see aws_alb above) and only admits the access VPC;
+  # port 80 exists solely to issue the 301 redirect to HTTPS.
+  # Description corrected - previously claimed "from Internet".
+  description = "HTTP inbound from access VPC"
+  type = "ingress"
+  from_port = "80"
+  to_port = "80"
+  protocol = "tcp"
+  cidr_blocks = var.cidr_map["vpc-access"]
+  security_group_id = aws_security_group.security_center_alb_server_internal.id
+}
+
+resource "aws_security_group_rule" "https_from_access" {
+  # The ALB is internal (see aws_alb above) and only admits the access VPC.
+  # Description corrected - previously claimed "from Internet".
+  description = "HTTPS inbound from access VPC"
+  type = "ingress"
+  from_port = "443"
+  to_port = "443"
+  protocol = "tcp"
+  cidr_blocks = var.cidr_map["vpc-access"]
+  security_group_id = aws_security_group.security_center_alb_server_internal.id
+}
+
+#----------------------------------------------------------------------------
+# EGRESS
+#----------------------------------------------------------------------------
+resource "aws_security_group_rule" "security_center_alb_to_server" {
+  description = "Security Center ALB to the Server"
+  type = "egress"
+  from_port = "443"
+  to_port = "443"
+  protocol = "tcp"
+  source_security_group_id = aws_security_group.security_center.id
+  security_group_id = aws_security_group.security_center_alb_server_internal.id
+}

+ 85 - 0
base/nessus/instance_security_center/instance-profile.tf.skipped

@@ -0,0 +1,85 @@
+resource "aws_iam_instance_profile" "jira_server_instance_profile" {
+  name     = "jira-server-instance-profile"
+  role     = aws_iam_role.jira_server.name
+}
+
+resource "aws_iam_role" "jira_server" {
+  name     = "jira-server-instance-role"
+  path     = "/instance/"
+
+  assume_role_policy = <<EOF
+{
+    "Version": "2012-10-17",
+    "Statement": [
+      {
+        "Sid": "",
+        "Effect": "Allow",
+        "Principal": {
+          "Service": [
+            "ec2.amazonaws.com",
+            "ssm.amazonaws.com"
+            ]
+        },
+        "Action": "sts:AssumeRole"
+      }
+    ]
+  }
+EOF
+}
+
+data "aws_iam_policy_document" "jira_server_ecr_policy" {
+  statement {
+    actions = [
+      "ecr:GetAuthorizationToken",
+    ]
+
+    resources = ["*"]
+  }
+
+  statement {
+    sid    = "AllowCluCommunicationECR"
+    effect = "Allow"
+
+    actions = [
+			"ecr:BatchCheckLayerAvailability",
+			"ecr:GetDownloadUrlForLayer",
+			"ecr:GetRepositoryPolicy",
+			"ecr:DescribeRepositories",
+			"ecr:ListImages",
+			"ecr:DescribeImages",
+			"ecr:BatchGetImage",
+			"ecr:InitiateLayerUpload",
+			"ecr:UploadLayerPart",
+			"ecr:CompleteLayerUpload",
+			"ecr:PutImage"
+    ]
+
+    resources = [
+      "arn:${var.aws_partition}:ecr:us-east-1:${var.aws_account_id}:repository/*"
+    ]
+  }
+
+  statement {
+    sid    = "Tags"
+    effect = "Allow"
+
+    actions = [
+      "ec2:DescribeTags",
+      "ec2:DescribeInstances"
+    ]
+    resources = [
+      "*"
+    ]
+  }
+}
+
+resource "aws_iam_policy" "jira_server_ecr_policy" {
+  name     = "jira-server"
+  path     = "/instance/"
+  policy   = data.aws_iam_policy_document.jira_server_ecr_policy.json
+}
+
+resource "aws_iam_role_policy_attachment" "jira_server_ecr" {
+  role       = aws_iam_role.jira_server.name
+  policy_arn = aws_iam_policy.jira_server_ecr_policy.arn
+}

+ 183 - 0
base/nessus/instance_security_center/main.tf

@@ -0,0 +1,183 @@
+# Some instance variables
+locals {
+  ami_selection = "minion" # master, minion, ...
+}
+
+data "aws_security_group" "typical-host" {
+  name   = "typical-host"
+  vpc_id = var.vpc_id
+}
+
+# Use the default EBS key
+data "aws_kms_key" "ebs-key" {
+  key_id = "alias/ebs_root_encrypt_decrypt"
+}
+
+resource "aws_network_interface" "security-center-interface" {
+  subnet_id = var.private_subnets[0]
+  security_groups = [ data.aws_security_group.typical-host.id, aws_security_group.security_center.id ]
+  description = "security-center"
+  tags = merge(var.standard_tags, var.tags, { Name = "security-center" })
+}
+
+resource "aws_instance" "security-center-instance" {
+  tenancy = "default"
+  ebs_optimized = true
+  disable_api_termination = var.instance_termination_protection
+  instance_initiated_shutdown_behavior = "stop"
+  instance_type = var.instance_type
+  key_name = "msoc-build"
+  monitoring = false
+  iam_instance_profile = "msoc-default-instance-profile"
+
+  ami = local.ami_map[local.ami_selection]
+  # We need to ignore ebs_block_device changes, because if the AMI changes, so does the snapshot_id.
+  # If they add a feature to block more specific changes (eg `ebs_block_devices[*].snapshot_id`), then
+  # that could be removed.
+  #lifecycle { ignore_changes = [ ami, key_name, user_data, ebs_block_device ] }
+  lifecycle { ignore_changes = [ ami, key_name, user_data ] }
+
+  # These device definitions are optional, but added for clarity.
+  root_block_device {
+      volume_type = "gp3"
+      volume_size = "250"
+      delete_on_termination = true
+      encrypted = true
+      kms_key_id = data.aws_kms_key.ebs-key.arn
+  }
+
+  ebs_block_device {
+    # swap
+    device_name = "/dev/xvdm"
+    volume_type = "gp3"
+    volume_size = 8
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    # Snapshot IDs need to be grabbed from the ami, or it will replace every time. It's ugly.
+    # This may prompt replacement when the AMI is updated.
+    # See:
+    #   https://github.com/hashicorp/terraform/issues/19958
+    #   https://github.com/terraform-providers/terraform-provider-aws/issues/13118
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdm"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /home
+    device_name = "/dev/xvdn"
+    volume_type = "gp3"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdn"].ebs.snapshot_id
+
+  }
+  ebs_block_device {
+    # /var
+    device_name = "/dev/xvdo"
+    volume_type = "gp3"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdo"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/tmp
+    device_name = "/dev/xvdp"
+    volume_type = "gp3"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdp"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/log
+    device_name = "/dev/xvdq"
+    volume_type = "gp3"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdq"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/log/audit
+    device_name = "/dev/xvdr"
+    volume_type = "gp3"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdr"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /tmp
+    device_name = "/dev/xvds"
+    volume_type = "gp3"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvds"].ebs.snapshot_id
+  }
+
+  network_interface {
+    device_index = 0
+    network_interface_id = aws_network_interface.security-center-interface.id
+  }
+
+  user_data = data.template_cloudinit_config.cloud-init.rendered
+  tags = merge( var.standard_tags, var.tags, { Name = "security-center-0" })
+  volume_tags = merge( var.standard_tags, var.tags, { Name = "security-center-0" })
+}
+
+data "template_file" "cloud-init" {
+  # Should these be in a common directory? I suspect they'd be reusable
+  template = file("${path.module}/cloud-init/cloud-init.tpl")
+
+  vars = {
+    hostname = "security-center-0"
+    fqdn = "security-center-0.${var.dns_info["private"]["zone"]}"
+    environment = var.environment
+    salt_master  = var.salt_master
+    proxy = var.proxy
+    aws_partition = var.aws_partition
+    aws_partition_alias = var.aws_partition_alias
+    aws_region = var.aws_region
+  }
+}
+
+# Render a multi-part cloud-init config making use of the part
+# above, and other source files
+data "template_cloudinit_config" "cloud-init" {
+  gzip          = true
+  base64_encode = true
+
+  # Main cloud-config configuration file.
+  part {
+    filename     = "init.cfg"
+    content_type = "text/cloud-config"
+    content      = data.template_file.cloud-init.rendered
+  }
+
+  # Additional parts as needed
+  #part {
+  #  content_type = "text/x-shellscript"
+  #  content      = "ffbaz"
+  #}
+}
+
+module "private_dns_record_security-center" {
+  source = "../../../submodules/dns/private_A_record"
+
+  name = "security-center-0"
+  ip_addresses = [ aws_instance.security-center-instance.private_ip ]
+  dns_info = var.dns_info
+  reverse_enabled = var.reverse_enabled
+
+  providers = {
+    aws.c2 = aws.c2
+  }
+}

+ 27 - 0
base/nessus/instance_security_center/outputs.tf

@@ -0,0 +1,27 @@
+# NOTE(review): these commented-out outputs were copied from the GitHub
+# Enterprise module ("ghe-backup-instance", "ghe", "ghe_backup_data") and
+# have not been adapted to this Security Center module -- update the
+# resource references or remove before enabling any of them.
+#output backup_instance_arn {
+#  value = aws_instance.ghe-backup-instance.arn
+#}
+#
+#output backup_instance_private_ip {
+#  value = aws_instance.ghe-backup-instance.private_ip
+#}
+#
+#output instance_arn {
+#  value = [ for instance in aws_instance.ghe[*]: instance.arn ]
+#}
+#
+#output instance_private_ip {
+#  value = [ for instance in aws_instance.ghe[*]: instance.private_ip ]
+#}
+#
+#output public_url {
+#  value = "github.${var.dns_info["public"]["zone"]}"
+#}
+#
+#output private_url {
+#  value = "github.${var.dns_info["private"]["zone"]}"
+#}
+#
+#output efs_id {
+#  value = aws_efs_file_system.ghe_backup_data.id
+#}

+ 51 - 0
base/nessus/instance_security_center/securitygroup-server.tf

@@ -0,0 +1,51 @@
+# SG Summary - Server
+#     Ingress:
+#       22 - sync from other security centers
+#       443 - User access
+#     Egress:
+#       25 - smtp
+#       443 - updates
+#       tcp/1243 - "Communicating with Log Correlation Engine" (unneeded in xdr)
+#       tcp/8834-8835 - Communicating With Nessus - to vpc-scanners
+# NOTE(review): only the 443 ingress and 8834-8835 egress rules are defined
+# in this file; confirm that ingress 22 and egress 25/443/1243 are
+# intentionally omitted or managed elsewhere.
+# Security group for the Security Center server; individual ingress/egress
+# rules are attached via aws_security_group_rule resources below.
+resource "aws_security_group" "security_center" {
+  name_prefix = "security_center"
+  tags = merge( var.standard_tags, var.tags, { Name = "security_center" } )
+  vpc_id      = var.vpc_id
+  # Was "Nessus Security Scanner" -- this module is the Security Center
+  # (server), not a scanner; corrected to avoid confusion in the console.
+  # (Changing an SG description forces replacement of the security group.)
+  description = "Nessus Security Center"
+}
+
+#-----------------------------------------------------------------
+# Inbound access
+#-----------------------------------------------------------------
+# Direct HTTPS access to the web UI for operators in the access VPC.
+resource "aws_security_group_rule" "security_center_inbound_443" {
+  type              = "ingress"
+  protocol          = "tcp"
+  from_port         = 443
+  to_port           = 443
+  cidr_blocks       = var.cidr_map["vpc-access"]
+  security_group_id = aws_security_group.security_center.id
+  description       = "Inbound 443 (from access, for testing)"
+}
+
+# HTTPS from the internal ALB fronting the Security Center.
+resource "aws_security_group_rule" "security_center_inbound_443_from_alb" {
+  type                     = "ingress"
+  protocol                 = "tcp"
+  from_port                = 443
+  to_port                  = 443
+  source_security_group_id = aws_security_group.security_center_alb_server_internal.id
+  security_group_id        = aws_security_group.security_center.id
+  description              = "Inbound 443 from the ALB"
+}
+
+#-----------------------------------------------------------------
+# Outbound access
+#-----------------------------------------------------------------
+# Security Center -> Nessus scanner communication (tcp/8834-8835) into the
+# scanners VPC.
+resource "aws_security_group_rule" "security_center_outbound_nessus" {
+  type              = "egress"
+  protocol          = "tcp"
+  from_port         = 8834
+  to_port           = 8835
+  cidr_blocks       = var.cidr_map["vpc-scanners"]
+  security_group_id = aws_security_group.security_center.id
+  description       = "Outbound to Scanners"
+}

+ 48 - 0
base/nessus/instance_security_center/vars.tf

@@ -0,0 +1,48 @@
+variable "public_subnets" {
+  type = list(string)
+}
+
+variable "private_subnets" {
+  type = list(string)
+}
+
+variable "azs" {
+  type = list(string)
+}
+
+variable "vpc_id" {
+  type = string
+}
+
+variable "tags" {
+  description = "Tags to add to the resource (in addition to global standard tags)"
+  # Bare "map" is legacy HCL2 shorthand for map(any); spelled out explicitly.
+  type        = map(any)
+  default     = { }
+}
+
+variable "instance_type" {
+  type = string
+  default = "t3a.micro"
+}
+
+variable "reverse_enabled" {
+  description = "Whether to create the reverse DNS entry."
+  type = bool
+  default = true
+}
+
+variable "trusted_ips" { type = list(string) }
+variable "proxy" { type = string }
+variable "salt_master" { type = string }
+
+# map(any), not map(string): cidr_map values are lists of CIDRs and
+# dns_info is a nested map (e.g. dns_info["private"]["zone"]).
+variable "cidr_map" { type = map(any) }
+variable "dns_info" { type = map(any) }
+variable "standard_tags" { type = map(any) }
+variable "environment" { type = string }
+variable "aws_region" { type = string }
+variable "ses_region" { type = string }
+variable "aws_partition" { type = string }
+variable "aws_partition_alias" { type = string }
+variable "aws_account_id" { type = string }
+variable "common_services_account" { type = string }
+variable "instance_termination_protection" { type = bool }

+ 3 - 0
base/nessus/instance_security_center/version.tf

@@ -0,0 +1,3 @@
+# Require Terraform 0.13 or newer within the 0.x series
+# ("~> 0.13" permits any 0.x release at or above 0.13).
+terraform {
+  required_version = "~> 0.13"
+}