Browse Source

Adds Sensu to GC

Brad Poulton 4 years ago
parent
commit
5a891b26ce

+ 95 - 0
base/sensu-configuration/assets.tf

@@ -0,0 +1,95 @@
+# Source: https://bonsai.sensu.io/assets/sensu-plugins/sensu-plugins-http
+# Version and checksum are pinned; update url + sha512 together when bumping.
+resource "sensu_asset" "asset_http" {
+  name = "sensu-plugins-http"
+
+  build {
+    url     = "https://assets.bonsai.sensu.io/33783747d3af1939808944c2253c262d9d5b3fb7/sensu-plugins-http_6.0.0_centos7_linux_amd64.tar.gz"
+    sha512  = "eb4cb5aee521500c0580111814d909ec8ccda1a13c301578e614e932dc5420fdcd1b7da129a0d801440e433c51eb2fb7aa69df16bfaf6cdbb1ac20ed032d2063"
+    filters = [
+      "entity.system.os=='linux'",
+      "entity.system.arch=='amd64'",
+      "entity.system.platform_family=='rhel'",
+      "entity.system.platform_version.split('.')[0] == '7'",
+    ]
+  }
+}
+
+# Source: https://bonsai.sensu.io/assets/sensu/monitoring-plugins
+resource "sensu_asset" "asset_monitoring" {
+  name = "sensu-plugins-monitoring"
+
+  build {
+    url     = "https://assets.bonsai.sensu.io/1730d4a16e1072e4adcb549249ef388126ba0faf/monitoring-plugins-centos7_2.6.0_linux_amd64.tar.gz"
+    sha512  = "7c16b80a419b7b6f11f17eab8ab2ff223e96dcffd090c9b21ec50f3f15cd25809611086afc536dca330924945a88a8f7d8615aa35c3b456a690b064802e4ddb7"
+    filters = [
+      "entity.system.os=='linux'",
+      "entity.system.arch=='amd64'",
+      "entity.system.platform_family=='rhel'",
+      "entity.system.platform_version.split('.')[0] == '7'",
+    ]
+  }
+}
+
+# Source: https://bonsai.sensu.io/assets/sensu-plugins/sensu-plugins-network-checks
+resource "sensu_asset" "asset_network" {
+  name = "sensu-plugins-network-checks"
+
+  build {
+    url     = "https://assets.bonsai.sensu.io/a2115474fe198f3895b953f6d90de86607f33722/sensu-plugins-network-checks_5.0.0_centos7_linux_amd64.tar.gz"
+    sha512  = "f0a229918245d2156fcc34e272cb351d09f3d7ee79057cccaa88121d837723951c816593104ff959528b0dec7f18901b6735f7b7cf765ddcce85c6fdbb559378"
+    filters = [
+      "entity.system.os=='linux'",
+      "entity.system.arch=='amd64'",
+      "entity.system.platform_family=='rhel'",
+      "entity.system.platform_version.split('.')[0] == '7'",
+    ]
+  }
+}
+
+# Source: https://bonsai.sensu.io/assets/sensu-plugins/sensu-plugins-process-checks
+resource "sensu_asset" "asset_process" {
+  name = "sensu-plugins-process-checks"
+
+  build {
+    url     = "https://assets.bonsai.sensu.io/d582eeb357ca2c483cf1dc290640baca8dcd66f5/sensu-plugins-process-checks_4.1.0_centos7_linux_amd64.tar.gz"
+    sha512  = "1c10dda30ecf0298583b186e9b46f8319d18b093d3f6c3b2be7b475b99ba7e281e8af78a3bcc45fedfeca1d498690a894e15fe56efa9dba5d2052134e1b1ab75"
+    filters = [
+      "entity.system.os=='linux'",
+      "entity.system.arch=='amd64'",
+      "entity.system.platform_family=='rhel'",
+      "entity.system.platform_version.split('.')[0] == '7'",
+    ]
+  }
+}
+
+# Source: https://bonsai.sensu.io/assets/sensu/sensu-ruby-runtime
+# Ruby runtime required by the plugin checks that ship as .rb scripts.
+resource "sensu_asset" "asset_ruby" {
+  name = "sensu-ruby-runtime"
+
+  build {
+    url     = "https://assets.bonsai.sensu.io/3a73f770b6d29a7f180be850f0d0098e7eba5d77/sensu-ruby-runtime_0.0.11_ruby-2.4.4_centos7_linux_amd64.tar.gz"
+    sha512  = "444ff064741e5f69c470742f817e64f3b89592490f1bed96e2894914a4909d4a56d50a86c977684cbb8570bd445c9b38110f176b0a2f9765ffb0b768efcd9bf0"
+    filters = [
+      "entity.system.os=='linux'",
+      "entity.system.arch=='amd64'",
+      "entity.system.platform_family=='rhel'",
+      "entity.system.platform_version.split('.')[0] == '7'",
+    ]
+  }
+}
+
+# Source: https://bonsai.sensu.io/assets/sensu-plugins/sensu-plugins-victorops
+resource "sensu_asset" "asset_victorops" {
+  name = "sensu-plugins-victorops"
+
+  build {
+    url     = "https://assets.bonsai.sensu.io/42b661acefbfe02bc274858bdbff574d961e6a56/sensu-plugins-victorops_3.0.0_centos7_linux_amd64.tar.gz"
+    sha512  = "6f10ff16a1e43e82ca5dc57ce57dce23c311c957cf678bb9c3c3dc3ee1be84fc8c92c39b50e216e1bea568866e085975af60a58dc465f62beaa640a1e5d65a7d"
+    filters = [
+      "entity.system.os=='linux'",
+      "entity.system.arch=='amd64'",
+      "entity.system.platform_family=='rhel'",
+      "entity.system.platform_version.split('.')[0] == '7'",
+    ]
+  }
+}

+ 185 - 0
base/sensu-configuration/checks.tf

@@ -0,0 +1,185 @@
+# Environment-specific values shared by the checks below.
+locals {
+  # check_disk critical threshold (units) for the Splunk hot-data volume;
+  # prod gets more headroom than test.
+  splunk_hot     = var.environment == "test" ? "10000" : "50000"
+  # Tunnel peer addresses for the two interconnects (test vs prod sets).
+  interconnect_0 = var.environment == "test" ? ["169.254.230.197", "169.254.142.233", "169.254.221.229", "169.254.145.141"] : ["169.254.152.217", "169.254.88.105", "169.254.253.45", "169.254.91.129"]
+  interconnect_1 = var.environment == "test" ? ["169.254.186.189", "169.254.119.73", "169.254.20.161", "169.254.128.189"] : ["169.254.247.157", "169.254.246.157", "169.254.22.21", "169.254.38.13"]
+}
+
+# Baseline disk-space check for every Linux host (per-mountpoint thresholds).
+resource "sensu_check" "check_disk_base" {
+  name           = "check_disk_base"
+  command        = "check_disk -c 250 -p /var -C -c 500 -p /var/log -C -c 1000 -p /var/log/audit -C -c 2000 -p /opt -C -c 500 -p /boot -C -c 1000 -p /"
+  namespace      = "default"
+  subscriptions  = [ "linux", ]
+  handlers       = [ "victorops", ]
+  cron           = "* * * * *"
+  publish        = true
+  runtime_assets = [ "sensu-plugins-monitoring", ]
+}
+
+resource "sensu_check" "check_disk_indexer" {
+  name           = "check_disk_indexer"
+  command        = "check_disk -c ${local.splunk_hot} -p /opt/splunkdata/hot -C -c 5000 -p /opt/splunk"
+  namespace      = "default"
+  subscriptions  = [ "check_disk_indexer", ]
+  handlers       = [ "victorops", ]
+  cron           = "* * * * *"
+  publish        = true
+  runtime_assets = [ "sensu-plugins-monitoring", ]
+}
+
+resource "sensu_check" "check_disk_syslog" {
+  name           = "check_disk_syslog"
+  command        = "check_disk -c 7000 -p /opt/syslog-ng"
+  namespace      = "default"
+  subscriptions  = [ "check_disk_syslog", ]
+  handlers       = [ "victorops", ]
+  cron           = "* * * * *"
+  publish        = true
+  runtime_assets = [ "sensu-plugins-monitoring", ]
+}
+
+# One ping check per interconnect-0 peer; the check name is suffixed with the
+# peer's 1-based position in the list.
+resource "sensu_check" "check_ping_interconnect-0" {
+  for_each       = toset(local.interconnect_0)
+  name           = "ping_interconnect-0-${index(local.interconnect_0, each.value) +1}"
+  command        = "check_ping -H ${each.value} -w 100,80% -c 100,80% -4"
+  namespace      = "default"
+  subscriptions  = [ "interconnect-0", ]
+  handlers       = [ "victorops", ]
+  cron           = "* * * * *"
+  publish        = true
+  runtime_assets = [ "sensu-plugins-monitoring", ]
+}
+
+resource "sensu_check" "check_ping_interconnect-1" {
+  for_each       = toset(local.interconnect_1)
+  name           = "ping_interconnect-1-${index(local.interconnect_1, each.value) +1}"
+  command        = "check_ping -H ${each.value} -w 100,80% -c 100,80% -4"
+  namespace      = "default"
+  subscriptions  = [ "interconnect-1", ]
+  handlers       = [ "victorops", ]
+  cron           = "* * * * *"
+  publish        = true
+  runtime_assets = [ "sensu-plugins-monitoring", ]
+}
+
+resource "sensu_check" "check_phantom_ports" {
+  name           = "phantom_ports"
+  command        = "check-ports.rb -h 0.0.0.0 -p 443"
+  namespace      = "default"
+  subscriptions  = [ "phantom_ports", ]
+  handlers       = [ "victorops", ]
+  cron           = "* * * * *"
+  publish        = true
+  runtime_assets = [ "sensu-plugins-network-checks", "sensu-ruby-runtime", ]
+}
+
+#only needed in PROD
+resource "sensu_check" "check_portal_http" {
+  count          = var.environment == "test" ? 0 : 1
+  name           = "portal_http"
+  command        = "metrics-curl.rb -u https://portal.xdr.accenturefederalcyber.com"
+  namespace      = "default"
+  subscriptions  = [ "portal", ]
+  handlers       = [ "victorops", ]
+  cron           = "* * * * *"
+  publish        = true
+  runtime_assets = [ "sensu-plugins-http", "sensu-ruby-runtime", ]
+}
+
+resource "sensu_check" "check_salt_master_ports" {
+  name           = "salt_master_ports"
+  command        = "check-ports.rb -h 0.0.0.0 -p 4505,4506"
+  namespace      = "default"
+  subscriptions  = [ "salt_master_ports", ]
+  handlers       = [ "victorops", ]
+  cron           = "* * * * *"
+  publish        = true
+  runtime_assets = [ "sensu-plugins-network-checks", "sensu-ruby-runtime", ]
+}
+
+resource "sensu_check" "check_splunk_cm_ports" {
+  name           = "splunk_cm_ports"
+  command        = "check-ports.rb -h 0.0.0.0 -p 8089"
+  namespace      = "default"
+  subscriptions  = [ "splunk_cm_ports", ]
+  handlers       = [ "victorops", ]
+  cron           = "* * * * *"
+  publish        = true
+  runtime_assets = [ "sensu-plugins-network-checks", "sensu-ruby-runtime", ]
+}
+
+resource "sensu_check" "check_splunk_ds_ports" {
+  name           = "splunk_ds_ports"
+  command        = "check-ports.rb -h 0.0.0.0 -p 8089"
+  namespace      = "default"
+  subscriptions  = [ "splunk_ds_ports", ]
+  handlers       = [ "victorops", ]
+  cron           = "* * * * *"
+  publish        = true
+  runtime_assets = [ "sensu-plugins-network-checks", "sensu-ruby-runtime", ]
+}
+
+resource "sensu_check" "check_splunk_hf_ports" {
+  name           = "splunk_hf_ports"
+  command        = "check-ports.rb -h 0.0.0.0 -p 8089,8000"
+  namespace      = "default"
+  subscriptions  = [ "splunk_hf_ports", ]
+  handlers       = [ "victorops", ]
+  cron           = "* * * * *"
+  publish        = true
+  runtime_assets = [ "sensu-plugins-network-checks", "sensu-ruby-runtime", ]
+}
+
+# Moose indexers additionally expose HEC (8088).
+resource "sensu_check" "check_splunk_indexer_ports_moose" {
+  name           = "splunk_indexer_ports_moose"
+  command        = "check-ports.rb -h 0.0.0.0 -p 8089,9998,9887,8088"
+  namespace      = "default"
+  subscriptions  = [ "splunk_indexer_ports_moose", ]
+  handlers       = [ "victorops", ]
+  cron           = "* * * * *"
+  publish        = true
+  runtime_assets = [ "sensu-plugins-network-checks", "sensu-ruby-runtime", ]
+}
+
+resource "sensu_check" "check_splunk_indexer_ports" {
+  name           = "splunk_indexer_ports"
+  command        = "check-ports.rb -h 0.0.0.0 -p 8089,9998,9887"
+  namespace      = "default"
+  subscriptions  = [ "splunk_indexer_ports", ]
+  handlers       = [ "victorops", ]
+  cron           = "* * * * *"
+  publish        = true
+  runtime_assets = [ "sensu-plugins-network-checks", "sensu-ruby-runtime", ]
+}
+
+resource "sensu_check" "check_splunk_sh_ports" {
+  name           = "splunk_sh_ports"
+  command        = "check-ports.rb -h 0.0.0.0 -p 8089,8000"
+  namespace      = "default"
+  subscriptions  = [ "splunk_sh_ports", ]
+  handlers       = [ "victorops", ]
+  cron           = "* * * * *"
+  publish        = true
+  runtime_assets = [ "sensu-plugins-network-checks", "sensu-ruby-runtime", ]
+}
+
+resource "sensu_check" "check_splunk_uf_ports" {
+  name           = "splunk_uf_ports"
+  command        = "check-ports.rb -h 0.0.0.0 -p 8089"
+  namespace      = "default"
+  subscriptions  = [ "splunk", ]
+  handlers       = [ "victorops", ]
+  cron           = "* * * * *"
+  publish        = true
+  runtime_assets = [ "sensu-plugins-network-checks", "sensu-ruby-runtime", ]
+}
+
+resource "sensu_check" "check_syslog-ng_service" {
+  name           = "syslog-ng_service"
+  command        = "check-process.rb -p syslog-ng"
+  namespace      = "default"
+  subscriptions  = [ "syslog-ng_service", ]
+  handlers       = [ "victorops", ]
+  cron           = "* * * * *"
+  publish        = true
+  runtime_assets = [ "sensu-plugins-process-checks", "sensu-ruby-runtime", ]
+}

+ 8 - 0
base/sensu-configuration/filters.tf

@@ -0,0 +1,8 @@
+# Event filter that delays paging: with checks on a one-minute cron, an event
+# passes only on exactly its 5th consecutive occurrence (one page after ~5
+# minutes of failure) or when the check resolves.
+resource "sensu_filter" "filter_handler-delay" {
+  name       = "handler-delay"
+  action     = "allow"
+  namespace  = "default"
+  expressions = [
+    "event.check.occurrences == 5 || event.is_resolution",
+  ]
+}

+ 20 - 0
base/sensu-configuration/handlers.tf

@@ -0,0 +1,20 @@
+locals {
+    # VictorOps routing key: alerts from test route to the "test" team,
+    # prod pages "engineering".
+    victorops_team = var.environment == "test" ? "test" : "engineering"
+}
+
+# Route agent keepalive failures through the victorops pipe handler below.
+resource "sensu_handler" "handler_keepalive" {
+  name      = "keepalive"
+  type      = "set"
+  namespace = "default"
+  handlers  = [ "victorops", ]
+}
+
+# Pipe handler that forwards incidents to VictorOps, gated by the is_incident,
+# not_silenced, and handler-delay filters.
+# NOTE(review): the REST endpoint URL below embeds the VictorOps integration
+# key in plain text in source control; consider sourcing it from a variable or
+# secret store instead.
+resource "sensu_handler" "handler_victorops" {
+  name           = "victorops"
+  type           = "pipe"
+  namespace      = "default"
+  handlers       = [ "victorops", ]
+  filters        = [ "is_incident", "not_silenced", "handler-delay", ]
+  runtime_assets = [ "sensu-plugins-victorops", "sensu-ruby-runtime", ]
+  command        = "handler-victorops.rb --map_go_event_into_ruby -a https://alert.victorops.com/integrations/generic/20131114/alert/864a1b38-4243-4137-8baa-b587ba5f300b/ -r ${local.victorops_team}"
+}

+ 22 - 0
base/sensu-configuration/main.tf

@@ -0,0 +1,22 @@
+#----------------------------------------------------------------------------
+# OIDC role
+#----------------------------------------------------------------------------
+
+# Cluster-wide admin role (all verbs on all resources).
+resource "sensu_cluster_role" "cluster_role" {
+  name = "mdr-admin"
+  rule {
+    verbs = ["*"]
+    resources = ["*"]
+    resource_names = []
+  }
+}
+
+# Bind the admin role to the Okta mdr-admins group.
+resource "sensu_cluster_role_binding" "cluster_role_binding" {
+  name = "mdr-admin"
+  # Reference the role resource instead of repeating the literal name so
+  # Terraform orders creation correctly and the two cannot drift apart.
+  cluster_role = sensu_cluster_role.cluster_role.name
+  groups = ["okta:mdr-admins"]
+}
+
+
+
+

+ 2 - 0
base/sensu-configuration/vars.tf

@@ -0,0 +1,2 @@
+# Module inputs. `map` with no element type is legacy 0.11 shorthand for
+# map(any); spell it out explicitly.
+# NOTE(review): dns_info is not referenced by the sensu-configuration files
+# shown here — confirm it is needed before keeping it.
+variable "dns_info" { type = map(any) }
+variable "environment" { type = string } # "test" or "prod"; switches thresholds, IP sets, and prod-only resources

+ 1 - 0
base/sensu/amis.tf

@@ -0,0 +1 @@
+../amis.tf

+ 71 - 0
base/sensu/certificate.tf

@@ -0,0 +1,71 @@
+#----------------------------------------------------------------------------
+# Private DNS Certificate
+#----------------------------------------------------------------------------
+
+
+resource "aws_acm_certificate" "cert" {
+  domain_name       = "sensu.${var.dns_info["private"]["zone"]}"
+  validation_method = "DNS"
+
+  tags = merge(var.standard_tags, var.tags)
+
+  # Recommended for certificates that are attached to a listener: create the
+  # replacement before destroying the in-use certificate.
+  lifecycle {
+    create_before_destroy = true
+  }
+}
+
+resource "aws_acm_certificate_validation" "cert" {
+  certificate_arn         = aws_acm_certificate.cert.arn
+  validation_record_fqdns = [for record in aws_route53_record.cert_validation: record.fqdn]
+}
+
+# NOTE(review): these validation records for the "private" certificate are
+# written to the PUBLIC zone — presumably because ACM DNS validation must be
+# publicly resolvable. Confirm this is intentional.
+resource "aws_route53_record" "cert_validation" {
+  provider = aws.mdr-common-services-commercial
+
+  for_each = {
+    for dvo in aws_acm_certificate.cert.domain_validation_options : dvo.domain_name => {
+      name   = dvo.resource_record_name
+      record = dvo.resource_record_value
+      type   = dvo.resource_record_type
+    }
+  }
+
+  allow_overwrite = true
+  name            = each.value.name
+  records         = [each.value.record]
+  ttl             = 60
+  type            = each.value.type
+  zone_id         = var.dns_info["public"]["zone_id"]
+}
+
+#----------------------------------------------------------------------------
+# Public DNS Certificate
+#----------------------------------------------------------------------------
+
+
+resource "aws_acm_certificate" "cert_public" {
+  domain_name       = "sensu.${var.dns_info["public"]["zone"]}"
+  validation_method = "DNS"
+
+  tags = merge(var.standard_tags, var.tags)
+
+  lifecycle {
+    create_before_destroy = true
+  }
+}
+
+resource "aws_acm_certificate_validation" "cert_public" {
+  certificate_arn         = aws_acm_certificate.cert_public.arn
+  validation_record_fqdns = [for record in aws_route53_record.cert_validation_public: record.fqdn]
+}
+
+resource "aws_route53_record" "cert_validation_public" {
+  provider = aws.mdr-common-services-commercial
+
+  for_each = {
+    for dvo in aws_acm_certificate.cert_public.domain_validation_options : dvo.domain_name => {
+      name   = dvo.resource_record_name
+      record = dvo.resource_record_value
+      type   = dvo.resource_record_type
+    }
+  }
+
+  allow_overwrite = true
+  name            = each.value.name
+  records         = [each.value.record]
+  ttl             = 60
+  type            = each.value.type
+  zone_id         = var.dns_info["public"]["zone_id"]
+}

+ 72 - 0
base/sensu/cloud-init/cloud-init.tpl

@@ -0,0 +1,72 @@
+#cloud-config
+# Terraform template: ${...} placeholders are filled by template_file vars.
+preserve_hostname: false
+hostname: ${hostname}
+# NOTE(review): "salt-master" is not a standard cloud-init module key — the
+# salt master is actually configured via /etc/salt/minion below; confirm this
+# top-level key is read by anything.
+salt-master: ${salt_master}
+fqdn: ${fqdn}
+
+# Write files happens early
+write_files:
+- content: |
+    proxy=http://${proxy}:80
+  path: /etc/yum.conf
+  append: true
+- content: |
+    [global]
+    proxy=${proxy}
+  path: /etc/pip.conf
+- content: |
+    export HTTPS_PROXY=http://${proxy}:80
+    export HTTP_PROXY=http://${proxy}:80
+    export NO_PROXY=localhost,127.0.0.1,169.254.169.254,pvt.xdrtest.accenturefederalcyber.com,pvt.xdr.accenturefederalcyber.com,reposerver.msoc.defpoint.local,jenkins.msoc.defpoint.local,pod1search-splunk-sh.msoc.defpoint.local,s3.amazonaws.com,ssm.${ aws_region }.amazonaws.com,ec2messages.${ aws_region }.amazonaws.com,ec2.${ aws_region }.amazonaws.com,ssmmessages.${ aws_region }.amazonaws.com,iratemoses.mdr.defpoint.com,jira.mdr.defpoint.com,reposerver.pvt.xdr.accenturefederalcyber.com,jenkins.pvt.xdr.accenturefederalcyber.com,pod1search-splunk-sh.pvt.xdr.accenturefederalcyber.com,reposerver.pvt.xdrtest.accenturefederalcyber.com,jenkins.pvt.xdrtest.accenturefederalcyber.com,pod1search-splunk-sh.pvt.xdrtest.accenturefederalcyber.com,iratemoses.xdr.accenturefederalcyber.com,jira.xdr.accenturefederalcyber.com,iratemoses.xdrtest.accenturefederalcyber.com,jira.xdrtest.accenturefederalcyber.com
+    export https_proxy=$HTTPS_PROXY
+    export http_proxy=$HTTP_PROXY
+    export no_proxy=$NO_PROXY
+  path: /etc/profile.d/proxy.sh
+- content: |
+    ${fqdn}
+  path: /etc/salt/minion_id
+- content: |
+    master: ${salt_master}
+  path: /etc/salt/minion
+- content: |
+    grains:
+      environment: ${ environment }
+      aws_partition: ${ aws_partition }
+      aws_partition_alias: ${ aws_partition_alias }
+  path: /etc/salt/minion.d/cloud_init_grains.conf
+
+#yum_repos:
+#  epel-release:
+#    baseurl: http://download.fedoraproject.org/pub/epel/7/$basearch
+#    enabled: false
+#    failovermethod: priority
+#    gpgcheck: true
+#    gpgkey: http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
+#    name: Extra Packages for Enterprise Linux 7 - Release
+
+packages:
+ - vim
+
+package_update: true # Always patch
+
+# Grow the listed partitions to fill their (resized) block devices at boot.
+growpart:
+  mode: auto
+  devices: [ '/', '/var', '/var/log', '/var/log/audit', '/var/tmp', '/tmp', '/home' ]
+  ignore_growroot_disabled: false
+
+# Start salt-minion and SSM, then rebaseline AIDE so the changes made during
+# provisioning are not reported as intrusions.
+runcmd:
+ - /bin/systemctl restart salt-minion
+ - /bin/systemctl enable salt-minion
+ - /bin/systemctl start amazon-ssm-agent
+ - /bin/systemctl enable amazon-ssm-agent
+ - /usr/sbin/aide --update --verbose=0
+ - /bin/cp /var/lib/aide/aide.db.new.gz /var/lib/aide/aide.db.gz
+
+# Either final message or power state, but probably not both
+final_message: "The system is up after $UPTIME seconds"
+#power_state:
+#  delay: "+30"
+#  mode: reboot
+#  message: "System configured after $UPTIME seconds"
+#  timeout: 300
+#  condition: true

+ 284 - 0
base/sensu/elb.tf

@@ -0,0 +1,284 @@
+# lb ports
+# Map of listener name -> port; drives target groups, listeners, and SG rules
+# for the internal ALB via for_each.
+locals {
+  alb_listener_ports = {
+       ui    = "8000"
+       api   = "8080"
+       agent = "8081"
+      }
+}
+
+#----------------------------------------------------------------------------
+# INTERNAL LB
+#----------------------------------------------------------------------------
+
+resource "aws_alb" "sensu_internal" {
+  name               = "sensu-alb-internal-${var.environment}"
+  security_groups    = [ aws_security_group.sensu_alb_server_internal.id ]
+  internal           = true 
+  subnets            = var.subnets
+  load_balancer_type = "application"
+
+
+  access_logs {
+    bucket  = "xdr-elb-${ var.environment }"
+    enabled = true
+  }
+
+  tags = merge(var.standard_tags, var.tags, { Name = "sensu-alb-internal-${var.environment}" })
+}
+
+# One target group per listener port; all health-check against the API's
+# /health endpoint on 8080.
+resource "aws_alb_target_group" "sensu_internal" {
+  for_each = local.alb_listener_ports
+  name                 = "sensu-alb-targets-${each.key}"
+  port                 = each.value 
+  protocol             = "HTTPS"
+  #deregistration_delay = "${local.lb_deregistration_delay}"
+  vpc_id               = var.vpc_id
+
+  health_check {
+    protocol = "HTTPS"
+    port     = "8080"
+    path     = "/health"
+    matcher  = "200"
+    timeout  = "4"
+    interval = "5"
+  }
+
+  stickiness {
+    type    = "lb_cookie"
+    enabled = false 
+  }
+
+  tags = merge(var.standard_tags, var.tags)
+}
+
+# Single-instance deployment: attach the one Sensu instance to every group.
+resource "aws_lb_target_group_attachment" "sensu_internal" {
+  for_each = local.alb_listener_ports
+  target_group_arn = aws_alb_target_group.sensu_internal[each.key].arn
+  target_id        = aws_instance.instance.id
+  port             = each.value
+}
+
+# Create a new alb listener
+resource "aws_alb_listener" "sensu_internal" {
+  for_each = local.alb_listener_ports
+  load_balancer_arn = aws_alb.sensu_internal.arn
+  port              = each.value
+  protocol          = "HTTPS"
+  ssl_policy        = "ELBSecurityPolicy-FS-1-2-Res-2019-08" # PFS, TLS1.2, most "restrictive" policy (took awhile to find that)
+  certificate_arn   = aws_acm_certificate.cert.arn
+
+  default_action {
+    target_group_arn = aws_alb_target_group.sensu_internal[each.key].arn
+    type             = "forward"
+  }
+}
+
+#DNS Alias for the LB ( the CNAME was required. an Alias did NOT work due to aws/bug. )
+resource "aws_route53_record" "sensu_internal" {
+  zone_id = var.dns_info["private"]["zone_id"]
+  name    = var.instance_name 
+  type    = "CNAME" 
+  records = [aws_alb.sensu_internal.dns_name]
+  ttl = "60"
+  provider = aws.c2
+}
+
+#----------------------------------------------------------------------------
+# ALB Security Group
+#----------------------------------------------------------------------------
+
+resource "aws_security_group" "sensu_alb_server_internal" {
+  vpc_id      = var.vpc_id
+  name        = "sensu-alb-sg-internal"
+  description = "Sensu Internal LB SG"
+  tags = merge(var.standard_tags, var.tags)
+}
+
+#----------------------------------------------------------------------------
+# INGRESS
+#----------------------------------------------------------------------------
+
+
+# Allow the three Sensu ports from anywhere in the 10/8 internal space.
+resource "aws_security_group_rule" "sensu_from_vpc" {
+  for_each = local.alb_listener_ports
+  type              = "ingress"
+  from_port         = each.value
+  to_port           = each.value
+  protocol          = "tcp"
+  cidr_blocks       = ["10.0.0.0/8"]
+  description       = "Sensu ${each.key}"
+  security_group_id = aws_security_group.sensu_alb_server_internal.id
+}
+
+#----------------------------------------------------------------------------
+# EGRESS
+#----------------------------------------------------------------------------
+
+resource "aws_security_group_rule" "sensu_from_alb" {
+  for_each = local.alb_listener_ports
+  type              = "egress"
+  from_port         = each.value
+  to_port           = each.value
+  protocol          = "tcp"
+  source_security_group_id = aws_security_group.instance_security_group.id
+  description       = "Sensu ${each.key}"
+  security_group_id = aws_security_group.sensu_alb_server_internal.id
+}
+
+#----------------------------------------------------------------------------
+# EXTERNAL LB
+#----------------------------------------------------------------------------
+
+# Internet-facing ALB exposing only the agent port (8081) to customer POPs.
+resource "aws_alb" "sensu_external" {
+  name               = "sensu-alb-external-${var.environment}"
+  security_groups    = [ aws_security_group.sensu_alb_server_external.id ]
+  internal           = false 
+  subnets            = var.subnets
+  load_balancer_type = "application"
+
+
+  access_logs {
+    bucket  = "xdr-elb-${ var.environment }"
+    enabled = true
+  }
+
+  tags = merge(var.standard_tags, var.tags, { Name = "sensu-alb-external-${var.environment}" })
+}
+
+# Create a new target group
+resource "aws_alb_target_group" "sensu_external" {
+  name                 = "sensu-alb-targets-agent-external"
+  port                 = 8081
+  protocol             = "HTTPS"
+  #deregistration_delay = "${local.lb_deregistration_delay}"
+  vpc_id               = var.vpc_id
+
+  health_check {
+    protocol = "HTTPS"
+    port     = "8080"
+    path     = "/health"
+    matcher  = "200"
+    timeout  = "4"
+    interval = "5"
+  }
+
+  stickiness {
+    type    = "lb_cookie"
+    enabled = false 
+  }
+
+  tags = merge(var.standard_tags, var.tags)
+}
+
+resource "aws_lb_target_group_attachment" "sensu_external" {
+  target_group_arn = aws_alb_target_group.sensu_external.arn
+  target_id        = aws_instance.instance.id
+  port             = 8081
+}
+
+# Create a new alb listener
+resource "aws_alb_listener" "sensu_https_external" {
+  load_balancer_arn = aws_alb.sensu_external.arn
+  port              = "8081"
+  protocol          = "HTTPS"
+  ssl_policy        = "ELBSecurityPolicy-FS-1-2-Res-2019-08" # PFS, TLS1.2, most "restrictive" policy (took awhile to find that)
+  certificate_arn   = aws_acm_certificate.cert_public.arn
+
+  default_action {
+    target_group_arn = aws_alb_target_group.sensu_external.arn
+    type             = "forward"
+  }
+}
+
+# #########################
+# # DNS Entry
+module "public_dns_record" {
+  source = "../../submodules/dns/public_ALIAS_record"
+
+  name = var.instance_name
+  target_dns_name = aws_alb.sensu_external.dns_name
+  target_zone_id  = aws_alb.sensu_external.zone_id
+  dns_info = var.dns_info
+
+  providers = {
+    aws.mdr-common-services-commercial = aws.mdr-common-services-commercial
+  }
+}
+
+#----------------------------------------------------------------------------
+# ALB Security Group
+#----------------------------------------------------------------------------
+
+resource "aws_security_group" "sensu_alb_server_external" {
+  vpc_id      = var.vpc_id
+  name        = "sensu-alb-sg-external"
+  description = "Sensu LB SG"
+  tags = merge(var.standard_tags, var.tags)
+}
+
+#----------------------------------------------------------------------------
+# INGRESS
+#----------------------------------------------------------------------------
+
+# NOTE(review): the three ingress rules below allow port 443, but the only
+# external listener is on 8081 — confirm agents really connect on 443 (e.g.
+# via a port mapping upstream) or whether these should be 8081.
+
+#count = 0 in test. No need to let customers connect to test. 
+resource "aws_security_group_rule" "sensu-afs-pop" {
+  count = var.environment == "test" ? 0 : 1
+  description = "Sensu - AFS POP"
+  type = "ingress"
+  from_port = "443"
+  to_port = "443"
+  protocol = "tcp"
+  cidr_blocks = var.afs_pop
+  security_group_id = aws_security_group.sensu_alb_server_external.id
+}
+
+#count = 0 in test. No need to let customers connect to test.
+resource "aws_security_group_rule" "sensu-afs-azure-pop" {
+  count = var.environment == "test" ? 0 : 1
+  description = "Sensu - AFS Azure POP"
+  type = "ingress"
+  from_port = "443"
+  to_port = "443"
+  protocol = "tcp"
+  cidr_blocks = var.afs_azure_pop
+  security_group_id = aws_security_group.sensu_alb_server_external.id
+}
+
+#count = 0 in test. No need to let customers connect to test.
+resource "aws_security_group_rule" "sensu-nga-pop" {
+  count = var.environment == "test" ? 0 : 1
+  description = "Sensu - NGA POP"
+  type = "ingress"
+  from_port = "443"
+  to_port = "443"
+  protocol = "tcp"
+  cidr_blocks = var.nga_pop
+  security_group_id = aws_security_group.sensu_alb_server_external.id
+}
+
+
+#----------------------------------------------------------------------------
+# EGRESS
+#----------------------------------------------------------------------------
+
+resource "aws_security_group_rule" "alb_to_sensu_server" {
+  type              = "egress"
+  from_port         = 8081
+  to_port           = 8081
+  protocol          = "tcp"
+  source_security_group_id = aws_security_group.instance_security_group.id
+  description       = "Allows the ALB to talk to the Sensu servers"
+  security_group_id = aws_security_group.sensu_alb_server_external.id
+}
+
+resource "aws_security_group_rule" "alb_to_sensu_health" {
+  type              = "egress"
+  from_port         = 8080
+  to_port           = 8080
+  protocol          = "tcp"
+  source_security_group_id = aws_security_group.instance_security_group.id
+  description       = "Allows the ALB to talk to the Sensu Health check"
+  security_group_id = aws_security_group.sensu_alb_server_external.id
+}
+

+ 230 - 0
base/sensu/main.tf

@@ -0,0 +1,230 @@
+# Some instance variables
+locals {
+  ami_selection       = "minion" # master, minion, ...
+}
+
+# Rather than pass in the aws security group, we just look it up. This will
+# probably be useful other places, as well.
+data "aws_security_group" "typical-host" {
+  name   = "typical-host"
+  vpc_id = var.vpc_id
+}
+
+# Use the default EBS key
+data "aws_kms_key" "ebs-key" {
+  key_id = "alias/ebs_root_encrypt_decrypt"
+}
+
+# Dedicated ENI so the instance keeps its IP/SG association across rebuilds.
+resource "aws_network_interface" "instance" {
+  subnet_id = var.subnets[0]
+  security_groups = [ data.aws_security_group.typical-host.id, aws_security_group.instance_security_group.id ]
+  description = var.instance_name
+  tags = merge(var.standard_tags, var.tags, { Name = var.instance_name })
+}
+
+# The single Sensu server instance. Each filesystem below lives on its own
+# encrypted EBS volume whose snapshot_id comes from the selected AMI's
+# block-device mapping (see local.block_device_mappings, defined elsewhere).
+resource "aws_instance" "instance" {
+  #availability_zone = var.azs[count.index % 2]
+  tenancy = "default"
+  ebs_optimized = true
+  disable_api_termination = var.instance_termination_protection
+  instance_initiated_shutdown_behavior = "stop"
+  instance_type = var.instance_type
+  key_name = "msoc-build"
+  monitoring = false
+  iam_instance_profile = "msoc-default-instance-profile"
+
+  ami = local.ami_map[local.ami_selection]
+  # We need to ignore ebs_block_device changes, because if the AMI changes, so does the snapshot_id.
+  # If they add a feature to block more specific changes (eg `ebs_block_devices[*].snapshot_id`), then
+  # that could be removed.
+  lifecycle { ignore_changes = [ ami, key_name, user_data, ebs_block_device ] }
+
+  # These device definitions are optional, but added for clarity.
+  root_block_device {
+      volume_type = "gp2"
+      #volume_size = "60"
+      delete_on_termination = true
+      encrypted = true
+      kms_key_id = data.aws_kms_key.ebs-key.arn
+  }
+
+  ebs_block_device {
+    # swap
+    device_name = "/dev/xvdm"
+    volume_size = 48
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    # Snapshot IDs need to be grabbed from the ami, or it will replace every time. It's ugly.
+    # This may prompt replacement when the AMI is updated.
+    # See:
+    #   https://github.com/hashicorp/terraform/issues/19958
+    #   https://github.com/terraform-providers/terraform-provider-aws/issues/13118
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdm"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /home
+    device_name = "/dev/xvdn"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdn"].ebs.snapshot_id
+
+  }
+  ebs_block_device {
+    # /var
+    device_name = "/dev/xvdo"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdo"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/tmp
+    device_name = "/dev/xvdp"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdp"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/log
+    device_name = "/dev/xvdq"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdq"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/log/audit
+    device_name = "/dev/xvdr"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdr"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /tmp
+    device_name = "/dev/xvds"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvds"].ebs.snapshot_id
+  }
+
+  network_interface {
+    device_index = 0
+    network_interface_id = aws_network_interface.instance.id
+  }
+
+  user_data = data.template_cloudinit_config.cloud-init.rendered
+  tags = merge( var.standard_tags, var.tags, { Name = var.instance_name })
+  volume_tags = merge( var.standard_tags, var.tags, { Name = var.instance_name })
+}
+
+# NOTE(review): the template_file data source is deprecated in newer provider
+# versions in favor of the templatefile() function — consider migrating when
+# the Terraform version allows.
+data "template_file" "cloud-init" {
+  # Should these be in a common directory? I suspect they'd be reusable
+  template = file("${path.module}/cloud-init/cloud-init.tpl")
+
+  vars = {
+    hostname = var.instance_name
+    fqdn = "${var.instance_name}.${var.dns_info["private"]["zone"]}"
+    environment = var.environment
+    salt_master  = var.salt_master
+    proxy = var.proxy
+    aws_partition = var.aws_partition
+    aws_partition_alias = var.aws_partition_alias
+    aws_region = var.aws_region
+  }
+}
+
+# Render a multi-part cloud-init config making use of the part
+# above, and other source files
+data "template_cloudinit_config" "cloud-init" {
+  gzip          = true
+  base64_encode = true
+
+  # Main cloud-config configuration file.
+  part {
+    filename     = "init.cfg"
+    content_type = "text/cloud-config"
+    content      = data.template_file.cloud-init.rendered
+  }
+
+  # Additional parts as needed
+  #part {
+  #  content_type = "text/x-shellscript"
+  #  content      = "ffbaz"
+  #}
+}
+
+#----------------------------------------------------------------------------
+# Server SG
+#----------------------------------------------------------------------------
+
+resource "aws_security_group" "instance_security_group" {
+  name = "${var.instance_name}_security_group"
+  description = "Security Group for ${var.instance_name}(s)"
+  vpc_id = var.vpc_id
+  tags = merge(var.standard_tags, var.tags)
+}
+
+#----------------------------------------------------------------------------
+# INGRESS
+#----------------------------------------------------------------------------
+
+# Instance ingress is limited to the two Sensu ALB security groups
+# (internal: UI/API/agent; external: agent + health check only).
+resource "aws_security_group_rule" "sensu_ui" {
+  type              = "ingress"
+  from_port         = 8000
+  to_port           = 8000
+  protocol          = "tcp"
+  source_security_group_id = aws_security_group.sensu_alb_server_internal.id
+  description       = "Sensu UI"
+  security_group_id = aws_security_group.instance_security_group.id
+}
+
+resource "aws_security_group_rule" "sensu_agent_internal" {
+  type              = "ingress"
+  from_port         = 8081
+  to_port           = 8081
+  protocol          = "tcp"
+  source_security_group_id = aws_security_group.sensu_alb_server_internal.id
+  description       = "Internal Sensu Agents"
+  security_group_id = aws_security_group.instance_security_group.id
+}
+
+resource "aws_security_group_rule" "sensu_api" {
+  type              = "ingress"
+  from_port         = 8080
+  to_port           = 8080
+  protocol          = "tcp"
+  source_security_group_id = aws_security_group.sensu_alb_server_internal.id
+  description       = "Sensu API"
+  security_group_id = aws_security_group.instance_security_group.id
+}
+
+resource "aws_security_group_rule" "sensu_agent_external" {
+  type              = "ingress"
+  from_port         = 8081
+  to_port           = 8081
+  protocol          = "tcp"
+  source_security_group_id = aws_security_group.sensu_alb_server_external.id
+  description       = "External Sensu Agents"
+  security_group_id = aws_security_group.instance_security_group.id
+}
+
+resource "aws_security_group_rule" "sensu_api_external" {
+  type              = "ingress"
+  from_port         = 8080
+  to_port           = 8080
+  protocol          = "tcp"
+  source_security_group_id = aws_security_group.sensu_alb_server_external.id
+  description       = "External Sensu API"
+  security_group_id = aws_security_group.instance_security_group.id
+}

+ 11 - 0
base/sensu/outputs.tf

@@ -0,0 +1,11 @@
+output "instance_arn" {
+  description = "ARN of the Sensu EC2 instance"
+  value       = aws_instance.instance.arn
+}
+
+output "instance_private_ip" {
+  description = "Private IP address of the Sensu EC2 instance"
+  value       = aws_instance.instance.private_ip
+}
+
+output "internal_alb_address" {
+  description = "DNS name of the internal Sensu ALB"
+  value       = aws_alb.sensu_internal.dns_name
+}

+ 52 - 0
base/sensu/vars.tf

@@ -0,0 +1,52 @@
+variable "instance_name" {
+  description = "Hostname, DNS entry, etc."
+  type        = string
+}
+
+variable "azs" {
+  description = "Availability zones"
+  type        = list(string)
+}
+
+variable "subnets" {
+  description = "Subnet IDs"
+  type        = list(string)
+}
+
+variable "vpc_id" {
+  description = "VPC ID"
+  type        = string
+}
+
+variable "tags" {
+  description = "Tags to add to the resource (in addition to global standard tags)"
+  # "map" with no element type is deprecated legacy (pre-0.12) shorthand;
+  # map(any) is the explicit equivalent.
+  type        = map(any)
+  default     = {}
+}
+
+variable "instance_type" {
+  description = "EC2 instance type"
+  type        = string
+  default     = "t3a.micro"
+}
+
+variable "reverse_enabled" {
+  description = "Whether to create the reverse DNS entry."
+  type        = bool
+  default     = true
+}
+
+variable "trusted_ips" { type = list(string) }
+variable "xdr_interconnect" { type = list(string) }
+variable "nga_pop" { type = list(string) }
+variable "afs_azure_pop" { type = list(string) }
+variable "afs_pop" { type = list(string) }
+variable "proxy" { type = string }
+variable "salt_master" { type = string }
+
+# "map" with no element type is deprecated legacy (pre-0.12) shorthand for
+# map(any). map(any) — not map(string) — is required here: dns_info is read
+# as a nested map elsewhere (e.g. dns_info["private"]["zone"]).
+variable "cidr_map" { type = map(any) }
+variable "dns_info" { type = map(any) }
+variable "standard_tags" { type = map(any) }
+variable "environment" { type = string }
+variable "aws_region" { type = string }
+variable "aws_partition" { type = string }
+variable "aws_partition_alias" { type = string }
+variable "aws_account_id" { type = string }
+variable "common_services_account" { type = string }
+variable "instance_termination_protection" { type = bool }

+ 12 - 5
base/vault-configuration/policies.tf

@@ -119,21 +119,28 @@ resource "vault_policy" "minions" {
 
 
 #Restrict the sensu salt-minion to listing only the secrets under saltstack/minions
-#Policy must be named: saltstack/minion/sensu.msoc.defpoint.local
-# saltstack/minion/<minion-id>
+#Policy must be named: saltstack/minion/<minion-id>
+# e.g. saltstack/minion/sensu.pvt.xdrtest.accenturefederalcyber.com
 data "vault_policy_document" "sensu-minion" {
   rule {
-    path         = "auth/*"
-    capabilities = ["read", "list", "sudo", "create", "update", "delete"]
+    path         = "salt/*"
+    capabilities = ["list"]
+    description  = "sensu-minion"
+  }
+  rule {
+    path         = "salt/minions/sensu.${var.dns_info["private"]["zone"]}/*"
+    capabilities = ["read"]
     description  = "sensu-minion"
+
   }
 }
 
 resource "vault_policy" "sensu-minion" {
-  name   = "saltstack/minion/sensu.msoc.defpoint.local"
+  name   = "saltstack/minion/sensu.${var.dns_info["private"]["zone"]}"
   policy = data.vault_policy_document.sensu-minion.hcl
 }
 
+
 data "vault_policy_document" "soc" {
   rule {
     path         = "soc*"