Brad Poulton 4 years ago
parent
commit
2cf5b83c20

+ 82 - 0
base/vault-configuration/engines.tf

@@ -0,0 +1,82 @@
+#----------------------------------------------------------------------------
+# Secrets Engines
+#----------------------------------------------------------------------------
+
+resource "vault_mount" "engineering" {
+  path        = "engineering"
+  type        = "kv-v2"
+  description = "engineering"
+}
+
+resource "vault_mount" "ghe-deploy-keys" {
+  path        = "ghe-deploy-keys"
+  type        = "kv-v2"
+  description = "ghe-deploy-keys"
+}
+
+resource "vault_mount" "jenkins" {
+  path        = "jenkins"
+  type        = "kv-v2"
+  description = "jenkins"
+}
+
+resource "vault_mount" "onboarding" {
+  path        = "onboarding"
+  type        = "kv-v2"
+  description = "onboarding"
+}
+
+resource "vault_mount" "onboarding-afs" {
+  path        = "onboarding-afs"
+  type        = "kv-v2"
+  description = "onboarding-afs"
+}
+
+resource "vault_mount" "onboarding-gallery" {
+  path        = "onboarding-gallery"
+  type        = "kv-v2"
+  description = "onboarding-gallery"
+}
+
+resource "vault_mount" "onboarding-saf" {
+  path        = "onboarding-saf"
+  type        = "kv-v2"
+  description = "onboarding-saf"
+}
+
+resource "vault_mount" "portal" {
+  path        = "portal"
+  type        = "kv-v2"
+  description = "portal"
+}
+
+resource "vault_mount" "phantom" {
+  path        = "phantom"
+  type        = "kv-v2"
+  description = "phantom"
+}
+
+resource "vault_mount" "soc" {
+  path        = "soc"
+  type        = "kv-v2"
+  description = "soc"
+}
+
+#salt supports kv (v1), so this mount is not kv-v2
+resource "vault_mount" "salt" {
+  path        = "salt"
+  type        = "kv"
+  description = "salt"
+}
+
+#test secret
+resource "vault_generic_secret" "test" {
+  depends_on = [ vault_mount.salt ]
+  path = "salt/pillar_data"
+
+  data_json = <<EOT
+{
+  "my-pillar":   "my-secret"
+}
+EOT
+}
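
Since the `salt` mount above is KV version 1, the test secret can be read back without the `data/` path prefix that kv-v2 requires. A minimal smoke test from the CLI, assuming `VAULT_ADDR` is set and the token holds a policy that can read `salt/pillar_data`:

```
# KV v1: read the secret written by vault_generic_secret.test
vault kv get salt/pillar_data

# equivalent raw read against the same path
vault read salt/pillar_data
```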

+ 158 - 0
base/vault-configuration/main.tf

@@ -0,0 +1,158 @@
+#----------------------------------------------------------------------------
+# Okta Auth
+#----------------------------------------------------------------------------
+
+resource "vault_okta_auth_backend" "okta" {
+    description  = "Terraform Okta auth backend"
+    organization = "mdr-multipass"
+    token        = var.okta_api_token
+    base_url     = "okta.com"
+    ttl          = "1h"
+    max_ttl      = "8h"
+
+    group {
+        group_name = "mdr-admins"
+        policies   = [vault_policy.admins.name]
+    }
+    group {
+        group_name = "mdr-engineers"
+        policies   = [vault_policy.engineers.name]
+    }
+    group {
+        group_name = "phantom-role-administrator"
+        policies   = [vault_policy.phantom.name]
+    }
+    group {
+        group_name = "vault-admins"
+        policies   = [vault_policy.admins.name]
+    }
+    group {
+        group_name = "analyst-shift-lead"
+        policies   = [vault_policy.soc.name]
+    }
+    group {
+        group_name = "analyst-tier-3"
+        policies   = [vault_policy.soc.name]
+    }
+}
+
+#----------------------------------------------------------------------------
+# Okta OIDC Auth
+#----------------------------------------------------------------------------
+
+#NOTICE: Members of the default_role do not need to type in the role, like a boss.
+# If you are not a member of the default_role, then you must type in your role, like a peasant.
+resource "vault_jwt_auth_backend" "okta_oidc" {
+    description            = "Terraform Managed OIDC Auth"
+    path                   = "oidc"
+    type                   = "oidc"
+    oidc_discovery_url     = "https://mdr-multipass.okta.com"
+    oidc_client_id         = var.okta_oidc_client_id
+    oidc_client_secret     = var.okta_oidc_client_secret
+    bound_issuer           = "https://mdr-multipass.okta.com"
+    default_role           = "mdr-admins"
+    tune {
+        listing_visibility = "unauth"
+        max_lease_ttl      = "8h"
+        default_lease_ttl  = "1h"
+        token_type         = "default-service"
+    }
+    #the oidc_client_secret causes terraform to think it needs to apply changes. 
+    #lifecycle { ignore_changes = [oidc_client_secret,]}
+}
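
To make the NOTICE above concrete: with `default_role = "mdr-admins"`, members of that role can log in without naming a role, while everyone else must pass one. A sketch of both flows (the `engineers` role name is illustrative; actual role names come from `var.roles`):

```
# member of the default role (mdr-admins): no role argument needed
vault login -method=oidc

# any other user must name a role explicitly
vault login -method=oidc role=engineers
```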
+
+#max token TTL of 28800 seconds (8 hours)
+resource "vault_jwt_auth_backend_role" "okta_oidc" {
+  for_each               = var.roles
+  backend                = vault_jwt_auth_backend.okta_oidc.path
+  role_name              = each.key
+  token_policies         = each.value.token_policies
+  user_claim             = "email"
+  role_type              = "oidc"
+  allowed_redirect_uris  = ["https://vault.pvt.xdrtest.accenturefederalcyber.com/ui/vault/auth/oidc/oidc/callback" ]
+  oidc_scopes            = [ "profile", "email", "groups" ]
+  bound_claims           = { groups = join(",", each.value.bound_groups) }
+  verbose_oidc_logging   = false
+  token_explicit_max_ttl = "28800" 
+}
+
+#----------------------------------------------------------------------------
+# AWS Auth
+#----------------------------------------------------------------------------
+resource "vault_auth_backend" "aws" {
+  type = "aws"
+}
+
+#vault write auth/aws/config/client sts_endpoint=https://sts.us-gov-east-1.amazonaws.com sts_region=us-gov-east-1
+#https://github.com/terraform-providers/terraform-provider-vault/pull/717
+#https://github.com/terraform-providers/terraform-provider-vault/issues/689
+resource "vault_aws_auth_backend_client" "aws" {
+  backend      = vault_auth_backend.aws.path
+  sts_endpoint = "https://sts.us-gov-east-1.amazonaws.com"
+}
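
As the comment above notes, the provider at this point did not expose `sts_region` (see the linked PR/issue), so that value was set with the manual `vault write` shown. The resulting client configuration can be inspected afterwards; a minimal check:

```
# confirm the STS endpoint/region on the AWS auth client config
vault read auth/aws/config/client
```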
+
+#this role has not been created yet. BP - 11/6/2020
+# resource "vault_aws_auth_backend_role" "portal" {
+#   backend                         = vault_auth_backend.aws.path
+#   role                            = "portal"
+#   auth_type                       = "iam"
+#   bound_iam_principal_arns        = ["arn:aws-us-gov:iam::738800754746:role/portal-instance-role"]
+#   token_ttl                       = 60
+#   token_max_ttl                   = 86400
+#   token_policies                  = ["portal"]
+# }
+
+#this role has not been created yet. BP - 11/6/2020
+# resource "vault_aws_auth_backend_role" "portal-data-sync-lambda-role" {
+#   backend                         = vault_auth_backend.aws.path
+#   role                            = "portal-data-sync-lambda-role"
+#   auth_type                       = "iam"
+#   bound_iam_principal_arns        = ["arn:aws-us-gov:iam::738800754746:role/portal-data-sync-lambda-role"]
+#   token_ttl                       = 60
+#   token_max_ttl                   = 86400
+#   token_policies                  = ["portal"]
+# }
+
+#Legacy probably not used
+# resource "vault_aws_auth_backend_role" "clu" {
+#   backend                         = vault_auth_backend.aws.path
+#   role                            = "clu"
+#   auth_type                       = "iam"
+#   bound_iam_principal_arns        = ["arn:aws-us-gov:iam::738800754746:role/clu-instance-role"]
+#   token_ttl                       = 60
+#   token_max_ttl                   = 86400
+#   token_policies                  = ["clu"]
+#   inferred_aws_region             = "us-gov-east-1"
+# }
+
+
+#----------------------------------------------------------------------------
+# AppRole Auth
+#----------------------------------------------------------------------------
+
+resource "vault_auth_backend" "approle" {
+  type        = "approle"
+  description = "approle"
+
+}
+
+#generate approle for salt-master authentication
+resource "vault_approle_auth_backend_role" "salt-master" {
+  backend        = vault_auth_backend.approle.path
+  role_name      = "salt-master"
+  token_policies = ["salt-master"]
+  token_max_ttl  = "10800"
+}
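
For context, a minimal sketch of how the salt-master would consume this AppRole (the mount path defaults to `approle`, matching `vault_auth_backend.approle`): fetch the RoleID, generate a SecretID, then exchange the pair for a token carrying the `salt-master` policy.

```
# fetch the RoleID and mint a SecretID for the salt-master role
vault read auth/approle/role/salt-master/role-id
vault write -f auth/approle/role/salt-master/secret-id

# exchange the pair for a token (capped at 10800s by token_max_ttl)
vault write auth/approle/login role_id=<role_id> secret_id=<secret_id>
```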
+
+
+#----------------------------------------------------------------------------
+# File Audit
+#----------------------------------------------------------------------------
+
+resource "vault_audit" "file_audit" {
+  type    = "file"
+  options = {
+    file_path = "/var/log/vault.log"
+  }
+}
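
Once applied, the audit device can be confirmed from the CLI; a minimal check:

```
# list enabled audit devices and their options (expects file_path=/var/log/vault.log)
vault audit list -detailed
```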
+

+ 162 - 0
base/vault-configuration/policies.tf

@@ -0,0 +1,162 @@
+#----------------------------------------------------------------------------
+# Policies
+#----------------------------------------------------------------------------
+
+#Admins
+data "vault_policy_document" "admins" {
+  rule {
+    path         = "*"
+    capabilities = ["create", "read", "update", "delete", "list", "sudo"]
+    description  = "allow all on permissions"
+  }
+}
+
+resource "vault_policy" "admins" {
+  name   = "admins"
+  policy = data.vault_policy_document.admins.hcl
+}
+
+#Clu Legacy
+data "vault_policy_document" "clu" {
+  rule {
+    path         = "jenkins*"
+    capabilities = ["read","list"]
+    description  = "clu read write on jenkins - legacy"
+  }
+}
+
+resource "vault_policy" "clu" {
+  name   = "clu"
+  policy = data.vault_policy_document.clu.hcl
+}
+
+#This access is for Feed Management/engineers. 
+data "vault_policy_document" "engineers" {
+  rule {
+    path         = "onboarding*"
+    capabilities = ["create", "read", "update", "delete", "list", "sudo"]
+    description  = "engineers/Feed Management"
+  }
+}
+
+resource "vault_policy" "engineers" {
+  name   = "engineers"
+  policy = data.vault_policy_document.engineers.hcl
+}
+
+#This access is for Phantom Admins. 
+data "vault_policy_document" "phantom" {
+  rule {
+    path         = "phantom*"
+    capabilities = ["create", "read", "update", "delete", "list", "sudo"]
+    description  = "Phantom"
+  }
+  rule {
+    path         = "onboarding*"
+    capabilities = ["create", "read", "update", "delete", "list", "sudo"]
+    description  = "onboarding"
+  }
+  rule {
+    path         = "portal*"
+    capabilities = ["create", "read", "update", "delete", "list", "sudo"]
+    description  = "Portal"
+  }
+}
+
+resource "vault_policy" "phantom" {
+  name   = "phantom"
+  policy = data.vault_policy_document.phantom.hcl
+}
+
+#portal
+data "vault_policy_document" "portal" {
+  rule {
+    path         = "portal*"
+    capabilities = ["create", "read", "update", "delete", "list", "sudo"]
+    description  = "Portal"
+  }
+}
+
+resource "vault_policy" "portal" {
+  name   = "portal"
+  policy = data.vault_policy_document.portal.hcl
+}
+
+#salt-master should only be able to create tokens
+data "vault_policy_document" "salt-master" {
+  rule {
+    path         = "auth/*"
+    capabilities = ["read", "list", "sudo", "create", "update", "delete"]
+    description  = "salt-master"
+  }
+}
+
+resource "vault_policy" "salt-master" {
+  name   = "salt-master"
+  policy = data.vault_policy_document.salt-master.hcl
+}
+
+
+#restrict salt-minions to only list secrets here - saltstack/minions
+#allow all minions access to this shared pillar data.
+data "vault_policy_document" "minions" {
+  rule {
+    path         = "salt/*"
+    capabilities = ["list"]
+    description  = "minions"
+  }
+  rule {
+    path         = "salt/pillar_data"
+    capabilities = ["read"]
+    description  = "minions"
+  }
+}
+
+resource "vault_policy" "minions" {
+  name   = "saltstack/minions"
+  policy = data.vault_policy_document.minions.hcl
+}
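
For reference, the `vault_policy_document` above renders to roughly the following policy HCL, which is what gets attached as `saltstack/minions` (a sketch; the provider may also emit the rule descriptions as comments):

```
path "salt/*" {
  capabilities = ["list"]
}

path "salt/pillar_data" {
  capabilities = ["read"]
}
```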
+
+
+#restrict sensu salt-minion to only list secrets here - saltstack/minions
+#Policy must be named: saltstack/minion/sensu.msoc.defpoint.local
+# saltstack/minion/<minion-id>
+data "vault_policy_document" "sensu-minion" {
+  rule {
+    path         = "auth/*"
+    capabilities = ["read", "list", "sudo", "create", "update", "delete"]
+    description  = "sensu-minion"
+  }
+}
+
+resource "vault_policy" "sensu-minion" {
+  name   = "saltstack/minion/sensu.msoc.defpoint.local"
+  policy = data.vault_policy_document.sensu-minion.hcl
+}
+
+data "vault_policy_document" "soc" {
+  rule {
+    path         = "soc*"
+    capabilities = ["create", "read", "update", "delete", "list", "sudo"]
+    description  = "soc"
+  }
+}
+
+resource "vault_policy" "soc" {
+  name   = "soc"
+  policy = data.vault_policy_document.soc.hcl
+}
+
+
+data "vault_policy_document" "read-only" {
+  rule {
+    path         = "/nothing/*"
+    capabilities = ["read", "list"]
+    description  = "No permissions"
+  }
+}
+
+resource "vault_policy" "read-only" {
+  name   = "read-only"
+  policy = data.vault_policy_document.read-only.hcl
+}

+ 35 - 0
base/vault-configuration/vars.tf

@@ -0,0 +1,35 @@
+#Thanks to https://github.com/onetwopunch/terraform-vault-okta/blob/master/main.tf for the code.
+variable "roles" {
+  type        = map
+  default     = {}
+  description = <<EOF
+Map of Vault role names to their bound groups and token policies. Structure looks like this:
+```
+roles = {
+  okta_admin = {
+    token_policies = ["admin"]
+    bound_groups = ["vault_admins"]
+  },
+  okta_devs  = {
+    token_policies = ["devs"]
+    bound_groups = ["vault_devs"]
+  }
+}
+```
+EOF
+}
+
+variable "okta_oidc_client_id" {
+  type        = string
+  description = "Okta Vault OIDC app client ID"
+}
+
+variable "okta_oidc_client_secret" {
+  type        = string
+  description = "Okta Vault OIDC app client secret"
+}
+
+variable "okta_api_token" {
+  type        = string
+  description = "Okta Vault api secret"
+}

+ 1 - 0
base/vault/amis.tf

@@ -0,0 +1 @@
+../amis.tf

+ 30 - 0
base/vault/certificate.tf

@@ -0,0 +1,30 @@
+resource "aws_acm_certificate" "cert" {
+  domain_name       = "vault.${var.dns_info["private"]["zone"]}"
+  validation_method = "DNS"
+
+  tags = merge(var.standard_tags, var.tags)
+}
+
+resource "aws_acm_certificate_validation" "cert" {
+  certificate_arn         = aws_acm_certificate.cert.arn
+  validation_record_fqdns = [for record in aws_route53_record.cert_validation: record.fqdn]
+}
+
+resource "aws_route53_record" "cert_validation" {
+  provider = aws.mdr-common-services-commercial
+
+  for_each = {
+    for dvo in aws_acm_certificate.cert.domain_validation_options : dvo.domain_name => {
+      name   = dvo.resource_record_name
+      record = dvo.resource_record_value
+      type   = dvo.resource_record_type
+    }
+  }
+
+  allow_overwrite = true
+  name            = each.value.name
+  records         = [each.value.record]
+  ttl             = 60
+  type            = each.value.type
+  zone_id         = var.dns_info["public"]["zone_id"]
+}

+ 72 - 0
base/vault/cloud-init/cloud_init.tpl

@@ -0,0 +1,72 @@
+#cloud-config
+preserve_hostname: false
+hostname: ${hostname}
+salt-master: ${salt_master}
+fqdn: ${fqdn}
+
+# Write files happens early
+write_files:
+- content: |
+    proxy=http://${proxy}:80
+  path: /etc/yum.conf
+  append: true
+- content: |
+    [global]
+    proxy=${proxy}
+  path: /etc/pip.conf
+- content: |
+    export HTTPS_PROXY=http://${proxy}:80
+    export HTTP_PROXY=http://${proxy}:80
+    export NO_PROXY=localhost,127.0.0.1,169.254.169.254,pvt.xdrtest.accenturefederalcyber.com,pvt.xdr.accenturefederalcyber.com,reposerver.msoc.defpoint.local,jenkins.msoc.defpoint.local,pod1search-splunk-sh.msoc.defpoint.local,s3.amazonaws.com,ssm.${ aws_region }.amazonaws.com,ec2messages.${ aws_region }.amazonaws.com,ec2.${ aws_region }.amazonaws.com,ssmmessages.${ aws_region }.amazonaws.com,iratemoses.mdr.defpoint.com,jira.mdr.defpoint.com,reposerver.pvt.xdr.accenturefederalcyber.com,jenkins.pvt.xdr.accenturefederalcyber.com,pod1search-splunk-sh.pvt.xdr.accenturefederalcyber.com,reposerver.pvt.xdrtest.accenturefederalcyber.com,jenkins.pvt.xdrtest.accenturefederalcyber.com,pod1search-splunk-sh.pvt.xdrtest.accenturefederalcyber.com,iratemoses.xdr.accenturefederalcyber.com,jira.xdr.accenturefederalcyber.com,iratemoses.xdrtest.accenturefederalcyber.com,jira.xdrtest.accenturefederalcyber.com
+    export https_proxy=$HTTPS_PROXY
+    export http_proxy=$HTTP_PROXY
+    export no_proxy=$NO_PROXY
+  path: /etc/profile.d/proxy.sh
+- content: |
+    ${fqdn}
+  path: /etc/salt/minion_id
+- content: |
+    master: ${salt_master}
+  path: /etc/salt/minion
+- content: |
+    grains:
+      environment: ${ environment }
+      aws_partition: ${ aws_partition }
+      aws_partition_alias: ${ aws_partition_alias }
+  path: /etc/salt/minion.d/cloud_init_grains.conf
+
+#yum_repos:
+#  epel-release:
+#    baseurl: http://download.fedoraproject.org/pub/epel/7/$basearch
+#    enabled: false
+#    failovermethod: priority
+#    gpgcheck: true
+#    gpgkey: http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
+#    name: Extra Packages for Enterprise Linux 7 - Release
+
+packages:
+ - vim
+
+package_update: true # Always patch
+
+growpart:
+  mode: auto
+  devices: [ '/', '/var', '/var/log', '/var/log/audit', '/var/tmp', '/tmp', '/home' ]
+  ignore_growroot_disabled: false
+
+runcmd:
+ - /bin/systemctl restart salt-minion
+ - /bin/systemctl enable salt-minion
+ - /bin/systemctl start amazon-ssm-agent
+ - /bin/systemctl enable amazon-ssm-agent
+ - /usr/sbin/aide --update --verbose=0
+ - /bin/cp /var/lib/aide/aide.db.new.gz /var/lib/aide/aide.db.gz
+
+# Either final message or power state, but probably not both
+final_message: "The system is up after $UPTIME seconds"
+#power_state:
+#  delay: "+30"
+#  mode: reboot
+#  message: "System configured after $UPTIME seconds"
+#  timeout: 300
+#  condition: true

+ 19 - 0
base/vault/dynamodb.tf

@@ -0,0 +1,19 @@
+resource "aws_dynamodb_table" "vault" {
+  name           = "vault-dynamodb"
+  billing_mode   = "PAY_PER_REQUEST"
+  hash_key       = "Path"
+  range_key      = "Key"
+
+  attribute {
+    name = "Path"
+    type = "S"
+  }
+
+  attribute {
+    name = "Key"
+    type = "S"
+  }
+
+  tags = merge(var.standard_tags, var.tags)
+}
+
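
This table backs Vault's DynamoDB storage backend, and the `Path`/`Key` hash and range keys match what that backend expects. For context, the server-side storage stanza (managed outside this Terraform, e.g. by Salt or the AMI) would look roughly like this; the region is an assumption based on the STS endpoint used elsewhere in this repo:

```
storage "dynamodb" {
  ha_enabled = "true"
  region     = "us-gov-east-1"
  table      = "vault-dynamodb"
}
```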

+ 114 - 0
base/vault/elb.tf

@@ -0,0 +1,114 @@
+resource "aws_alb" "vault" {
+  name            = "vault-alb-${var.environment}"
+  security_groups = [ aws_security_group.vault_ALB_server.id ]
+  internal        = true 
+  subnets         = var.subnets
+
+
+  access_logs {
+    bucket  = "xdr-elb-${ var.environment }"
+    enabled = true
+  }
+
+  tags = merge(var.standard_tags, var.tags, { Name = "vault-alb-${var.environment}" })
+}
+
+# Create a new target group
+resource "aws_alb_target_group" "vault" {
+  name                 = "vault-alb-targets-https-${var.environment}"
+  port                 = 443 
+  protocol             = "HTTPS"
+  #deregistration_delay = "${local.lb_deregistration_delay}"
+  vpc_id               = var.vpc_id
+
+  health_check {
+    protocol = "HTTPS"
+    path     = "/v1/sys/health"
+    matcher  = "200"
+    timeout  = "4"
+    interval = "5"
+  }
+
+  stickiness {
+    type    = "lb_cookie"
+    enabled = false 
+  }
+
+  tags = merge(var.standard_tags, var.tags)
+}
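
The health check targets Vault's `/v1/sys/health` endpoint and only accepts a 200, so standby nodes (which return 429 by default) are marked unhealthy and the ALB routes solely to the active node. A quick way to check a node directly (hostname is a placeholder):

```
# 200 = active, 429 = standby, 503 = sealed (Vault defaults)
curl -sk https://<vault-node>/v1/sys/health
```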
+
+resource "aws_lb_target_group_attachment" "vault" {
+  for_each = toset(var.instance_count)
+  target_group_arn = aws_alb_target_group.vault.arn
+  target_id        = aws_instance.instance[each.key].id
+  port             = 443
+}
+
+# Create a new alb listener
+resource "aws_alb_listener" "vault_https" {
+  load_balancer_arn = aws_alb.vault.arn
+  port              = "443"
+  protocol          = "HTTPS"
+  ssl_policy        = "ELBSecurityPolicy-FS-1-2-Res-2019-08" # PFS, TLS1.2, most "restrictive" policy (took a while to find that)
+  certificate_arn   = aws_acm_certificate.cert.arn
+
+  default_action {
+    target_group_arn = aws_alb_target_group.vault.arn
+    type             = "forward"
+  }
+}
+
+# #########################
+# # DNS Entry
+# module "public_dns_record" {
+#   source = "../../submodules/dns/public_ALIAS_record"
+
+#   name = var.instance_name
+#   target_dns_name = aws_lb.openvpn-nlb.dns_name
+#   target_zone_id  = aws_lb.openvpn-nlb.zone_id
+#   dns_info = var.dns_info
+
+#   providers = {
+#     aws.mdr-common-services-commercial = aws.mdr-common-services-commercial
+#   }
+# }
+
+#DNS record for the LB (a CNAME was required; an ALIAS record did NOT work due to an AWS bug)
+resource "aws_route53_record" "vault_internal" {
+  zone_id = var.dns_info["private"]["zone_id"]
+  name    = var.instance_name 
+  type    = "CNAME" 
+  records = [aws_alb.vault.dns_name]
+  ttl = "60"
+  provider = aws.c2
+}
+
+#----------------------------------------------------------------------------
+# Vault ALB Security Group
+#----------------------------------------------------------------------------
+
+resource "aws_security_group" "vault_ALB_server" {
+  vpc_id      = var.vpc_id
+  name        = "vault-alb-sg"
+  tags = merge(var.standard_tags, var.tags)
+}
+
+resource "aws_security_group_rule" "vault_server_from_vpc" {
+  type              = "ingress"
+  from_port         = 443
+  to_port           = 443
+  protocol          = "tcp"
+  cidr_blocks       = ["10.0.0.0/8"]
+  description       = "Allows the server to receive traffic from everywhere"
+  security_group_id = aws_security_group.vault_ALB_server.id
+}
+
+resource "aws_security_group_rule" "alb_to_vault_server" {
+  type              = "egress"
+  from_port         = 443
+  to_port           = 443
+  protocol          = "tcp"
+  source_security_group_id = aws_security_group.instance_security_group.id
+  description       = "Allows the ALB to talk to the vault servers"
+  security_group_id = aws_security_group.vault_ALB_server.id
+}

+ 150 - 0
base/vault/iam.tf

@@ -0,0 +1,150 @@
+resource "aws_iam_instance_profile" "vault_instance_profile" {
+  name     = "vault-instance-profile"
+  role     = aws_iam_role.vault.name
+}
+
+resource "aws_iam_role" "vault" {
+  name     = "vault-instance-role"
+
+  assume_role_policy = <<EOF
+{   
+    "Version": "2012-10-17",
+    "Statement": [
+      { 
+        "Sid": "",
+        "Effect": "Allow",
+        "Principal": {
+          "Service": [
+            "ec2.amazonaws.com",
+            "ssm.amazonaws.com"
+            ]
+        },
+        "Action": "sts:AssumeRole"
+      }
+    ]
+  }
+EOF
+}
+
+#-------------------------------
+# KMS Policy
+#-------------------------------
+
+data "aws_iam_policy_document" "vault_kms_key_policy" {
+  statement {
+    sid    = "KMSAutoUnseal"
+    effect = "Allow"
+
+    actions = [
+      "kms:Encrypt",
+      "kms:Decrypt",
+      "kms:DescribeKey",
+    ]
+
+    resources = [
+      aws_kms_key.vault.arn,
+    ]
+  }
+
+  statement {
+    sid    = "Tags"
+    effect = "Allow"
+
+    actions = [
+      "ec2:DescribeTags",
+      "ec2:DescribeInstances"
+    ]
+    resources = [
+      "*"
+    ]
+  }
+}
+
+resource "aws_iam_policy" "vault_kms_key_policy" {
+  name     = "vault_kms"
+  path     = "/"
+  policy   = data.aws_iam_policy_document.vault_kms_key_policy.json
+}
+
+resource "aws_iam_role_policy_attachment" "vault_kms" {
+  role       = aws_iam_role.vault.name
+  policy_arn = aws_iam_policy.vault_kms_key_policy.arn
+}
+
+resource "aws_iam_role_policy_attachment" "AmazonEC2RoleforSSM" {
+  role       = aws_iam_role.vault.name
+  policy_arn = "arn:aws-us-gov:iam::aws:policy/service-role/AmazonEC2RoleforSSM"
+}
+
+#------------------------------
+# DynamoDB 
+#------------------------------
+
+data "aws_iam_policy_document" "vault_dynamodb_policy" {
+  statement {
+    sid    = "AllowVaultCommunicationtoDynamoDB"
+    effect = "Allow"
+
+    actions = [
+      "dynamodb:DescribeLimits",
+      "dynamodb:DescribeTimeToLive",
+      "dynamodb:ListTagsOfResource",
+      "dynamodb:DescribeReservedCapacityOfferings",
+      "dynamodb:DescribeReservedCapacity",
+      "dynamodb:ListTables",
+      "dynamodb:BatchGetItem",
+      "dynamodb:BatchWriteItem",
+      "dynamodb:CreateTable",
+      "dynamodb:DeleteItem",
+      "dynamodb:GetItem",
+      "dynamodb:GetRecords",
+      "dynamodb:PutItem",
+      "dynamodb:Query",
+      "dynamodb:UpdateItem",
+      "dynamodb:Scan",
+      "dynamodb:DescribeTable",
+    ]
+
+    resources = [aws_dynamodb_table.vault.arn]
+  }
+}
+
+resource "aws_iam_policy" "vault_dynamodb_policy" {
+  name     = "vault_dynamodb"
+  path     = "/"
+  policy   = data.aws_iam_policy_document.vault_dynamodb_policy.json
+}
+
+resource "aws_iam_role_policy_attachment" "vault_dynamodb" {
+  role       = aws_iam_role.vault.name
+  policy_arn = aws_iam_policy.vault_dynamodb_policy.arn
+}
+
+# ---------------------------------------------------------------------------------------------------------------------
+# IAM Policy for EC2 AppRole Authentication 
+# ---------------------------------------------------------------------------------------------------------------------
+
+data "aws_iam_policy_document" "vault_approle" {
+  statement {
+    sid    = "AllowVaultIAMMetaData"
+    effect = "Allow"
+
+    actions = [ 
+        "iam:GetInstanceProfile",
+        "iam:GetRole"
+    ]   
+
+    resources = ["*"]
+  }
+}
+
+resource "aws_iam_policy" "vault_approle_policy" {
+  name     = "vault_approle"
+  path     = "/" 
+  policy   = data.aws_iam_policy_document.vault_approle.json
+}
+
+resource "aws_iam_role_policy_attachment" "vault_approle" {
+  role       = aws_iam_role.vault.name
+  policy_arn = aws_iam_policy.vault_approle_policy.arn
+}

+ 16 - 0
base/vault/kms.tf

@@ -0,0 +1,16 @@
+#-------------------------------
+# KMS Key
+#-------------------------------
+
+resource "aws_kms_key" "vault" {
+  description = "Vault unseal key"
+  enable_key_rotation = true
+  tags = {
+    Name = "vault-kms-unseal-${var.environment}"
+  }
+}
+
+resource "aws_kms_alias" "vault" {
+  name          = "alias/vault-kms-unseal-${var.environment}"
+  target_key_id = aws_kms_key.vault.key_id
+}
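
This key is granted to the instance role in iam.tf for auto-unseal. For context, the matching server-side seal stanza (managed outside this Terraform) would look roughly like the following; the region is an assumption and the alias suffix mirrors `aws_kms_alias.vault`:

```
seal "awskms" {
  region     = "us-gov-east-1"                        # assumed region
  kms_key_id = "alias/vault-kms-unseal-<environment>" # matches aws_kms_alias.vault
}
```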

+ 257 - 0
base/vault/main.tf

@@ -0,0 +1,257 @@
+# Some instance variables
+locals {
+  ami_selection       = "minion" # master, minion, ...
+}
+
+# Rather than pass in the aws security group, we just look it up. This will
+# probably be useful other places, as well.
+data "aws_security_group" "typical-host" {
+  name   = "typical-host"
+  vpc_id = var.vpc_id
+}
+
+# Use the default EBS key
+data "aws_kms_key" "ebs-key" {
+  key_id = "alias/ebs_root_encrypt_decrypt"
+}
+
+resource "aws_network_interface" "instance" {
+  for_each = toset(var.instance_count)
+  subnet_id = var.subnets[each.value - 1]
+  security_groups = [ data.aws_security_group.typical-host.id, aws_security_group.instance_security_group.id ]
+  description = "${var.instance_name}-${each.value}"
+  tags = merge(var.standard_tags, var.tags, { Name = "${var.instance_name}-${each.value}" })
+}
+
+# resource "aws_eip" "instance" {
+#   for_each = toset(var.instance_count)
+#   vpc = true
+#   tags = merge(var.standard_tags, var.tags, { Name = "${var.instance_name}-${each.value}" })
+# }
+
+# resource "aws_eip_association" "instance" {
+#   for_each = toset(var.instance_count)
+#   network_interface_id = aws_network_interface.instance[each.key].id
+#   allocation_id = aws_eip.instance[each.key].id
+# }
+
+resource "aws_instance" "instance" {
+  for_each = toset(var.instance_count)
+  #availability_zone = var.azs[count.index % 2]
+  tenancy = "default"
+  ebs_optimized = true
+  disable_api_termination = var.instance_termination_protection
+  instance_initiated_shutdown_behavior = "stop"
+  instance_type = var.instance_type
+  key_name = "msoc-build"
+  monitoring = false
+  iam_instance_profile = "vault-instance-profile"
+
+  ami = local.ami_map[local.ami_selection]
+  # We need to ignore ebs_block_device changes, because if the AMI changes, so does the snapshot_id.
+  # If they add a feature to block more specific changes (eg `ebs_block_devices[*].snapshot_id`), then
+  # that could be removed.
+  lifecycle { ignore_changes = [ ami, key_name, user_data, ebs_block_device ] }
+
+  # These device definitions are optional, but added for clarity.
+  root_block_device {
+      volume_type = "gp2"
+      #volume_size = "60"
+      delete_on_termination = true
+      encrypted = true
+      kms_key_id = data.aws_kms_key.ebs-key.arn
+  }
+
+  ebs_block_device {
+    # swap
+    device_name = "/dev/xvdm"
+    volume_size = 48
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    # Snapshot IDs need to be grabbed from the ami, or it will replace every time. It's ugly.
+    # This may prompt replacement when the AMI is updated.
+    # See:
+    #   https://github.com/hashicorp/terraform/issues/19958
+    #   https://github.com/terraform-providers/terraform-provider-aws/issues/13118
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdm"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /home
+    device_name = "/dev/xvdn"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdn"].ebs.snapshot_id
+
+  }
+  ebs_block_device {
+    # /var
+    device_name = "/dev/xvdo"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdo"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/tmp
+    device_name = "/dev/xvdp"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdp"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/log
+    device_name = "/dev/xvdq"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdq"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/log/audit
+    device_name = "/dev/xvdr"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdr"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /tmp
+    device_name = "/dev/xvds"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvds"].ebs.snapshot_id
+  }
+
+  network_interface {
+    device_index = 0
+    network_interface_id = aws_network_interface.instance[each.key].id
+  }
+  
+  #TODO switch to dynamic tag
+  user_data = data.template_cloudinit_config.cloud_init_config[each.key].rendered
+  tags = merge( var.standard_tags, var.tags, { Name = length(var.instance_count) > 1 ? "${var.instance_name}-${each.value}" : var.instance_name } )
+  volume_tags = merge( var.standard_tags, var.tags, { Name = length(var.instance_count) > 1 ? "${var.instance_name}-${each.value}" : var.instance_name } )
+}
+
+module "private_dns_record" {
+  for_each = toset(var.instance_count)
+  source = "../../submodules/dns/private_A_record"
+
+  name = "${var.instance_name}-${each.value}"
+  ip_addresses = [ aws_instance.instance[each.key].private_ip ]
+  dns_info = var.dns_info
+  reverse_enabled = var.reverse_enabled
+
+  providers = {
+    aws.c2 = aws.c2
+  }
+}
+
+#The cloud-init data prepares the Vault instances.
+data "template_file" "cloud_init" {
+  for_each = toset(var.instance_count)
+  # Should these be in a common directory? I suspect they'd be reusable
+  template = "${file("${path.module}/cloud-init/cloud_init.tpl")}"
+
+  vars = {
+    hostname = "${var.instance_name}-${each.value}"
+    fqdn = "${var.instance_name}-${each.value}.${var.dns_info["private"]["zone"]}"
+    environment = var.environment
+    salt_master  = var.salt_master
+    proxy = var.proxy
+    aws_partition = var.aws_partition
+    aws_partition_alias = var.aws_partition_alias
+    aws_region = var.aws_region
+  }
+}
+
+# Render a multi-part cloud-init config making use of the part
+# above, and other source files
+data "template_cloudinit_config" "cloud_init_config" {
+  for_each = toset(var.instance_count)
+  gzip          = true
+  base64_encode = true
+
+  # Main cloud-config configuration file.
+  part {
+    filename     = "init.cfg"
+    content_type = "text/cloud-config"
+    content      = "${data.template_file.cloud_init[each.key].rendered}"
+  }
+  
+}
+
+#----------------------------------------------------------------------------
+# Vault Server SG
+#----------------------------------------------------------------------------
+
+resource "aws_security_group" "instance_security_group" {
+  name = "${var.instance_name}_security_group"
+  description = "Security Group for ${var.instance_name}(s)"
+  vpc_id = var.vpc_id
+  tags = merge(var.standard_tags, var.tags)
+}
+
+#----------------------------------------------------------------------------
+# Vault INGRESS
+#----------------------------------------------------------------------------
+
+resource "aws_security_group_rule" "vault_server_from_alb" {
+  type              = "ingress"
+  from_port         = 443
+  to_port           = 443
+  protocol          = "tcp"
+  source_security_group_id = aws_security_group.vault_ALB_server.id
+  description       = "Allows the servers to receive traffic for troubleshooting"
+  security_group_id = aws_security_group.instance_security_group.id
+}
+
+
+#----------------------------------------------------------------------------
+# Vault EGRESS
+#----------------------------------------------------------------------------
+
+resource "aws_security_group_rule" "https-out" {
+  description = "For endpoints and troubleshooting"
+  type = "egress"
+  from_port = 443
+  to_port = 443
+  protocol = "tcp"
+  cidr_blocks = [ "10.0.0.0/8" ]
+  security_group_id = aws_security_group.instance_security_group.id
+}
+
+resource "aws_security_group_rule" "vault_server_to_alb" {
+  type              = "egress"
+  from_port         = 443
+  to_port           = 443
+  protocol          = "tcp"
+  source_security_group_id = aws_security_group.vault_ALB_server.id
+  description = "Allow vault to communicate via HTTPS w/ other members in cluster"
+  security_group_id = aws_security_group.instance_security_group.id
+}
+
+#Grab the DynamoDB VPC endpoint prefix list
+data "aws_prefix_list" "private_dynamodb" {
+  name = "com.amazonaws.*.dynamodb"
+}
+
+resource "aws_security_group_rule" "vault_server_egress_dynamodb" {
+  type              = "egress"
+  from_port         = 443
+  to_port           = 443
+  protocol          = "tcp"
+  security_group_id = aws_security_group.instance_security_group.id
+  description       = "Outbound to Dynamodb"
+  prefix_list_ids   = [ data.aws_prefix_list.private_dynamodb.id ]
+}

+ 13 - 0
base/vault/outputs.tf

@@ -0,0 +1,13 @@
+output "instance_arn" {
+  value = {
+    for instance in aws_instance.instance:
+    instance.id => instance.arn
+  }
+}
+
+output "instance_private_ip" {
+  value = {
+    for instance in aws_instance.instance:
+    instance.id => instance.private_ip
+  }
+}

+ 58 - 0
base/vault/vars.tf

@@ -0,0 +1,58 @@
+variable "instance_name" {
+  description = "Hostname, DNS entry, etc."
+  type = string
+}
+
+variable "instance_count" {
+  description = "Number of servers"
+  type = list(string)
+  default = ["1","2","3"]
+}
+
+variable "azs" {
+  type = list(string)
+}
+
+variable "subnets" {
+  type = list(string)
+}
+
+variable "vpc_id" {
+  type = string
+}
+
+variable "tags" {
+  description = "Tags to add to the resource (in addition to global standard tags)"
+  type        = map
+  default     = { }
+}
+
+variable "instance_type" { 
+  type = string
+  default = "t3a.micro"
+}
+
+variable "reverse_enabled" { 
+  description = "Whether to create the reverse DNS entry."
+  type = bool
+  default = true
+}
+
+variable "trusted_ips" { type = list(string) }
+variable "xdr_interconnect" { type = list(string) }
+variable "nga_pop" { type = list(string) }
+variable "afs_azure_pop" { type = list(string) }
+variable "afs_pop" { type = list(string) }
+variable "proxy" { type = string }
+variable "salt_master" { type = string }
+
+variable "cidr_map" { type = map }
+variable "dns_info" { type = map }
+variable "standard_tags" { type = map }
+variable "environment" { type = string }
+variable "aws_region" { type = string }
+variable "aws_partition" { type = string }
+variable "aws_partition_alias" { type = string }
+variable "aws_account_id" { type = string }
+variable "common_services_account" { type = string }
+variable "instance_termination_protection" { type = bool }

+ 3 - 0
base/vault/version.tf

@@ -0,0 +1,3 @@
+terraform {
+  required_version = "~> 0.13"
+}