
Adds per-instance-type roles to Splunk; creates the Splunk apps S3 bucket

Creates a base_instance_profile submodule to keep the shared instance-profile
boilerplate (SSM, tag-read, and CloudWatch policy attachments) consistent
across instance types.

To be tagged v3.3.8
Fred Damstra [afs macbook] 3 years ago
parent
commit
5582348da1
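
The pattern, condensed from the instance_profile.tf files in this commit: each Splunk server type calls the new base_instance_profile submodule for the shared role and profile, then attaches its own type-specific policy. A minimal sketch (the "xdr-example" prefix is illustrative; real callers use "xdr-cm", "xdr-custsh", "xdr-hf", "xdr-indexer", etc.):

module "instance_profile" {
  source         = "../../../submodules/iam/base_instance_profile" # path as seen from base/splunk_servers/<type>/
  prefix         = "xdr-example"
  aws_partition  = var.aws_partition
  aws_account_id = var.aws_account_id
}

resource "aws_iam_role_policy_attachment" "example_policy_attach" {
  role       = module.instance_profile.role_id    # role created by the submodule
  policy_arn = aws_iam_policy.instance_policy.arn # the type-specific policy
}

Instances then reference module.instance_profile.profile_id for iam_instance_profile.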

+ 1 - 0
base/salt_master/iam.tf

@@ -58,6 +58,7 @@ data "aws_iam_policy_document" "salt_master_policy_doc" {
     resources = [
       "arn:${var.aws_partition}:iam::*:role/service/salt-master-inventory-role",
       "arn:${var.aws_partition}:iam::*:role/service/afsxdr-binaries_writers",
+      "arn:${var.aws_partition}:iam::*:role/service/splunk-apps-s3-writer",
     ]
   }
 }

+ 100 - 0
base/splunk_servers/app_s3_bucket/iam_splunk_apps_s3_role.tf

@@ -0,0 +1,100 @@
+locals {
+  is_moose = length(regexall("moose", var.splunk_prefix)) > 0
+  sh_role = local.is_moose ? "arn:${ var.aws_partition }:iam::${ var.aws_account_id }:role/instance/${ var.splunk_prefix }-splunk-sh-instance-role" : "arn:${ var.aws_partition }:iam::${ var.aws_account_id }:role/instance/splunk-sh-instance-role"
+
+  base_role_arns = [
+    local.sh_role,
+    "arn:${ var.aws_partition }:iam::${ var.aws_account_id }:role/instance/xdr-indexer-instance-role",
+    "arn:${ var.aws_partition }:iam::${ var.aws_account_id }:role/instance/xdr-cm-instance-role",
+    "arn:${ var.aws_partition }:iam::${ var.aws_account_id }:role/instance/xdr-hf-instance-role",
+  ]
+
+  role_arns = concat(
+    local.base_role_arns,
+    var.has_cust_sh ? [ "arn:${ var.aws_partition }:iam::${ var.aws_account_id }:role/instance/xdr-custsh-instance-role" ] : []
+  )
+}
+
+resource "aws_iam_role" "splunk_apps_s3_role" {
+  name = "splunk-apps-s3"
+  path  = "/service/"
+  force_detach_policies = true # without this, destroying the role fails with a "DeleteConflict"
+
+  # local.role_arns (above) trusts the search head, indexer, cluster master,
+  # and heavy forwarder instance roles, plus the customer search head role
+  # when var.has_cust_sh is true.
+  assume_role_policy = <<EOF
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "AWS": ${jsonencode(local.role_arns)}
+      },
+      "Action": "sts:AssumeRole"
+    }
+  ]
+}
+EOF
+
+  tags = merge(var.standard_tags, var.tags)
+}
+
+resource "aws_iam_role_policy_attachment" "splunk_apps_s3_policy_attach" {
+  role = aws_iam_role.splunk_apps_s3_role.name
+  policy_arn = aws_iam_policy.splunk_apps_s3_policy.arn
+}
+
+resource "aws_iam_policy" "splunk_apps_s3_policy" {
+  name  = "splunk-apps-s3-policy"
+  path  = "/service/"
+  description = "Policy which allows splunk systems to read from the S3 bucket"
+  policy = data.aws_iam_policy_document.splunk_apps_s3_policy_doc.json
+}
+
+data "aws_iam_policy_document" "splunk_apps_s3_policy_doc" {
+  statement {
+    sid = "GeneralBucketAccess"
+    effect = "Allow"
+    actions = [
+      "s3:ListAllMyBuckets",
+      "s3:HeadBucket",
+    ]
+    resources = [ "*" ]
+  }
+
+  statement {
+    sid       = "AccessTheAppBucket"
+    effect    = "Allow"
+    resources = ["arn:${ var.aws_partition }:s3:::xdr-${ var.splunk_prefix }-${ var.environment }-splunk-apps"]
+
+    actions = [
+      "s3:ListBucket",
+      "s3:GetBucketLocation",
+    ]
+  }
+
+  statement {
+    sid       = "GetFromTheBucket"
+    effect    = "Allow"
+    resources = ["arn:${ var.aws_partition }:s3:::xdr-${ var.splunk_prefix }-${ var.environment }-splunk-apps/*"]
+
+    actions = [
+      "s3:GetObject",
+      "s3:GetObjectAcl",
+    ]
+  }
+
+  statement {
+    sid       = "UseTheKey"
+    effect    = "Allow"
+    resources = [
+      aws_kms_key.bucketkey.arn
+    ]
+    actions = [
+      "kms:Decrypt",
+      "kms:DescribeKey"
+    ]
+  }
+}

+ 95 - 0
base/splunk_servers/app_s3_bucket/iam_splunk_apps_s3_writer_role.tf

@@ -0,0 +1,95 @@
+resource "aws_iam_role" "splunk_apps_s3_writer_role" {
+  name = "splunk-apps-s3-writer"
+  path  = "/service/"
+  force_detach_policies = true # without this, destroying the role fails with a "DeleteConflict"
+
+  assume_role_policy = <<EOF
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "AWS": [
+          "arn:${var.aws_partition}:iam::${var.c2_accounts[var.aws_partition]}:role/salt-master-instance-role"
+        ]
+      },
+      "Action": "sts:AssumeRole"
+    }
+  ]
+}
+EOF
+
+  tags = merge(var.standard_tags, var.tags)
+}
+
+resource "aws_iam_role_policy_attachment" "splunk_apps_s3_writer_policy_attach" {
+  role = aws_iam_role.splunk_apps_s3_writer_role.name
+  policy_arn = aws_iam_policy.splunk_apps_s3_writer_policy.arn
+}
+
+resource "aws_iam_policy" "splunk_apps_s3_writer_policy" {
+  name  = "splunk-apps-s3-writer-policy"
+  path  = "/service/"
+  description = "Policy which allows the salt master to write Splunk apps to the S3 bucket"
+  policy = data.aws_iam_policy_document.splunk_apps_s3_writer_policy_doc.json
+}
+
+data "aws_iam_policy_document" "splunk_apps_s3_writer_policy_doc" {
+  statement {
+    sid = "GeneralBucketAccess"
+    effect = "Allow"
+    actions = [
+      "s3:ListAllMyBuckets",
+      "s3:HeadBucket",
+    ]
+    resources = [ "*" ]
+  }
+
+  statement {
+    sid       = "AccessTheAppBucket"
+    effect    = "Allow"
+
+    actions = [
+      "s3:GetLifecycleConfiguration",
+      "s3:DeleteObjectVersion",
+      "s3:ListBucketVersions",
+      "s3:GetBucketLogging",
+      "s3:RestoreObject",
+      "s3:ListBuckets",
+      "s3:GetBucketVersioning",
+      "s3:PutObject",
+      "s3:GetObject",
+      "s3:PutLifecycleConfiguration",
+      "s3:GetBucketCORS",
+      "s3:DeleteObject",
+      "s3:GetBucketLocation",
+      "s3:GetObjectVersion",
+    ]
+
+    resources = [
+      "arn:${ var.aws_partition }:s3:::xdr-${ var.splunk_prefix }-${ var.environment }-splunk-apps",
+      "arn:${ var.aws_partition }:s3:::xdr-${ var.splunk_prefix }-${ var.environment }-splunk-apps/*",
+    ]
+  }
+
+  statement {
+    sid       = "UseTheKey"
+    effect    = "Allow"
+    resources = [
+      aws_kms_key.bucketkey.arn
+    ]
+    actions = [
+      "kms:Decrypt",
+      "kms:GenerateDataKeyWithoutPlaintext",
+      "kms:Verify",
+      "kms:GenerateDataKeyPairWithoutPlaintext",
+      "kms:GenerateDataKeyPair",
+      "kms:ReEncryptFrom",
+      "kms:Encrypt",
+      "kms:GenerateDataKey",
+      "kms:ReEncryptTo",
+      "kms:Sign",
+    ]
+  }
+}
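
Not part of this commit, but for illustration: a writer-side consumer (for example, tooling run from the salt master, which the trust policy above allows) could assume splunk-apps-s3-writer and publish an app package. A sketch with hypothetical account ID, region, bucket name, and file paths:

provider "aws" {
  alias  = "apps_writer"
  region = "us-east-1" # hypothetical region
  assume_role {
    role_arn = "arn:aws:iam::123456789012:role/service/splunk-apps-s3-writer" # hypothetical account
  }
}

resource "aws_s3_bucket_object" "example_app" {
  provider = aws.apps_writer
  bucket   = "xdr-moose-prod-splunk-apps" # hypothetical prefix/environment
  key      = "apps/example_app.tgz"
  source   = "files/example_app.tgz"
}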

+ 103 - 0
base/splunk_servers/app_s3_bucket/kms.tf

@@ -0,0 +1,103 @@
+locals {
+  kms_users = [ aws_iam_role.splunk_apps_s3_role.arn ]
+}
+
+resource "aws_kms_key" "bucketkey" {
+  description             = "S3 KMS for ${local.bucket_name}."
+  deletion_window_in_days = 30
+  enable_key_rotation     = true
+  policy                  = data.aws_iam_policy_document.kms_key_policy.json
+  tags                    = merge(var.standard_tags, var.tags)
+}
+
+resource "aws_kms_alias" "bucketkey" {
+  name          = "alias/SplunkApps"
+  target_key_id = aws_kms_key.bucketkey.key_id
+}
+
+data "aws_iam_policy_document" "kms_key_policy" {
+  policy_id = local.bucket_name
+  statement {
+    sid    = "Enable IAM User Permissions"
+    effect = "Allow"
+    principals {
+      type        = "AWS"
+      identifiers = [
+        "arn:${var.aws_partition}:iam::${var.aws_account_id}:root",
+        "arn:${var.aws_partition}:iam::${var.aws_account_id}:user/MDRAdmin",
+      ]
+    }
+    actions   = ["kms:*"]
+    resources = ["*"]
+  }
+
+  statement {
+    sid    = "Allow access for Engineers"
+    effect = "Allow"
+    principals {
+      type = "AWS"
+      identifiers = [
+        "arn:${var.aws_partition}:iam::${var.aws_account_id}:user/MDRAdmin",
+        "arn:${var.aws_partition}:iam::${var.aws_account_id}:role/user/mdr_terraformer",
+      ]
+    }
+
+    actions = [
+      "kms:Create*",
+      "kms:Describe*",
+      "kms:Enable*",
+      "kms:List*",
+      "kms:Put*",
+      "kms:Update*",
+      "kms:Revoke*",
+      "kms:Disable*",
+      "kms:Get*",
+      "kms:Delete*",
+      "kms:TagResource",
+      "kms:UntagResource",
+      "kms:ScheduleKeyDeletion",
+      "kms:CancelKeyDeletion"
+    ]
+    resources = ["*"]
+  }
+
+  statement {
+    sid    = "Allow use of the key to encrypt and decrypt"
+    effect = "Allow"
+    principals {
+      type = "AWS"
+      identifiers = local.kms_users
+    }
+    actions = [
+      "kms:Encrypt",
+      "kms:Decrypt",
+      "kms:ReEncrypt*",
+      "kms:GenerateDataKey*",
+      "kms:DescribeKey"
+    ]
+    resources = ["*"]
+  }
+
+  statement {
+    sid    = "Allow attachment of persistent resources"
+    effect = "Allow"
+    principals {
+      type = "AWS"
+      identifiers = [
+        "arn:${var.aws_partition}:iam::${var.aws_account_id}:user/MDRAdmin",
+        "arn:${var.aws_partition}:iam::${var.aws_account_id}:role/user/mdr_terraformer",
+      ]
+    }
+    actions = [
+      "kms:CreateGrant",
+      "kms:ListGrants",
+      "kms:RevokeGrant"
+    ]
+    resources = ["*"]
+    condition {
+      test     = "Bool"
+      variable = "kms:GrantIsForAWSResource"
+      values   = ["true"]
+    }
+  }
+}

+ 86 - 0
base/splunk_servers/app_s3_bucket/main.tf

@@ -0,0 +1,86 @@
+locals {
+  bucket_name  = "xdr-${var.splunk_prefix}-${var.environment}-splunk-apps"
+  accounts     = [var.aws_account_id]
+  account_arns = [for a in local.accounts : "arn:${var.aws_partition}:iam::${a}:root"]
+}
+
+resource "aws_s3_bucket" "bucket" {
+  bucket = local.bucket_name
+  acl    = "private"
+
+  versioning {
+    enabled = false
+  }
+
+  tags = merge(var.standard_tags, var.tags)
+
+  #logging {
+  #  target_bucket = "dps-s3-logs"
+  #  target_prefix = "aws_terraform_s3_state_access_logs/"
+  #}
+
+  lifecycle_rule {
+    id      = "APPS_POLICY"
+    enabled = true
+
+    abort_incomplete_multipart_upload_days = 2
+
+    transition {
+      days          = 30
+      storage_class = "INTELLIGENT_TIERING"
+    }
+
+    #    expiration {
+    #      days = 365
+    #    }
+  }
+
+  server_side_encryption_configuration {
+    rule {
+      apply_server_side_encryption_by_default {
+        kms_master_key_id = aws_kms_key.bucketkey.arn
+        sse_algorithm     = "aws:kms"
+      }
+    }
+  }
+}
+
+resource "aws_s3_bucket_public_access_block" "public_access_block" {
+  bucket                  = aws_s3_bucket.bucket.id
+  block_public_acls       = true
+  block_public_policy     = true
+  ignore_public_acls      = true
+  restrict_public_buckets = true
+
+  # Not technically dependent, but prevents a "Conflicting conditional operation" conflict.
+  # See https://github.com/hashicorp/terraform-provider-aws/issues/7628
+  depends_on = [aws_s3_bucket_policy.policy]
+}
+
+resource "aws_s3_bucket_policy" "policy" {
+  bucket = aws_s3_bucket.bucket.id
+
+  policy = <<POLICY
+{
+  "Version": "2012-10-17",
+  "Id": "AllowThisAccount",
+  "Statement": [
+    {
+      "Sid": "AccountAllow",
+      "Effect": "Allow",
+      "Principal": {
+        "AWS": ${jsonencode(local.account_arns)}
+      },
+      "Action": [
+        "s3:GetObject",
+        "s3:ListBucket"
+      ],
+      "Resource": [
+        "${aws_s3_bucket.bucket.arn}",
+        "${aws_s3_bucket.bucket.arn}/*"
+      ]
+    }
+  ]
+}
+POLICY
+}

+ 11 - 0
base/splunk_servers/app_s3_bucket/outputs.tf

@@ -0,0 +1,11 @@
+output "BucketName" {
+  value = aws_s3_bucket.bucket.id
+}
+
+output "Splunk_Role_ARN" {
+  value = aws_iam_role.splunk_apps_s3_role.arn
+}
+
+output "Writer_Role_ARN" {
+  value = aws_iam_role.splunk_apps_s3_writer_role.arn
+}

+ 25 - 0
base/splunk_servers/app_s3_bucket/vars.tf

@@ -0,0 +1,25 @@
+variable "has_cust_sh" {
+  description = "True if there is a customer search head."
+  type = bool
+  default = false
+}
+
+variable "splunk_prefix" {
+  type = string
+}
+
+variable "tags" {
+  description = "Tags for the bucket and kms key."
+  type = map
+}
+
+# ----------------------------------
+# Below this line are variables inherited from higher levels, so they
+# do not need to be explicitly passed to this module.
+variable "standard_tags" { type = map }
+variable "aws_account_id" { type = string }
+variable "account_list" { type = list }
+variable "aws_region" { type = string }
+variable "aws_partition" { type = string }
+variable "environment" { type = string }
+variable "c2_accounts" { type = map }

+ 34 - 0
base/splunk_servers/cluster_master/instance_profile.tf

@@ -0,0 +1,34 @@
+module "instance_profile" {
+  source = "../../../submodules/iam/base_instance_profile"
+  prefix = "xdr-cm"
+  aws_partition = var.aws_partition
+  aws_account_id = var.aws_account_id
+}
+
+# Instance Specific Policy
+resource "aws_iam_policy" "instance_policy" {
+  name        = "cm-instance-policy"
+  path        = "/launchroles/"
+  description = "This policy allows cm-specific functions"
+  policy      = data.aws_iam_policy_document.instance_policy_doc.json
+}
+
+data "aws_iam_policy_document" "instance_policy_doc" {
+  statement {
+    sid    = "AllowAssumeRoleToSplunkApps"
+    effect = "Allow"
+
+    actions = [
+      "sts:AssumeRole"
+    ]
+
+    resources = [
+      "arn:${ var.aws_partition }:iam::${ var.aws_account_id }:role/service/splunk-apps-s3"
+    ]
+  }
+}
+
+resource "aws_iam_role_policy_attachment" "cm_instance_policy_attach" {
+  role       = module.instance_profile.role_id
+  policy_arn = aws_iam_policy.instance_policy.arn
+}

+ 1 - 1
base/splunk_servers/cluster_master/main.tf

@@ -33,7 +33,7 @@ resource "aws_instance" "instance" {
   instance_type = var.instance_type
   key_name = "msoc-build"
   monitoring = false
-  iam_instance_profile = "msoc-default-instance-profile"
+  iam_instance_profile = module.instance_profile.profile_id
 
   ami = local.ami_map[local.ami_selection]
   # We need to ignore ebs_block_device changes, because if the AMI changes, so does the snapshot_id.

+ 1 - 1
base/splunk_servers/cluster_master/vars.tf

@@ -55,7 +55,7 @@ variable "reverse_enabled" {
 variable "trusted_ips" { type = list(string) }
 variable "proxy" { type = string }
 variable "salt_master" { type = string }
-
+variable "aws_account_id" { type = string }
 variable "cidr_map" { type = map }
 variable "dns_info" { type = map }
 variable "standard_tags" { type = map }

+ 34 - 0
base/splunk_servers/customer_searchhead/instance_profile.tf

@@ -0,0 +1,34 @@
+module "instance_profile" {
+  source = "../../../submodules/iam/base_instance_profile"
+  prefix = "xdr-custsh"
+  aws_partition = var.aws_partition
+  aws_account_id = var.aws_account_id
+}
+
+# Customer SH Specific Policy
+resource "aws_iam_policy" "instance_policy" {
+  name        = "custsh_instance_policy"
+  path        = "/launchroles/"
+  description = "This policy allows custsh-specific functions"
+  policy      = data.aws_iam_policy_document.instance_policy_doc.json
+}
+
+data "aws_iam_policy_document" "instance_policy_doc" {
+  statement {
+    sid    = "AllowAssumeRoleToSplunkApps"
+    effect = "Allow"
+
+    actions = [
+      "sts:AssumeRole"
+    ]
+
+    resources = [
+      "arn:${ var.aws_partition }:iam::${ var.aws_account_id }:role/service/splunk-apps-s3"
+    ]
+  }
+}
+
+resource "aws_iam_role_policy_attachment" "instance_policy_attach" {
+  role       = module.instance_profile.role_id
+  policy_arn = aws_iam_policy.instance_policy.arn
+}

+ 1 - 1
base/splunk_servers/customer_searchhead/main.tf

@@ -35,7 +35,7 @@ resource "aws_instance" "instance" {
   instance_type = var.instance_type
   key_name = "msoc-build"
   monitoring = false
-  iam_instance_profile = "msoc-default-instance-profile"
+  iam_instance_profile = module.instance_profile.profile_id
 
   ami = local.ami_map[local.ami_selection]
   # We need to ignore ebs_block_device changes, because if the AMI changes, so does the snapshot_id.

+ 33 - 0
base/splunk_servers/heavy_forwarder/instance_profile.tf

@@ -0,0 +1,33 @@
+module "instance_profile" {
+  source = "../../../submodules/iam/base_instance_profile"
+  prefix = "xdr-hf"
+  aws_partition = var.aws_partition
+  aws_account_id = var.aws_account_id
+}
+
+resource "aws_iam_policy" "instance_policy" {
+  name        = "hf_instance_policy"
+  path        = "/launchroles/"
+  description = "This policy allows hf-specific functions"
+  policy      = data.aws_iam_policy_document.instance_policy_doc.json
+}
+
+data "aws_iam_policy_document" "instance_policy_doc" {
+  statement {
+    sid    = "AllowAssumeRoleToSplunkApps"
+    effect = "Allow"
+
+    actions = [
+      "sts:AssumeRole"
+    ]
+
+    resources = [
+      "arn:${ var.aws_partition }:iam::${ var.aws_account_id }:role/service/splunk-apps-s3"
+    ]
+  }
+}
+
+resource "aws_iam_role_policy_attachment" "hf_instance_policy_attach" {
+  role       = module.instance_profile.role_id
+  policy_arn = aws_iam_policy.instance_policy.arn
+}

+ 1 - 1
base/splunk_servers/heavy_forwarder/main.tf

@@ -33,7 +33,7 @@ resource "aws_instance" "instance" {
   instance_type = var.instance_type
   key_name = "msoc-build"
   monitoring = false
-  iam_instance_profile = "msoc-default-instance-profile"
+  iam_instance_profile = module.instance_profile.profile_id
 
   ami = local.ami_map[local.ami_selection]
   # We need to ignore ebs_block_device changes, because if the AMI changes, so does the snapshot_id.

+ 1 - 0
base/splunk_servers/heavy_forwarder/vars.tf

@@ -54,6 +54,7 @@ variable "cidr_map" { type = map }
 variable "dns_info" { type = map }
 variable "standard_tags" { type = map }
 variable "environment" { type = string }
+variable "aws_account_id" { type = string }
 variable "aws_region" { type = string }
 variable "aws_partition" { type = string }
 variable "aws_partition_alias" { type = string }

+ 3 - 3
base/splunk_servers/indexer_cluster/asg.tf

@@ -12,7 +12,7 @@ module "indexer0" {
   key_name                   = "msoc-build"
   min_size                   = var.splunk_asg_sizes[0]
   max_size                   = var.splunk_asg_sizes[0]
-  iam_instance_profile       = aws_iam_instance_profile.indexer_instance_profile.name
+  iam_instance_profile       = module.instance_profile.profile_id
   common_services_account    = var.common_services_account
   tags = merge(var.standard_tags, var.tags, var.instance_tags[0], { Name = "${local.asg_name}-0" } )
 }
@@ -31,7 +31,7 @@ module "indexer1" {
   key_name                   = "msoc-build"
   min_size                   = var.splunk_asg_sizes[1]
   max_size                   = var.splunk_asg_sizes[1]
-  iam_instance_profile       = aws_iam_instance_profile.indexer_instance_profile.name
+  iam_instance_profile       = module.instance_profile.profile_id
   common_services_account    = var.common_services_account
   tags = merge(var.standard_tags, var.tags, var.instance_tags[1], { Name = "${local.asg_name}-1" } )
 }
@@ -50,7 +50,7 @@ module "indexer2" {
   key_name                   = "msoc-build"
   min_size                   = var.splunk_asg_sizes[2]
   max_size                   = var.splunk_asg_sizes[2]
-  iam_instance_profile       = aws_iam_instance_profile.indexer_instance_profile.name
+  iam_instance_profile       = module.instance_profile.profile_id
   common_services_account    = var.common_services_account
   tags = merge(var.standard_tags, var.tags, var.instance_tags[2], { Name = "${local.asg_name}-2" } )
 }

+ 23 - 53
base/splunk_servers/indexer_cluster/instance_profile.tf

@@ -1,62 +1,19 @@
-#############################
-# Indexer instance profile
-#
-# Includes policies for the indexers:
-#  * Same policies as the default instance profile
-resource "aws_iam_instance_profile" "indexer_instance_profile" {
-  name = "xdr-indexer-instance-profile"
-  path = "/instance/"
-  role = aws_iam_role.indexer_instance_role.name
-}
-
-resource "aws_iam_role"  "indexer_instance_role" {
-  name = "xdr-indexer-instance-role"
-  path = "/instance/"
-  assume_role_policy = <<EOF
-{
-    "Version": "2012-10-17",
-    "Statement": [
-      {
-        "Sid": "",
-        "Effect": "Allow",
-        "Principal": {
-          "Service": [
-            "ec2.amazonaws.com",
-            "ssm.amazonaws.com"
-            ]
-        },
-        "Action": "sts:AssumeRole"
-      }
-    ]
-  }
-EOF
-}
-
-# These 3 are the default profile attachments:
-resource "aws_iam_role_policy_attachment" "indexer_instance_AmazonEC2RoleforSSM" {
-  role       = aws_iam_role.indexer_instance_role.name
-  policy_arn = "arn:${var.aws_partition}:iam::aws:policy/service-role/AmazonEC2RoleforSSM"
-}
-
-resource "aws_iam_role_policy_attachment" "indexer_instance_default_policy_attach" {
-  role       = aws_iam_role.indexer_instance_role.name
-  policy_arn = "arn:${ var.aws_partition }:iam::${ var.aws_account_id }:policy/launchroles/default_instance_tag_read"
-}
-
-resource "aws_iam_role_policy_attachment" "indexer_instance_cloudwatch_policy_attach" {
-  role       = aws_iam_role.indexer_instance_role.name
-  policy_arn = "arn:${ var.aws_partition }:iam::${ var.aws_account_id }:policy/cloudwatch_events"
+module "instance_profile" {
+  source = "../../../submodules/iam/base_instance_profile"
+  prefix = "xdr-indexer"
+  aws_partition = var.aws_partition
+  aws_account_id = var.aws_account_id
 }
 
 # Indexer Specific Policy
-resource "aws_iam_policy" "indexer_instance_policy" {
+resource "aws_iam_policy" "instance_policy" {
   name        = "indexer_instance_policy"
   path        = "/launchroles/"
   description = "This policy allows indexer-specific functions"
-  policy      = data.aws_iam_policy_document.indexer_instance_policy_doc.json
+  policy      = data.aws_iam_policy_document.instance_policy_doc.json
 }
 
-data "aws_iam_policy_document" "indexer_instance_policy_doc" {
+data "aws_iam_policy_document" "instance_policy_doc" {
   # Allow copying to S3 for frozen
   # Allow use of S3 for SmartStore
   statement {
@@ -113,9 +70,22 @@ data "aws_iam_policy_document" "indexer_instance_policy_doc" {
     ]
     resources = [ "*" ]
   }      
+
+  statement {
+    sid    = "AllowAssumeRoleToSplunkApps"
+    effect = "Allow"
+
+    actions = [
+      "sts:AssumeRole"
+    ]
+
+    resources = [
+      "arn:${ var.aws_partition }:iam::${ var.aws_account_id }:role/service/splunk-apps-s3"
+    ]
+  }
 }
 
 resource "aws_iam_role_policy_attachment" "indexer_instance_policy_attach" {
-  role       = aws_iam_role.indexer_instance_role.name
-  policy_arn = aws_iam_policy.indexer_instance_policy.arn
+  role       = module.instance_profile.role_id
+  policy_arn = aws_iam_policy.instance_policy.arn
 }

+ 0 - 108
base/splunk_servers/searchhead/iam.tf

@@ -1,108 +0,0 @@
-# The moose splunk SH has additional permissions beyond the default instance
-resource "aws_iam_instance_profile" "moose_splunk_sh_instance_profile" {
-  count    = local.is_moose ? 1 : 0
-  name     = "moose-splunk-sh-instance-profile"
-  path     = "/instance/"
-  role     = aws_iam_role.moose_splunk_sh_instance_role[count.index].name
-}
-
-resource "aws_iam_role" "moose_splunk_sh_instance_role" {
-  count    = local.is_moose ? 1 : 0
-  name     = "moose-splunk-sh-instance-role"
-  path     = "/instance/"
-  assume_role_policy = jsonencode(
-  {   
-    "Version": "2012-10-17",
-    "Statement": [
-      {   
-        "Sid": "", 
-        "Effect": "Allow",
-        "Principal": {
-          "Service": [
-            "ec2.amazonaws.com"
-            ]
-        },
-        "Action": "sts:AssumeRole"
-      }
-    ]
-  })
-}
-
-data "aws_iam_policy_document" "moose_splunk_sh_policy_doc" {
-  count    = local.is_moose ? 1 : 0
-
-  # Moose splunk SH can assumerole into the C2 and mdr-prod-root-ca accounts to run the ACM audit report
-  statement {
-    sid    = "AllowAssumeRole"
-    effect = "Allow"
-
-    actions = [
-      "sts:AssumeRole"
-    ]
-
-    resources = [
-      "arn:${var.aws_partition}:iam::*:role/service/run_audit_report_role"
-    ]
-  }
-
-  # Moose splunk SH can grab the ACM audit reports
-  statement {
-    sid       = ""
-    effect    = "Allow"
-    resources = ["arn:${var.aws_partition}:s3:::xdr-ca-audit-reports"]
-
-    actions = [
-      "s3:ListBucket",
-      "s3:ListBucketVersions",
-    ]
-  }
-
-  statement {
-    sid       = ""
-    effect    = "Allow"
-    resources = ["arn:${var.aws_partition}:s3:::xdr-ca-audit-reports/*"]
-
-    actions = [
-      "s3:GetObject",
-      "s3:GetObjectVersion",
-    ]
-  }
-}
-
-resource "aws_iam_policy" "moose_splunk_sh_policy" {
-  count    = local.is_moose ? 1 : 0
-  name        = "moose_splunk_sh"
-  path        = "/"
-  policy      = data.aws_iam_policy_document.moose_splunk_sh_policy_doc[count.index].json
-}
-
-resource "aws_iam_role_policy_attachment" "moose_splunk_sh_attach" {
-  count    = local.is_moose ? 1 : 0
-  role       = aws_iam_role.moose_splunk_sh_instance_role[count.index].name
-  policy_arn = aws_iam_policy.moose_splunk_sh_policy[count.index].arn
-}
-
-resource "aws_iam_role_policy_attachment" "moose_splunk_sh_AmazonEC2RoleforSSM" {
-  count    = local.is_moose ? 1 : 0
-  role       = aws_iam_role.moose_splunk_sh_instance_role[count.index].name
-  policy_arn = "arn:${var.aws_partition}:iam::aws:policy/service-role/AmazonEC2RoleforSSM"
-}
-
-resource "aws_iam_role_policy_attachment" "moose_splunk_sh_policy_attach_tag_read" {
-  count    = local.is_moose ? 1 : 0
-  role       = aws_iam_role.moose_splunk_sh_instance_role[count.index].name
-  policy_arn = "arn:${var.aws_partition}:iam::${var.aws_account_id}:policy/launchroles/default_instance_tag_read"
-}
-
-resource "aws_iam_role_policy_attachment" "moose_splunk_sh_policy_attach_cloudwatch" {
-  count    = local.is_moose ? 1 : 0
-  role       = aws_iam_role.moose_splunk_sh_instance_role[count.index].name
-  policy_arn = "arn:${var.aws_partition}:iam::${var.aws_account_id}:policy/cloudwatch_events"
-}
-
-#This policy needs to be create prior to creating the Salt Master
-resource "aws_iam_role_policy_attachment" "moose_splunk_sh_policy_attach_binaries" {
-  count    = local.is_moose ? 1 : 0
-  role       = aws_iam_role.moose_splunk_sh_instance_role[count.index].name
-  policy_arn = "arn:${var.aws_partition}:iam::${var.aws_account_id}:policy/launchroles/default_instance_s3_binaries"
-}

+ 69 - 0
base/splunk_servers/searchhead/iam_moose_sh_instance_profile.tf

@@ -0,0 +1,69 @@
+module "moose_instance_profile" {
+  count    = local.is_moose ? 1 : 0
+  source = "../../../submodules/iam/base_instance_profile"
+  prefix = "moose-splunk-sh"
+  aws_partition = var.aws_partition
+  aws_account_id = var.aws_account_id
+}
+
+data "aws_iam_policy_document" "moose_splunk_sh_policy_doc" {
+  count    = local.is_moose ? 1 : 0
+
+  # Moose splunk SH can assumerole into the C2 and mdr-prod-root-ca accounts to run the ACM audit report
+  statement {
+    sid    = "AllowAssumeRole"
+    effect = "Allow"
+
+    actions = [
+      "sts:AssumeRole"
+    ]
+
+    resources = [
+      "arn:${var.aws_partition}:iam::*:role/service/run_audit_report_role",
+      "arn:${ var.aws_partition }:iam::${ var.aws_account_id }:role/service/splunk_apps_s3"
+    ]
+  }
+
+  # Moose splunk SH can grab the ACM audit reports
+  statement {
+    sid       = ""
+    effect    = "Allow"
+    resources = ["arn:${var.aws_partition}:s3:::xdr-ca-audit-reports"]
+
+    actions = [
+      "s3:ListBucket",
+      "s3:ListBucketVersions",
+    ]
+  }
+
+  statement {
+    sid       = ""
+    effect    = "Allow"
+    resources = ["arn:${var.aws_partition}:s3:::xdr-ca-audit-reports/*"]
+
+    actions = [
+      "s3:GetObject",
+      "s3:GetObjectVersion",
+    ]
+  }
+}
+
+resource "aws_iam_policy" "moose_splunk_sh_policy" {
+  count    = local.is_moose ? 1 : 0
+  name        = "moose_splunk_sh"
+  path        = "/"
+  policy      = data.aws_iam_policy_document.moose_splunk_sh_policy_doc[count.index].json
+}
+
+resource "aws_iam_role_policy_attachment" "moose_splunk_sh_attach" {
+  count    = local.is_moose ? 1 : 0
+  role       = module.moose_instance_profile[count.index].role_id
+  policy_arn = aws_iam_policy.moose_splunk_sh_policy[count.index].arn
+}
+
+# This policy needs to be created prior to creating the Salt Master
+resource "aws_iam_role_policy_attachment" "moose_splunk_sh_policy_attach_binaries" {
+  count    = local.is_moose ? 1 : 0
+  role       = module.moose_instance_profile[count.index].role_id
+  policy_arn = "arn:${var.aws_partition}:iam::${var.aws_account_id}:policy/launchroles/default_instance_s3_binaries"
+}

+ 43 - 0
base/splunk_servers/searchhead/iam_splunk_sh_role.tf

@@ -0,0 +1,43 @@
+module "instance_profile" {
+  count = var.create_instance_profile ? 1 : 0
+  source = "../../../submodules/iam/base_instance_profile"
+  prefix = "splunk-sh"
+  aws_partition = var.aws_partition
+  aws_account_id = var.aws_account_id
+}
+
+data "aws_iam_policy_document" "splunk_sh_policy_doc" {
+  count = var.create_instance_profile ? 1 : 0
+  statement {
+    sid    = "AllowAssumeRole"
+    effect = "Allow"
+
+    actions = [
+      "sts:AssumeRole"
+    ]
+
+    resources = [
+      "arn:${ var.aws_partition }:iam::${ var.aws_account_id }:role/service/splunk_apps_s3"
+    ]
+  }
+}
+
+resource "aws_iam_policy" "splunk_sh_policy" {
+  count = var.create_instance_profile ? 1 : 0
+  name        = "splunk_sh"
+  path        = "/"
+  policy      = data.aws_iam_policy_document.splunk_sh_policy_doc[count.index].json
+}
+
+resource "aws_iam_role_policy_attachment" "splunk_sh_attach" {
+  count = var.create_instance_profile ? 1 : 0
+  role       = module.instance_profile[count.index].role_id
+  policy_arn = aws_iam_policy.splunk_sh_policy[count.index].arn
+}
+
+# This policy needs to be created prior to creating the Salt Master
+resource "aws_iam_role_policy_attachment" "splunk_sh_policy_attach_binaries" {
+  count = var.create_instance_profile ? 1 : 0
+  role       = module.instance_profile[count.index].role_id
+  policy_arn = "arn:${var.aws_partition}:iam::${var.aws_account_id}:policy/launchroles/default_instance_s3_binaries"
+}

+ 2 - 2
base/splunk_servers/searchhead/main.tf

@@ -36,7 +36,7 @@ resource "aws_instance" "instance" {
   instance_type = var.instance_type
   key_name = "msoc-build"
   monitoring = false
-  iam_instance_profile = local.is_moose ? "moose-splunk-sh-instance-profile" : "msoc-default-instance-profile"
+  iam_instance_profile = local.is_moose ? module.moose_instance_profile[0].profile_id : "splunk-sh-instance-profile"
 
   ami = local.ami_map[local.ami_selection]
   # We need to ignore ebs_block_device changes, because if the AMI changes, so does the snapshot_id.
@@ -142,7 +142,7 @@ resource "aws_instance" "instance" {
   tags = merge( var.standard_tags, var.tags, var.instance_tags, { Name = local.instance_name })
   volume_tags = merge( var.standard_tags, var.tags, { Name = local.instance_name })
 
-  depends_on = [ aws_iam_instance_profile.moose_splunk_sh_instance_profile ]
+  depends_on = [ module.moose_instance_profile, module.instance_profile ]
 }
 
 module "private_dns_record" {

+ 6 - 0
base/splunk_servers/searchhead/vars.tf

@@ -1,3 +1,9 @@
+variable "create_instance_profile" {
+  description = "Whether to create the instance profile. Can only be 'true' for one module per account."
+  type = bool
+  default = true
+}
+
 variable "instance_name" {
   description = "[Optional] Override the Instance Name"
   type = string

+ 44 - 0
submodules/iam/base_instance_profile/instance_profile.tf

@@ -0,0 +1,44 @@
+resource "aws_iam_instance_profile" "instance_profile" {
+  name = "${ var.prefix }-instance-profile"
+  path = "/instance/"
+  role = aws_iam_role.instance_role.name
+}
+
+resource "aws_iam_role"  "instance_role" {
+  name = "${ var.prefix }-instance-role"
+  path = "/instance/"
+  assume_role_policy = <<EOF
+{
+    "Version": "2012-10-17",
+    "Statement": [
+      {
+        "Sid": "",
+        "Effect": "Allow",
+        "Principal": {
+          "Service": [
+            "ec2.amazonaws.com",
+            "ssm.amazonaws.com"
+            ]
+        },
+        "Action": "sts:AssumeRole"
+      }
+    ]
+  }
+EOF
+}
+
+# These 3 are the default profile attachments:
+resource "aws_iam_role_policy_attachment" "instance_AmazonEC2RoleforSSM" {
+  role       = aws_iam_role.instance_role.name
+  policy_arn = "arn:${var.aws_partition}:iam::aws:policy/service-role/AmazonEC2RoleforSSM"
+}
+
+resource "aws_iam_role_policy_attachment" "instance_default_policy_attach" {
+  role       = aws_iam_role.instance_role.name
+  policy_arn = "arn:${ var.aws_partition }:iam::${ var.aws_account_id }:policy/launchroles/default_instance_tag_read"
+}
+
+resource "aws_iam_role_policy_attachment" "instance_cloudwatch_policy_attach" {
+  role       = aws_iam_role.instance_role.name
+  policy_arn = "arn:${ var.aws_partition }:iam::${ var.aws_account_id }:policy/cloudwatch_events"
+}

+ 12 - 0
submodules/iam/base_instance_profile/outputs.tf

@@ -0,0 +1,12 @@
+output "profile_arn" {
+  value = aws_iam_instance_profile.instance_profile.arn
+}
+output "profile_id" {
+  value = aws_iam_instance_profile.instance_profile.id
+}
+output "role_arn" {
+  value = aws_iam_role.instance_role.arn
+}
+output "role_id" {
+  value = aws_iam_role.instance_role.id
+}

+ 7 - 0
submodules/iam/base_instance_profile/variables.tf

@@ -0,0 +1,7 @@
+variable "prefix" {
+  description = "Prefix for role/policy naming"
+  type = string
+}
+
+variable "aws_partition" { type = string }
+variable "aws_account_id" { type = string }