
Updates thirdparty modules and Account Standards C2 to be AWS provider 4.x compatible

The C2 work may be incomplete. This should be all that's needed for
`006-account-standards`.

To be tagged v4.0.2
Fred Damstra [afs macbook] 3 years ago
parent
commit
2105f1a93c
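
For context: in AWS provider 4.x the inline `aws_s3_bucket` arguments (`acl`, `versioning`, `logging`, `lifecycle_rule`, `server_side_encryption_configuration`) moved to standalone `aws_s3_bucket_*` resources, which is what every hunk below does. A minimal sketch of the provider constraint this migration presumes (the modules' actual `required_providers` blocks are not shown in this diff):

```hcl
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 4.0" # the S3 resource split shipped in 4.0
    }
  }
}
```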

+ 24 - 14
base/account_standards_c2/config_bucket.tf

@@ -19,28 +19,38 @@ module "xdr_config_logging_bucket" {
 
 resource "aws_s3_bucket" "xdr_config_bucket" {
   bucket = "xdr-config-${var.environment}"
-  acl    = "private"
   tags   = merge(var.standard_tags, var.tags)
+}
 
-  versioning {
-    enabled = true
-  }
+resource "aws_s3_bucket_acl" "xdr_config_bucket" {
+  bucket = aws_s3_bucket.xdr_config_bucket.id
+  acl    = "private"
+}
 
-  logging {
-    target_bucket = module.xdr_config_logging_bucket.s3_bucket_name
-    target_prefix = "${var.aws_account_id}-${var.aws_region}-awsconfig/"
-  }
+resource "aws_s3_bucket_server_side_encryption_configuration" "xdr_config_bucket" {
+  bucket = aws_s3_bucket.xdr_config_bucket.id
 
-  server_side_encryption_configuration {
-    rule {
-      apply_server_side_encryption_by_default {
-        sse_algorithm = "aws:kms"
-        kms_master_key_id = aws_kms_key.config_encryption.arn
-      }
+  rule {
+    apply_server_side_encryption_by_default {
+      sse_algorithm = "aws:kms"
+      kms_master_key_id = aws_kms_key.config_encryption.arn
     }
   }
 }
 
+resource "aws_s3_bucket_logging" "xdr_config_bucket" {
+  bucket        = aws_s3_bucket.xdr_config_bucket.id
+  target_bucket = module.xdr_config_logging_bucket.s3_bucket_name
+  target_prefix = "${var.aws_account_id}-${var.aws_region}-awsconfig/"
+}
+
+resource "aws_s3_bucket_versioning" "xdr_config_bucket" {
+  bucket = aws_s3_bucket.xdr_config_bucket.id
+  versioning_configuration {
+    status = "Enabled"
+  }
+}
+
 resource "aws_s3_bucket_public_access_block" "awsconfig_bucket_block_public_access" {
   block_public_acls       = true
   block_public_policy     = true

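A practical note for applying this hunk: the standalone resources describe settings the existing bucket already has, so importing them keeps the first 4.x plan clean. A sketch using the import ID formats documented for these resources in provider 4.x; the bucket name assumes a hypothetical `environment = "prod"`:

```sh
terraform import aws_s3_bucket_acl.xdr_config_bucket 'xdr-config-prod,private'
terraform import aws_s3_bucket_server_side_encryption_configuration.xdr_config_bucket xdr-config-prod
terraform import aws_s3_bucket_logging.xdr_config_bucket xdr-config-prod
terraform import aws_s3_bucket_versioning.xdr_config_bucket xdr-config-prod
```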
+ 3 - 3
base/account_standards_c2/elb_bucket.tf

@@ -38,8 +38,8 @@ resource "aws_s3_bucket_versioning" "s3_version_elb_logging_bucket" {
   }
 }
 
-resource "aws_s3_bucket_logging" "log_bucket_elb_logging_bucket" {
-  bucket        = aws_s3_bucket.log_bucket_elb_logging_bucket.id
+resource "aws_s3_bucket_logging" "elb_logging_bucket" {
+  bucket        = aws_s3_bucket.elb_logging_bucket.id
   target_bucket = module.elb_logging_logging_bucket.s3_bucket_name
   target_prefix = "${var.aws_account_id}-${var.aws_region}-elblogs/"
 }
@@ -281,4 +281,4 @@ resource "aws_sns_topic_subscription" "elblog_bucket_change_notification_to_queu
     }
   }
 }
-*/
+*/

+ 44 - 36
thirdparty/terraform-aws-cloudtrail-bucket/main.tf

@@ -20,63 +20,71 @@ locals {
 
 resource "aws_s3_bucket" "this" {
   bucket = local.bucket_name
-  acl    = "private"
   tags   = var.tags
 
-  # If we want to PR this upstream, we have to find a way to leave this enabled, but we don't need/want
-  # it since we import into Splunk.
-  #lifecycle_rule {
-  #  enabled = true
-  #
-  #  transition {
-  #    days          = 30
-  #    storage_class = "STANDARD_IA"
-  #  }
-  #
-  #}
-
-  dynamic "lifecycle_rule" {
-    iterator = rule
+  lifecycle {
+    prevent_destroy = true
+  }
+}
+
+resource "aws_s3_bucket_lifecycle_configuration" "this" {
+  bucket = aws_s3_bucket.this.id
+  count = length(var.lifecycle_rules) > 0 ? 1 : 0 # handle the case of no lifecycle rules
+
+  dynamic "rule" {
     for_each = var.lifecycle_rules
 
     content {
       id      = rule.value.id
-      enabled = rule.value.enabled
-      prefix  = lookup(rule.value, "prefix", null)
-      abort_incomplete_multipart_upload_days = lookup(rule.value, "abort_incomplete_multipart_upload_days", 0)
+      status  = rule.value.enabled == true ? "Enabled" : "Disabled"
+      
+      filter {
+        prefix  = lookup(rule.value, "prefix", null)
+      }
+
+      abort_incomplete_multipart_upload {
+        days_after_initiation = lookup(rule.value, "abort_incomplete_multipart_upload_days", 0)
+      }
 
       expiration {
         days = lookup(rule.value, "expiration", 2147483647)
       }
 
       noncurrent_version_expiration {
-        days = lookup(rule.value, "noncurrent_version_expiration", 2147483647)
+        noncurrent_days = lookup(rule.value, "noncurrent_version_expiration", 2147483647)
       }
     }
   }
+}
 
-  logging {
-    target_bucket = var.logging_bucket
-    target_prefix = "${local.account_id}-${var.region}-cloudtrail/"
-  }
+resource "aws_s3_bucket_logging" "this" {
+  bucket        = aws_s3_bucket.this.id
+  target_bucket = var.logging_bucket
+  target_prefix = "${local.account_id}-${var.region}-cloudtrail/"
+}
 
-  server_side_encryption_configuration {
-    rule {
-      apply_server_side_encryption_by_default {
-        sse_algorithm     = "aws:kms"
-        kms_master_key_id = aws_kms_key.this.arn
-      }
-    }
-  }
+resource "aws_s3_bucket_versioning" "this" {
+  bucket = aws_s3_bucket.this.id
 
-  versioning {
-    enabled = true
+  versioning_configuration {
+    status = "Enabled"
   }
+}
 
-  lifecycle {
-    prevent_destroy = true
-  }
+resource "aws_s3_bucket_acl" "this" {
+  bucket = aws_s3_bucket.this.id
+  acl    = "private"
+}
 
+resource "aws_s3_bucket_server_side_encryption_configuration" "kinesis_firehose_s3_bucket" {
+  bucket = aws_s3_bucket.this.id
+
+  rule {
+    apply_server_side_encryption_by_default {
+      sse_algorithm     = "aws:kms"
+      kms_master_key_id = aws_kms_key.this.arn
+    }
+  }
 }
 
 resource "aws_s3_bucket_public_access_block" "this" {

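The `dynamic "rule"` block above reads `var.lifecycle_rules` through `lookup()` with fallbacks, so only `id` and `enabled` are strictly required per element. A hypothetical caller-side value showing the shape the block consumes (module label, path, and numbers are illustrative, not from this repo):

```hcl
module "cloudtrail_bucket" { # hypothetical call site
  source = "../../thirdparty/terraform-aws-cloudtrail-bucket"

  lifecycle_rules = [
    {
      id      = "expire-cloudtrail" # becomes rule.id
      enabled = true                # mapped to status = "Enabled"
      prefix  = ""                  # optional; empty prefix matches all objects
      abort_incomplete_multipart_upload_days = 7
      expiration                    = 365 # days until current versions expire
      noncurrent_version_expiration = 30  # surfaced as noncurrent_days in 4.x
    }
  ]
  # ...remaining module inputs elided
}
```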
+ 31 - 13
thirdparty/terraform-aws-kinesis-firehose-splunk/main.tf

@@ -50,30 +50,48 @@ resource "aws_kinesis_firehose_delivery_stream" "kinesis_firehose" {
 # S3 Bucket for Kinesis Firehose s3_backup_mode
 resource "aws_s3_bucket" "kinesis_firehose_s3_bucket" {
   bucket = var.s3_bucket_name
+
+  tags = var.tags
+}
+
+resource "aws_s3_bucket_acl" "kinesis_firehose_s3_bucket" {
+  bucket = aws_s3_bucket.kinesis_firehose_s3_bucket.id
   acl    = "private"
+}
 
-  server_side_encryption_configuration {
-    rule {
-      apply_server_side_encryption_by_default {
-        sse_algorithm = "AES256"
-      }
+resource "aws_s3_bucket_server_side_encryption_configuration" "kinesis_firehose_s3_bucket" {
+  bucket = aws_s3_bucket.kinesis_firehose_s3_bucket.id
+
+  rule {
+    apply_server_side_encryption_by_default {
+      sse_algorithm     = "AES256"
     }
   }
+}
+
+resource "aws_s3_bucket_lifecycle_configuration" "kinesis_firehose_s3_bucket" {
+  bucket = aws_s3_bucket.kinesis_firehose_s3_bucket.id
+
+  rule {
+    id     = "expire-old-logs"
+    status = "Enabled"
+
+    filter {
+      prefix = ""
+    }
 
-  lifecycle_rule {
-    id                            = "expire-old-logs"
-    enabled                       = true
-    prefix                        = ""
     expiration {
       days = var.s3_expiration
     }
+
     noncurrent_version_expiration {
-      days = var.s3_expiration
+      noncurrent_days = var.s3_expiration
     }
-    abort_incomplete_multipart_upload_days = 7
-  }
 
-  tags = var.tags
+    abort_incomplete_multipart_upload {
+      days_after_initiation = 7
+    }
+  }
 }
 
 resource "aws_s3_bucket_public_access_block" "kinesis_firehose_s3_bucket" {

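After a refactor like the one above, the usual sanity check is that the plan only adds the split-out `aws_s3_bucket_*` resources and never destroys a bucket. A rough verification loop, nothing module-specific (a sketch; adjust to the local workflow):

```sh
terraform init -upgrade    # pick up the ~> 4.0 provider
terraform plan -out=tfplan # expect only adds for the new aws_s3_bucket_* resources
# flag anything destructive before applying
terraform show tfplan | grep -E 'will be (destroyed|replaced)'
```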
+ 54 - 35
thirdparty/terraform-aws-s3logging-bucket/main.tf

@@ -16,56 +16,75 @@ locals {
 
 resource "aws_s3_bucket" "this" {
   bucket = local.bucket_name
-  acl    = "log-delivery-write"
   tags   = var.tags
+}
+
+resource "aws_s3_bucket_acl" "log_bucket_acl" {
+  bucket = aws_s3_bucket.this.id
+  acl    = "log-delivery-write"
+}
+
+resource "aws_s3_bucket_logging" "this" {
+  bucket = aws_s3_bucket.this.id
+
+  # Conformance Pack for CIS requires access logs on all S3 buckets and is a best
+  # practice.
+  #
+  # Logging to the bucket itself is allowed, but if we ingest into splunk, make 
+  # sure we don't set up a feedback loop (splunk accesses s3 bucket to get a log
+  # which creates a log which leads to splunk accessing the s3 bucket)
+  target_bucket = local.bucket_name
+  target_prefix = "${data.aws_caller_identity.current.account_id}-${data.aws_region.current.name}-${local.bucket_name}"
+}
+
+
+resource "aws_s3_bucket_versioning" "this" {
+  bucket = aws_s3_bucket.this.id
+
+  versioning_configuration {
+    status = var.versioning_enabled == true ? "Enabled" : "Suspended"
+  }
+}
+
+
+resource "aws_s3_bucket_server_side_encryption_configuration" "this" {
+  bucket = aws_s3_bucket.this.id
 
-  dynamic "lifecycle_rule" {
-    iterator = rule
+  rule {
+    apply_server_side_encryption_by_default {
+      sse_algorithm = "aws:kms"
+    }
+  }
+}
+
+resource "aws_s3_bucket_lifecycle_configuration" "this" {
+  bucket = aws_s3_bucket.this.id
+  count = length(var.lifecycle_rules) > 0 ? 1 : 0 # handle the case of no lifecycle rules
+
+  dynamic "rule" {
     for_each = var.lifecycle_rules
 
     content {
       id      = rule.value.id
-      enabled = rule.value.enabled
-      prefix  = lookup(rule.value, "prefix", null)
-      abort_incomplete_multipart_upload_days = lookup(rule.value, "abort_incomplete_multipart_upload_days", 0)
+      status  = rule.value.enabled == true ? "Enabled" : "Disabled"
+
+      abort_incomplete_multipart_upload {
+        days_after_initiation = lookup(rule.value, "abort_incomplete_multipart_upload_days", 0)
+      }
+
+      filter {
+        prefix  = lookup(rule.value, "prefix", null)
+      }
 
       expiration {
         days = lookup(rule.value, "expiration", 2147483647)
       }
 
       noncurrent_version_expiration {
-        days = lookup(rule.value, "noncurrent_version_expiration", 2147483647)
+        noncurrent_days = lookup(rule.value, "noncurrent_version_expiration", 2147483647)
       }
     }
   }
-
-  server_side_encryption_configuration {
-    rule {
-      apply_server_side_encryption_by_default {
-        sse_algorithm = "aws:kms"
-      }
-    }
-  }
-
-  versioning {
-    enabled = var.versioning_enabled
-  }
-
-  lifecycle {
-    ignore_changes = [versioning[0].mfa_delete]
-  }
-
-  # Conformance Pack for CIS requires access logs on all S3 buckets and is a best
-  # practice.
-  #
-  # Logging to the bucket itself is allowed, but if we ingest into splunk, make 
-  # sure we don't set up a feedback loop (splunk accesses s3 bucket to get a log
-  # which creates a log which leads to splunk accessing the s3 bucket)
-  logging {
-    target_bucket = local.bucket_name
-    target_prefix = "${data.aws_caller_identity.current.account_id}-${data.aws_region.current.name}-${local.bucket_name}"
-  }
-
 }
 
 resource "aws_s3_bucket_public_access_block" "this" {