# main.tf
  1. ######################################
  2. # The fair queueing module example
  3. module "sqs_fair_queue" {
  4. source = "./module_sqs_fair_queueing"
  5. source_sqs = aws_sqs_queue.queue
  6. #deadletter = aws_sqs_queue.deadletter
  7. sqs_prefix = local.sqs_prefix
  8. num_queues = 16
  9. hash_jsonpath = "$" # This will evenly distribute all messages
  10. debug = true
  11. botodebug = false
  12. tags = local.tags
  13. }
  14. ######################################
  15. # Example Resources for testing
  16. # tfsec:ignore:aws-s3-enable-bucket-logging Logging is a good idea, but we don't here.
  17. # tfsec:ignore:aws-s3-enable-versioning Versioning is a good idea, but we don't here.
  18. resource "aws_s3_bucket" "bucket" {
  19. bucket = "fdamstra-fair-queueing-test"
  20. force_destroy = true # CHANGE FOR PRODUCTION!
  21. tags = merge(local.tags, {
  22. Name = "mbox-fair-queueing-test"
  23. Environment = "Dev"
  24. Purpose = "POC bucket for S3 fair queueing"
  25. })
  26. }
  27. resource "aws_s3_bucket_acl" "bucket" {
  28. bucket = aws_s3_bucket.bucket.id
  29. acl = "private"
  30. }
  31. resource "aws_s3_bucket_public_access_block" "bucket" {
  32. bucket = aws_s3_bucket.bucket.id
  33. block_public_acls = true
  34. block_public_policy = true
  35. ignore_public_acls = true
  36. restrict_public_buckets = true
  37. }
  38. # tfsec:ignore:aws-s3-encryption-customer-key AWS managed key is sufficient
  39. resource "aws_s3_bucket_server_side_encryption_configuration" "bucket" {
  40. bucket = aws_s3_bucket.bucket.bucket
  41. rule {
  42. apply_server_side_encryption_by_default {
  43. sse_algorithm = "AES256"
  44. }
  45. }
  46. }
  47. # SQS configuration for the root bucket
  48. #
  49. # NOTE! Only this first sqs needs to be set up.
  50. # The module will set up the sqs queues for FIFO.
  51. #
  52. # Remember that the consumer service needs access to the FIFO queues,
  53. # not these.
  54. resource "aws_sqs_queue" "queue" {
  55. name = "mbox-bucket-notification"
  56. sqs_managed_sse_enabled = true
  57. policy = <<POLICY
  58. {
  59. "Version": "2012-10-17",
  60. "Statement": [
  61. {
  62. "Effect": "Allow",
  63. "Principal": "*",
  64. "Action": "sqs:SendMessage",
  65. "Resource": "arn:aws:sqs:*:*:mbox-bucket-notification",
  66. "Condition": {
  67. "ArnEquals": { "aws:SourceArn": "${aws_s3_bucket.bucket.arn}" }
  68. }
  69. }
  70. ]
  71. }
  72. POLICY
  73. redrive_policy = jsonencode({
  74. deadLetterTargetArn = aws_sqs_queue.deadletter.arn
  75. maxReceiveCount = 4
  76. })
  77. # NOTE: If you set this below about 15, then you must decrease how many
  78. # messages are processed per batch by lambda.
  79. visibility_timeout_seconds = 30
  80. depends_on = [aws_s3_bucket.bucket]
  81. tags = local.tags
  82. }
  83. resource "aws_sqs_queue" "deadletter" {
  84. name = "mbox-bucket-notification-dlq"
  85. sqs_managed_sse_enabled = true
  86. }
  87. resource "aws_sqs_queue_redrive_allow_policy" "deadletter" {
  88. queue_url = aws_sqs_queue.deadletter.id
  89. redrive_allow_policy = jsonencode({
  90. redrivePermission = "allowAll" # Must allow all if > 9 bins
  91. #sourceQueueArns = [aws_sqs_queue.queue.arn, local.sqs_wildcard_arn]
  92. #sourceQueueArns = concat([aws_sqs_queue.queue.arn], module.sqs_fair_queue.arns)
  93. })
  94. }
  95. resource "aws_s3_bucket_notification" "bucket_notification" {
  96. count = local.fair_queueing_enabled ? 1 : 0
  97. bucket = aws_s3_bucket.bucket.id
  98. queue {
  99. queue_arn = aws_sqs_queue.queue.arn
  100. events = ["s3:ObjectCreated:*"]
  101. filter_prefix = "incoming/"
  102. }
  103. depends_on = [aws_sqs_queue.queue, aws_s3_bucket.bucket]
  104. }