# S3 -> SQS fair-queueing example configuration
######################################
# The fair queueing module example
module "sqs_fair_queue" {
  source     = "./module_sqs_fair_queueing"
  # The single notification queue the module fans out from.
  source_sqs = aws_sqs_queue.queue
  #deadletter = aws_sqs_queue.deadletter
  sqs_prefix = local.sqs_prefix
  # Number of fan-out queues (bins) the module creates.
  num_queues = 16
  hash_jsonpath = "$" # This will evenly distribute all messages
  # debug enables the module's verbose logging; botodebug presumably
  # toggles boto-level logging inside the module — confirm in the module.
  debug = true
  botodebug = false
  tags = local.tags
}
######################################
# Example Resources for testing
# tfsec:ignore:aws-s3-enable-bucket-logging Logging is a good idea, but we don't here.
# tfsec:ignore:aws-s3-enable-versioning Versioning is a good idea, but we don't here.
# Throwaway bucket used to exercise the fair-queueing pipeline.
resource "aws_s3_bucket" "bucket" {
  bucket        = "fdamstra-fair-queueing-test"
  force_destroy = true # CHANGE FOR PRODUCTION!

  # Shared tags plus bucket-specific identifiers.
  tags = merge(local.tags, {
    Environment = "Dev"
    Name        = "mbox-fair-queueing-test"
    Purpose     = "POC bucket for S3 fair queueing"
  })
}
# Keep the example bucket's ACL private (standalone ACL resource,
# AWS provider v4+ style).
# NOTE(review): buckets created after April 2023 default to
# ObjectOwnership=BucketOwnerEnforced, which rejects ACL writes; an
# aws_s3_bucket_ownership_controls resource (and depends_on) may be
# required for this apply to succeed — confirm against the target account.
resource "aws_s3_bucket_acl" "bucket" {
  bucket = aws_s3_bucket.bucket.id
  acl = "private"
}
# Block every form of public access to the example bucket.
resource "aws_s3_bucket_public_access_block" "bucket" {
  bucket = aws_s3_bucket.bucket.id
  block_public_acls = true
  block_public_policy = true
  ignore_public_acls = true
  restrict_public_buckets = true
}
# tfsec:ignore:aws-s3-encryption-customer-key AWS managed key is sufficient
# Default every object in the bucket to SSE-S3 (AES256) encryption.
resource "aws_s3_bucket_server_side_encryption_configuration" "bucket" {
  bucket = aws_s3_bucket.bucket.bucket
  rule {
    apply_server_side_encryption_by_default {
      sse_algorithm = "AES256"
    }
  }
}
# SQS configuration for the root bucket
#
# NOTE! Only this first sqs needs to be set up.
# The module will set up the sqs queues for FIFO.
#
# Remember that the consumer service needs access to the FIFO queues,
# not these.
resource "aws_sqs_queue" "queue" {
  name                    = "mbox-bucket-notification"
  sqs_managed_sse_enabled = true

  # Allow S3 (this bucket only, via aws:SourceArn) to send notifications.
  # jsonencode() keeps this consistent with redrive_policy below and
  # avoids heredoc quoting/interpolation pitfalls.
  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Effect    = "Allow"
        Principal = "*"
        Action    = "sqs:SendMessage"
        Resource  = "arn:aws:sqs:*:*:mbox-bucket-notification"
        Condition = {
          ArnEquals = { "aws:SourceArn" = aws_s3_bucket.bucket.arn }
        }
      }
    ]
  })

  # Messages that fail processing 4 times land in the DLQ.
  redrive_policy = jsonencode({
    deadLetterTargetArn = aws_sqs_queue.deadletter.arn
    maxReceiveCount     = 4
  })

  # NOTE: If you set this below about 15, then you must decrease how many
  # messages are processed per batch by lambda.
  visibility_timeout_seconds = 30

  # No explicit depends_on: the policy references aws_s3_bucket.bucket.arn,
  # which already creates the dependency implicitly.
  tags = local.tags
}
# Dead-letter queue for the root notification queue.
resource "aws_sqs_queue" "deadletter" {
  name                    = "mbox-bucket-notification-dlq"
  sqs_managed_sse_enabled = true
  # Tag the DLQ like every other resource in this file (it was the only
  # taggable resource missing local.tags).
  tags = local.tags
}
# Permit source queues to redrive into this DLQ.
resource "aws_sqs_queue_redrive_allow_policy" "deadletter" {
  queue_url = aws_sqs_queue.deadletter.id
  redrive_allow_policy = jsonencode({
    redrivePermission = "allowAll" # Must allow all if > 9 bins
    # NOTE(review): byQueue mode caps sourceQueueArns at a small fixed
    # limit (10 per AWS docs), hence allowAll once the fair-queueing
    # fan-out exceeds it — confirm limit if tightening this policy.
    #sourceQueueArns = [aws_sqs_queue.queue.arn, local.sqs_wildcard_arn]
    #sourceQueueArns = concat([aws_sqs_queue.queue.arn], module.sqs_fair_queue.arns)
  })
}
# Deliver ObjectCreated events under incoming/ to the root SQS queue.
# The whole notification is gated by a single feature flag.
resource "aws_s3_bucket_notification" "bucket_notification" {
  count  = local.fair_queueing_enabled ? 1 : 0
  bucket = aws_s3_bucket.bucket.id

  queue {
    queue_arn     = aws_sqs_queue.queue.arn
    events        = ["s3:ObjectCreated:*"]
    filter_prefix = "incoming/"
  }

  # No explicit depends_on: queue_arn and bucket reference
  # aws_sqs_queue.queue and aws_s3_bucket.bucket directly, so the
  # dependencies are implicit.
}