- # Locals describing this instance
- locals {
- ami_selection = "minion" # master, minion, ...
- instance_name = "${var.prefix}-splunk-cm"
- is_moose = length(regexall("moose", var.prefix)) > 0
- }
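- # The instance below references local.ami_map and local.block_device_mappings, which are
- # assumed to be defined elsewhere in this module (e.g. an amis.tf). A minimal sketch of
- # what they could look like, using a hypothetical AMI name filter and showing only the
- # "minion" entry, is:
- #
- # data "aws_ami" "minion" {
- #   most_recent = true
- #   owners      = [ "self" ]
- #   filter {
- #     name   = "name"
- #     values = [ "minion-*" ] # hypothetical naming convention
- #   }
- # }
- #
- # locals {
- #   ami_map = {
- #     minion = data.aws_ami.minion.id
- #   }
- #   # device_name => block device mapping, so the ebs_block_device stanzas below can
- #   # read .ebs.snapshot_id per device.
- #   block_device_mappings = {
- #     minion = { for bd in data.aws_ami.minion.block_device_mappings : bd.device_name => bd }
- #   }
- # }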
- # Rather than pass in the AWS security group, we just look it up by name. This will
- # probably be useful in other places as well.
- data "aws_security_group" "typical-host" {
- name = "typical-host"
- vpc_id = var.vpc_id
- }
- # Use the default EBS key
- data "aws_kms_key" "ebs-key" {
- key_id = "alias/ebs_root_encrypt_decrypt"
- }
- resource "aws_network_interface" "instance" {
- subnet_id = var.subnets[0]
- security_groups = [ data.aws_security_group.typical-host.id, aws_security_group.cluster_master_security_group.id ]
- description = local.instance_name
- tags = merge(var.standard_tags, var.tags, { Name = local.instance_name })
- }
- resource "aws_instance" "instance" {
- #availability_zone = var.azs[count.index % 2]
- tenancy = "default"
- ebs_optimized = true
- disable_api_termination = var.instance_termination_protection
- instance_initiated_shutdown_behavior = "stop"
- instance_type = var.instance_type
- key_name = "msoc-build"
- monitoring = false
- iam_instance_profile = "msoc-default-instance-profile"
- ami = local.ami_map[local.ami_selection]
- # We need to ignore ebs_block_device changes because, when the AMI changes, so do the snapshot IDs.
- # If Terraform adds a feature to ignore more specific changes (e.g. `ebs_block_device[*].snapshot_id`),
- # this could be narrowed.
- lifecycle { ignore_changes = [ ami, key_name, user_data, ebs_block_device ] }
- # These device definitions are optional, but added for clarity.
- root_block_device {
- volume_type = "gp2"
- volume_size = var.splunk_volume_sizes["cluster_master"]["/"]
- delete_on_termination = true
- encrypted = true
- kms_key_id = data.aws_kms_key.ebs-key.arn
- }
- ebs_block_device {
- # /opt/splunk
- # Note: Not in AMI
- device_name = "/dev/xvdf"
- volume_size = var.splunk_volume_sizes["cluster_master"]["/opt/splunk"]
- delete_on_termination = true
- encrypted = true
- kms_key_id = data.aws_kms_key.ebs-key.arn
- }
-
- ebs_block_device {
- # swap
- device_name = "/dev/xvdm"
- volume_size = var.splunk_volume_sizes["cluster_master"]["swap"]
- delete_on_termination = true
- encrypted = true
- kms_key_id = data.aws_kms_key.ebs-key.arn
- # Snapshot IDs must be taken from the AMI, or Terraform will want to replace the volume on
- # every plan. It's ugly, and a replacement may still be prompted when the AMI is updated.
- # See:
- # https://github.com/hashicorp/terraform/issues/19958
- # https://github.com/terraform-providers/terraform-provider-aws/issues/13118
- snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdm"].ebs.snapshot_id
- }
- ebs_block_device {
- # /home
- device_name = "/dev/xvdn"
- volume_size = var.splunk_volume_sizes["cluster_master"]["/home"]
- delete_on_termination = true
- encrypted = true
- kms_key_id = data.aws_kms_key.ebs-key.arn
- snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdn"].ebs.snapshot_id
- }
- ebs_block_device {
- # /var
- device_name = "/dev/xvdo"
- volume_size = var.splunk_volume_sizes["cluster_master"]["/var"]
- delete_on_termination = true
- encrypted = true
- kms_key_id = data.aws_kms_key.ebs-key.arn
- snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdo"].ebs.snapshot_id
- }
- ebs_block_device {
- # /var/tmp
- device_name = "/dev/xvdp"
- volume_size = var.splunk_volume_sizes["cluster_master"]["/var/tmp"]
- delete_on_termination = true
- encrypted = true
- kms_key_id = data.aws_kms_key.ebs-key.arn
- snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdp"].ebs.snapshot_id
- }
- ebs_block_device {
- # /var/log
- device_name = "/dev/xvdq"
- volume_size = var.splunk_volume_sizes["cluster_master"]["/var/log"]
- delete_on_termination = true
- encrypted = true
- kms_key_id = data.aws_kms_key.ebs-key.arn
- snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdq"].ebs.snapshot_id
- }
- ebs_block_device {
- # /var/log/audit
- device_name = "/dev/xvdr"
- volume_size = var.splunk_volume_sizes["cluster_master"]["/var/log/audit"]
- delete_on_termination = true
- encrypted = true
- kms_key_id = data.aws_kms_key.ebs-key.arn
- snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdr"].ebs.snapshot_id
- }
- ebs_block_device {
- # /tmp
- device_name = "/dev/xvds"
- volume_size = var.splunk_volume_sizes["cluster_master"]["/tmp"]
- delete_on_termination = true
- encrypted = true
- kms_key_id = data.aws_kms_key.ebs-key.arn
- snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvds"].ebs.snapshot_id
- }
- network_interface {
- device_index = 0
- network_interface_id = aws_network_interface.instance.id
- }
- user_data = data.template_cloudinit_config.cloud-init.rendered
- tags = merge( var.standard_tags, var.tags, { Name = local.instance_name })
- volume_tags = merge( var.standard_tags, var.tags, { Name = local.instance_name })
- }
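- # Volume sizes above are looked up as var.splunk_volume_sizes["cluster_master"][<mount point>].
- # That variable is assumed to be declared in the module's variables file with roughly this
- # shape (the GiB values here are illustrative only, not the real defaults):
- #
- # variable "splunk_volume_sizes" {
- #   type = map(map(number))
- #   default = {
- #     cluster_master = {
- #       "/"              = 20
- #       "/opt/splunk"    = 100
- #       "swap"           = 8
- #       "/home"          = 10
- #       "/var"           = 15
- #       "/var/tmp"       = 10
- #       "/var/log"       = 15
- #       "/var/log/audit" = 10
- #       "/tmp"           = 10
- #     }
- #   }
- # }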
- module "private_dns_record" {
- source = "../../../submodules/dns/private_A_record"
- name = local.instance_name
- ip_addresses = [ aws_instance.instance.private_ip ]
- dns_info = var.dns_info
- reverse_enabled = var.reverse_enabled
- providers = {
- aws.c2 = aws.c2
- }
- }
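- # var.dns_info is passed straight through to the DNS submodule and is also read below as
- # var.dns_info["private"]["zone"]. Its full schema lives with that submodule; the minimal
- # assumed shape used here is:
- #
- # variable "dns_info" {
- #   type = map(map(string))
- #   # e.g. { private = { zone = "example.internal" } }
- # }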
- data "template_file" "cloud-init" {
- # Should these be in a common directory? I suspect they'd be reusable
- template = file("${path.module}/cloud-init/cloud-init.tpl")
- vars = {
- hostname = local.instance_name
- fqdn = "${local.instance_name}.${var.dns_info["private"]["zone"]}"
- splunk_prefix = var.prefix
- environment = var.environment
- salt_master = var.salt_master
- proxy = var.proxy
- aws_partition = var.aws_partition
- aws_partition_alias = var.aws_partition_alias
- aws_region = var.aws_region
- }
- }
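- # On Terraform >= 0.12 the same rendering could be done with the built-in templatefile()
- # function instead of the template provider's template_file data source (which is now
- # superseded); a sketch, reusing the same variables:
- #
- # locals {
- #   cloud_init_rendered = templatefile("${path.module}/cloud-init/cloud-init.tpl", {
- #     hostname            = local.instance_name
- #     fqdn                = "${local.instance_name}.${var.dns_info["private"]["zone"]}"
- #     splunk_prefix       = var.prefix
- #     environment         = var.environment
- #     salt_master         = var.salt_master
- #     proxy               = var.proxy
- #     aws_partition       = var.aws_partition
- #     aws_partition_alias = var.aws_partition_alias
- #     aws_region          = var.aws_region
- #   })
- # }
- #
- # The cloud-config part below would then use local.cloud_init_rendered as its content.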
- # Render a multi-part cloud-init config that combines the part above with
- # other source files.
- data "template_cloudinit_config" "cloud-init" {
- gzip = true
- base64_encode = true
- # Main cloud-config configuration file.
- part {
- filename = "init.cfg"
- content_type = "text/cloud-config"
- content = data.template_file.cloud-init.rendered
- }
- # mount /dev/xvdf at /opt/splunk
- part {
- content_type = "text/cloud-boothook"
- content = file("${path.module}/cloud-init/opt_splunk.boothook")
- }
- }
- ## Cluster Master Security Group
- #
- # Summary:
- # Ingress:
- # tcp/8000 - Splunk Web - vpc-access, legacy openvpn, legacy bastion
- # tcp/8089 - Splunk API - vpc-access, legacy openvpn, legacy bastion
- # tcp/8089 - Splunk API + IDX Discovery - Entire VPC + var.splunk_legacy_cidr
- # tcp/8089 - MOOSE ONLY - 10.0.0.0/8
- # Egress:
- # tcp/8089 - Splunk API + IDX Discovery - Entire VPC + var.splunk_legacy_cidr
- # tcp/9997-9998 - Splunk Data - Entire VPC + var.splunk_legacy_cidr
- #
- # In legacy, but not carried over:
- # Ingress:
- # tcp/9887 - IDX Replication - Entire VPC + var.splunk_legacy_cidr
- # tcp/8088 - Splunk HEC - Entire VPC + var.additional_source + var.splunk_legacy_cidr - TODO: Is this needed for CM?
- # tcp/9997-9998 - Splunk Data - Entire VPC + var.additional_source + var.splunk_legacy_cidr
- # Egress:
- # tcp/8088 - Entire VPC - Splunk HEC
- # tcp/9997-9998 - Entire VPC - Splunk Data
- # tcp/9887 - Entire VPC - IDX Replication
- #
- resource "aws_security_group" "cluster_master_security_group" {
- name = "cluster_master_security_group"
- description = "Security Group for Splunk Cluster Master Instance(s)"
- vpc_id = var.vpc_id
- tags = merge(var.standard_tags, var.tags)
- }
- resource "aws_security_group_rule" "splunk-web-in" {
- description = "Web access from bastions and vpn"
- type = "ingress"
- from_port = 8000
- to_port = 8000
- protocol = "tcp"
- #cidr_blocks = toset(concat(var.cidr_map["bastions"], var.cidr_map["vpns"]))
- cidr_blocks = var.cidr_map["vpc-access"]
- security_group_id = aws_security_group.cluster_master_security_group.id
- }
- resource "aws_security_group_rule" "splunk-api-in" {
- description = "Splunk API + Indexer Discovery"
- type = "ingress"
- from_port = 8089
- to_port = 8089
- protocol = "tcp"
- cidr_blocks = toset(concat(var.splunk_legacy_cidr, [ var.vpc_cidr ], var.cidr_map["vpc-access"]))
- security_group_id = aws_security_group.cluster_master_security_group.id
- }
- resource "aws_security_group_rule" "splunk-api-in-moose" {
- count = local.is_moose ? 1 : 0
- description = "Splunk API + Indexer Discovery - 10/8 for MOOSE ONLY"
- type = "ingress"
- from_port = 8089
- to_port = 8089
- protocol = "tcp"
- cidr_blocks = [ "10.0.0.0/8" ]
- security_group_id = aws_security_group.cluster_master_security_group.id
- }
- resource "aws_security_group_rule" "splunk-api-out" {
- description = "Splunk API Outbound to talk to indexers"
- type = "egress"
- from_port = 8089
- to_port = 8089
- protocol = "tcp"
- cidr_blocks = toset(concat(var.splunk_legacy_cidr, [ var.vpc_cidr ]))
- security_group_id = aws_security_group.cluster_master_security_group.id
- }
- resource "aws_security_group_rule" "splunk-data-out" {
- description = "Splunk Data Outbound to record to local indexers"
- type = "egress"
- from_port = 9997
- to_port = 9998
- protocol = "tcp"
- cidr_blocks = toset(concat(var.splunk_legacy_cidr, [ var.vpc_cidr ]))
- security_group_id = aws_security_group.cluster_master_security_group.id
- }
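- # Example of how this module might be instantiated from an environment root. The module
- # path and every value here are illustrative only, not taken from a real environment:
- #
- # module "splunk_cluster_master" {
- #   source                          = "../../modules/splunk_cluster_master" # hypothetical path
- #   prefix                          = "moose-test" # "moose" in the prefix enables the 10/8 API rule
- #   environment                     = "test"
- #   vpc_id                          = "vpc-0123456789abcdef0"
- #   vpc_cidr                        = "10.10.0.0/16"
- #   subnets                         = [ "subnet-0123456789abcdef0" ]
- #   instance_type                   = "m5.large"
- #   instance_termination_protection = true
- #   # plus splunk_volume_sizes, cidr_map, splunk_legacy_cidr, dns_info, reverse_enabled,
- #   # salt_master, proxy, aws_partition, aws_partition_alias, aws_region, standard_tags, tags
- #   providers = {
- #     aws.c2 = aws.c2 # alias assumed to point at the account hosting the private DNS zone
- #   }
- # }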