# Set common variables for the environment. This is automatically pulled in by
# the root terragrunt.hcl configuration to feed forward to the child modules.
locals {
  environment                  = "prod"
  transit_gateway_account_name = "mdr-prod-c2" # Which account has the transit gateway

  # Tags applied to every resource in this environment.
  environment_tags = {
    "Schedule" = "none",
    Environment = local.environment
  }

  # Core shared-service endpoints (hostnames and their current IPs).
  proxy          = "proxy.pvt.xdr.accenturefederalcyber.com"
  proxy_ip       = "10.80.101.166"
  salt_master    = "salt-master.pvt.xdr.accenturefederalcyber.com"
  salt_master_ip = "10.80.101.170"
  hec            = "moose-hec.xdr.accenturefederalcyber.com"
  hec_pub        = "moose-hec.xdr.accenturefederalcyber.com"
  hec_pub_ack    = "moose-hec-ack.xdr.accenturefederalcyber.com"

  # NOTE(review): HEC tokens are stored here in plaintext; consider moving them
  # to a secrets store (e.g. SSM Parameter Store / Vault) — confirm with owners.
  aws_waf_logs_hec_token = "FA38B8DD-1398-46D0-BD79-BA046C6064B0" # HEC token for posting WAF logs to the moose HEC

  # Map of logical network names to their CIDR blocks.
  # When there are multiples, put govcloud first, then commercial, and alternate if there are more than 2.
  # Put any standalone IPs at the end.
  cidr_map = {
    "vpc-splunk" = [
      "10.40.16.0/22", # Splunk -- **MOOSE**
      "10.80.0.0/16",  # legacy moose subnet
    ],
    "vpc-access" = [
      "10.40.20.0/22",    # VPN, bastions (if any), etc.
      "10.80.101.133/32", # legacy bastion
      "10.80.101.126/32", # legacy openvpn
    ],
    "vpc-public" = [
      "10.40.24.0/22",    # Public sites (github, ghe-backup, jira, ...)
      "10.80.101.250/32", # legacy jira
      "10.80.101.78/32",  # legacy github
    ],
    "vpc-scanners" = [
      "10.40.12.0/22",
      "10.80.1.44/32",  # Legacy Qualys #1
      "10.80.1.103/32", # Legacy Qualys #2
    ], # Qualys, etc.
    "vpc-system-services" = [
      "10.32.0.0/22", # Internal services such as dns, mailrelay, etc.
      "10.40.0.0/22",
      "10.80.101.230/32", # legacy sensu
      "10.80.101.170/32", # legacy salt master
      "10.80.101.166/32", # legacy proxy
      "10.80.101.197/32", # legacy repo
      "10.80.1.107/32",   # legacy smtp
    ],
    "vpc-private-services" = [
      "10.40.28.0/22",    # Private Services - fm-shared-search, qcompliance, phantom, etc.
      "10.80.101.221/32", # Phantom - legacy account production
      # NOTE(review): 10.80.0.0/16 also appears under "vpc-splunk" above; the
      # comment below says this duplication is intentional and temporary.
      "10.80.0.0/16", # the whole legacy infra VPC, so the sync lambda can sync (this is temp)
                      # (wes made me do it honest)
    ],
    # "old" mappings before architecture planning... we should eliminate these.
    "bastions"   = [ "10.80.101.133/32", "10.40.20.0/22" ], # vpc-access in mdr-prod-c2-gov
    "vpns"       = [ "10.80.101.126/32", "10.40.20.0/22" ], # vpc-access in mdr-prod-c2-gov
    "scanners"   = [ "10.40.12.0/22" ], # vpc-qualys
    "dns"        = [ "10.40.0.0/22", "10.32.0.0/22" ], # vpc-system-services in commercial and gov
    "monitoring" = [ "10.80.101.230/32", "10.40.0.0/22" ], # legacy sensu, and vpc-system-services in gov
    "salt"       = [ "10.80.101.170/32", "10.40.0.0/22" ], # legacy salt-master, and vpc-system-services in gov
    "web"        = [ "10.80.101.166/32", "10.80.101.197/32", "10.40.0.0/22" ], # legacy proxy/repo, and vpc-system-services in gov
    "moose"      = [ "10.80.0.0/16", "10.40.16.0/22" ], # legacy vpc, and vpc-system-services in gov
  }

  # AWS account IDs.
  legacy_account = "477548533976"
  c2_accounts = {
    "aws-us-gov" = "721817724804" # mdr-prod-c2-gov
    "aws"        = "045312110490" # mdr-prod-c2
  }

  # Route 53 resolver addresses for this environment.
  dns_servers = [ "10.40.2.77", "10.40.2.228" ]
  inbound_resolver_endpoints = [
    "10.40.0.198",
    "10.40.0.64",
  ]

  # Route 53 hosted zones, keyed by logical role.
  dns_info = {
    "private" = {
      zone    = "pvt.xdr.accenturefederalcyber.com",
      zone_id = "Z08498911YSZW4A0XN4AG"
    }
    "reverse" = {
      zone    = "10.in-addr.arpa"
      zone_id = "Z08395981DXDBY6CVJTW1"
    }
    "public" = {
      zone    = "xdr.accenturefederalcyber.com"
      zone_id = "Z0083657A94URZM2TM87"
    },
    "legacy_private" = {
      zone    = "msoc.defpoint.local"
      zone_id = "Z2JVOIKXZP64QP"
    },
    "legacy_public" = {
      zone    = "mdr.defpoint.com"
      zone_id = "Z2HYR9YEZ4KLDE"
    },
    "legacy_public_internal" = { # Weird name, but this is the 'private' copy of the public domain
      zone    = "mdr.defpoint.com"
      zone_id = "Z2RGT77XQU1QBX"
    },
  }

  # NOTE(review): plaintext HEC token — same secrets-handling concern as above.
  aws_flowlogs_hec_token = "4a2cacb2-fea1-4328-8f25-9bef26333e91"

  # Legacy DNS
  # NOTE(review): these duplicate the legacy_* entries in dns_info above;
  # presumably kept for older modules that still reference them — confirm
  # before removing.
  dns_private = {
    "id"   = "Z2JVOIKXZP64QP"
    "name" = "msoc.defpoint.local"
  }
  dns_private2 = { # There are many of these...
                   # future task to figure it out
    "id"   = "Z2RGT77XQU1QBX"
    "name" = "mdr.defpoint.com"
  }
  dns_public = {
    "id"   = "Z2HYR9YEZ4KLDE"
    "name" = "mdr.defpoint.com"
  }

  # Provide some legacy DNS entries so that systems we build
  # don't have to be rebuilt when we migrate the supporting systems.
  # Idea here is just to build entries for those systems we need during
  # the transition.
  #
  # When you migrate one of the systems below:
  # 1) Remove the entry from this list.
  # 2) Reapply the legacy-mdr-*/026-legacy-dns-entries module.
  # 3) Create a new entry in the module with which you're creating the new instance.
  legacy_private_dns = {
    #"moose-splunk-cm" = "10.80.101.77", # Needed for Universal Forwarder
    #"moose-splunk-sh" = "10.80.101.65", # Needed for xdr-inventory
    #"jira-server" = "10.80.101.250",
    #"mailrelay" = "10.80.1.107",
    #"openvpn" = "10.100.0.129",
    #"phantom" = "10.80.101.221",
    #"proxy" = "10.80.101.166",
    #"reposerver" = "10.80.101.197",
    #"sensu" = "10.80.101.230",
    #"splunk-mc" = "10.80.1.27",
    #"vault-1" = "10.80.1.134",
    #"vault-2" = "10.80.2.236",
    #"vault-3" = "10.80.3.61",
    "salt-master-legacy" = "10.80.101.170",
  }
  legacy_private_cname_dns = {
    #"iratemoses" = "internal-moose-internal-187462540.us-east-1.elb.amazonaws.com"
  }
  legacy_public_dns = {
    #"proxy" = "35.153.103.164",
    #"reposerver" = "18.234.16.205",
    "salt-master-legacy" = "52.5.165.105",
    #"sensu" = "52.6.95.246",
  }
  # cnames only
  legacy_public_cname_dns = {
    #"iratemoses" = "moose-external-1850541289.us-east-1.elb.amazonaws.com",
    #"portal" = "portal-alb-prod-1353134871.us-east-1.elb.amazonaws.com"
  }
}