
Merge branch 'master' of https://github.xdr.accenturefederalcyber.com/mdr-engineering/xdr-terraform-modules

Brad Poulton 4 years ago
parent
commit
98cdb0560f

+ 14 - 0
base/keycloak/nlb.tf

@@ -15,6 +15,20 @@ module "public_dns_record" {
   }
 }
 
+#module "public_dns_record_2" {
+#  # A second dns record that can be used after configuring the XDR realm
+#  source = "../../submodules/dns/public_ALIAS_record"
+#
+#  name = "auth.${var.dns_info["public"]["zone"]}"
+#  target_dns_name = aws_lb.external.dns_name
+#  target_zone_id  = aws_lb.external.zone_id
+#  dns_info = var.dns_info
+#
+#  providers = {
+#    aws.mdr-common-services-commercial = aws.mdr-common-services-commercial
+#  }
+#}
+
 resource "aws_lb" "external" {
   name = "keycloak-external-nlb"
   load_balancer_type = "network"

+ 1 - 0
base/rhsso/amis.tf

@@ -0,0 +1 @@
+../amis.tf

+ 31 - 0
base/rhsso/certificate.tf.disabled

@@ -0,0 +1,31 @@
+# Certificate
+resource "aws_acm_certificate" "cert" {
+  domain_name       = "keycloak.${var.dns_info["public"]["zone"]}"
+  validation_method = "DNS"
+
+  tags = merge(var.standard_tags, var.tags)
+}
+
+resource "aws_acm_certificate_validation" "cert" {
+  certificate_arn         = aws_acm_certificate.cert.arn
+  validation_record_fqdns = [for record in aws_route53_record.cert_validation: record.fqdn]
+}
+
+resource "aws_route53_record" "cert_validation" {
+  provider = aws.mdr-common-services-commercial
+
+  for_each = {
+    for dvo in aws_acm_certificate.cert.domain_validation_options : dvo.domain_name => {
+      name   = dvo.resource_record_name
+      record = dvo.resource_record_value
+      type   = dvo.resource_record_type
+    }
+  }
+
+  allow_overwrite = true
+  name            = each.value.name
+  records         = [each.value.record]
+  ttl             = 60
+  type            = each.value.type
+  zone_id         = var.dns_info["public"]["zone_id"]
+}

+ 74 - 0
base/rhsso/cloud-init/cloud-init.tpl

@@ -0,0 +1,74 @@
+#cloud-config
+preserve_hostname: false
+hostname: ${hostname}
+salt-master: ${salt_master}
+fqdn: ${fqdn}
+
+# write_files happens early in boot,
+# but there is no proxy for the proxy itself. Comment these out for other proxies.
+write_files:
+- content: |
+    proxy=http://${proxy}:80
+  path: /etc/yum.conf
+  append: true
+- content: |
+    [global]
+    proxy=${proxy}
+  path: /etc/pip.conf
+- content: |
+    export HTTPS_PROXY=http://${proxy}:80
+    export HTTP_PROXY=http://${proxy}:80
+    export NO_PROXY=localhost,127.0.0.1,169.254.169.254,pvt.xdrtest.accenturefederalcyber.com,pvt.xdr.accenturefederalcyber.com,reposerver.msoc.defpoint.local,jenkins.msoc.defpoint.local,pod1search-splunk-sh.msoc.defpoint.local,s3.amazonaws.com,ssm.us-east-1.amazonaws.com,ec2messages.us-east-1.amazonaws.com,ec2.us-east-1.amazonaws.com,ssmmessages.us-east-1.amazonaws.com,iratemoses.mdr.defpoint.com,jira.mdr.defpoint.com,reposerver.pvt.xdr.accenturefederalcyber.com,jenkins.pvt.xdr.accenturefederalcyber.com,pod1search-splunk-sh.pvt.xdr.accenturefederalcyber.com,reposerver.pvt.xdrtest.accenturefederalcyber.com,jenkins.pvt.xdrtest.accenturefederalcyber.com,pod1search-splunk-sh.pvt.xdrtest.accenturefederalcyber.com,iratemoses.xdr.accenturefederalcyber.com,jira.xdr.accenturefederalcyber.com,iratemoses.xdrtest.accenturefederalcyber.com,jira.xdrtest.accenturefederalcyber.com
+    export https_proxy=$HTTPS_PROXY
+    export http_proxy=$HTTP_PROXY
+    export no_proxy=$NO_PROXY
+  path: /etc/profile.d/proxy.sh
+- content: |
+    ${fqdn}
+  path: /etc/salt/minion_id
+- content: |
+    master: ${salt_master}
+  path: /etc/salt/minion
+- content: |
+    grains:
+      environment: ${ environment }
+      aws_region: ${ aws_region }
+      aws_partition: ${ aws_partition }
+      aws_partition_alias: ${ aws_partition_alias }
+  path: /etc/salt/minion.d/cloud_init_grains.conf
+
+#yum_repos:
+#  epel-release:
+#    baseurl: http://download.fedoraproject.org/pub/epel/7/$basearch
+#    enabled: false
+#    failovermethod: priority
+#    gpgcheck: true
+#    gpgkey: http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
+#    name: Extra Packages for Enterprise Linux 7 - Release
+
+packages:
+ - vim
+
+package_update: true # Always patch
+
+growpart:
+  mode: auto
+  devices: [ '/', '/var', '/var/log', '/var/log/audit', '/var/tmp', '/tmp', '/home' ]
+  ignore_growroot_disabled: false
+
+runcmd:
+ - /bin/systemctl restart salt-minion
+ - /bin/systemctl enable salt-minion
+ - /bin/systemctl start amazon-ssm-agent
+ - /bin/systemctl enable amazon-ssm-agent
+ - /usr/sbin/aide --update --verbose=0
+ - /bin/cp /var/lib/aide/aide.db.new.gz /var/lib/aide/aide.db.gz
+
+# Either final message or power state, but probably not both
+final_message: "The system is up after $UPTIME seconds"
+#power_state:
+#  delay: "+30"
+#  mode: reboot
+#  message: "System configured after $UPTIME seconds"
+#  timeout: 300
+#  condition: true
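
The ${...} placeholders in this template are filled in by the template_file data source in main.tf below. As a debugging aid (not part of this commit), the same template can be rendered standalone in terraform console via the built-in templatefile() function — the values here are hypothetical:

templatefile("base/rhsso/cloud-init/cloud-init.tpl", {
  hostname            = "rhsso-0"
  fqdn                = "rhsso-0.example.internal"
  salt_master         = "salt.example.internal"
  proxy               = "proxy.example.internal"
  environment         = "test"
  aws_region          = "us-east-1"
  aws_partition       = "aws"
  aws_partition_alias = "aws"
})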

+ 96 - 0
base/rhsso/elbclassic.tf.skipped

@@ -0,0 +1,96 @@
+module "public_dns_record" {
+  source = "../../submodules/dns/public_ALIAS_record"
+
+  name = "keycloak.${var.dns_info["public"]["zone"]}"
+  target_dns_name = aws_elb.external.dns_name
+  target_zone_id  = aws_elb.external.zone_id
+  dns_info = var.dns_info
+
+  providers = {
+    aws.mdr-common-services-commercial = aws.mdr-common-services-commercial
+  }
+}
+
+resource "aws_elb" "external" {
+  name = "keycloak-external-elb"
+  subnets     = var.public_subnets
+  security_groups  = [ aws_security_group.elb_external.id ]
+
+  access_logs {
+    bucket  = "xdr-elb-${ var.environment }"
+    enabled = true
+  }
+
+  # We want client certs, so SSL must be terminated on the instance
+  listener {
+    instance_port      = 8443
+    instance_protocol  = "TCP"
+    lb_port            = 443
+    lb_protocol        = "TCP"
+    #ssl_certificate_id = aws_acm_certificate.cert.arn
+  }
+
+  listener {
+    instance_port      = 80
+    instance_protocol  = "HTTP"
+    lb_port            = 8080
+    lb_protocol        = "HTTP"
+  }
+
+  health_check {
+    healthy_threshold   = 2
+    unhealthy_threshold = 2
+    timeout             = 3
+    target              = "HTTPS:8443/"
+    interval            = 10
+  }
+
+  cross_zone_load_balancing   = true
+  idle_timeout                = 300
+  connection_draining         = true
+  connection_draining_timeout = 300
+
+  tags = merge(var.standard_tags, var.tags)
+}
+
+# Create a new load balancer attachment
+resource "aws_elb_attachment" "external_attachment" {
+  count    = var.keycloak_instance_count
+  elb      = aws_elb.external.id
+  instance = aws_instance.instance[count.index].id
+}
+
+# No stickiness on TCP
+#resource "aws_lb_cookie_stickiness_policy" "external" {
+#  name          = "Stickiness"
+#  load_balancer = aws_elb.external.name
+#  lb_port       = 443
+#  cookie_expiration_period = 600
+#}
+
+# No policy on TCP
+## Seems like there should be an easier way for terraform to assign the default policy, but
+## this is how it's done according to https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/load_balancer_listener_policy
+#resource "aws_load_balancer_policy" "elb_external_ssl_policy" {
+#  load_balancer_name = aws_elb.external.name
+#  policy_name        = "CopyOfELBSecurityPolicy-TLS-1-1-2017-01"
+#  policy_type_name   = "SSLNegotiationPolicyType"
+#
+#  policy_attribute {
+#    name  = "Reference-Security-Policy"
+#    value = "ELBSecurityPolicy-TLS-1-1-2017-01" # ALBs have a (superior?) "ELBSecurityPolicy-FS-1-2-Res-2019-08", but this will have to do for ELB
+#  }
+#}
+#
+#resource "aws_load_balancer_listener_policy" "elb-external-listener-policies-443" {
+#  load_balancer_name = aws_elb.external.name
+#  load_balancer_port = 443
+#
+#  policy_names = [
+#    aws_load_balancer_policy.elb_external_ssl_policy.policy_name
+#  ]
+#}
+
+### Client Certificate Configuration
+#
+# No AWS LBs support client certificates, unfortunately.

+ 196 - 0
base/rhsso/main.tf

@@ -0,0 +1,196 @@
+# Some instance variables
+locals {
+  ami_selection = "minion" # master, minion, ...
+}
+
+# Use the default EBS key
+data "aws_kms_key" "ebs-key" {
+  key_id = "alias/ebs_root_encrypt_decrypt"
+}
+
+resource "aws_network_interface" "instance" {
+  count = var.rhsso_instance_count
+  subnet_id = var.public_subnets[count.index % 3]
+  security_groups = [ data.aws_security_group.typical-host.id, aws_security_group.instance.id ]
+  description = "rhsso-${count.index}"
+  tags = merge(var.standard_tags, var.tags, { Name = "rhsso-${count.index}" })
+}
+
+resource "aws_eip" "instance" {
+  count = var.rhsso_instance_count
+  vpc = true
+  tags = merge(var.standard_tags, var.tags, { Name = "rhsso-${count.index}" })
+}
+
+resource "aws_eip_association" "instance" {
+  count = var.rhsso_instance_count
+  network_interface_id = aws_network_interface.instance[count.index].id
+  allocation_id = aws_eip.instance[count.index].id
+}
+
+resource "aws_instance" "instance" {
+  count = var.rhsso_instance_count
+  
+  tenancy = "default"
+  ebs_optimized = true
+  disable_api_termination = var.instance_termination_protection
+  instance_initiated_shutdown_behavior = "stop"
+  instance_type = var.instance_type
+  key_name = "msoc-build"
+  monitoring = false
+  iam_instance_profile = "msoc-default-instance-profile"
+
+  ami = local.ami_map[local.ami_selection]
+  # We need to ignore ebs_block_device changes, because if the AMI changes, so does the snapshot_id.
+  # If they add a feature to block more specific changes (eg `ebs_block_devices[*].snapshot_id`), then
+  # that could be removed.
+  lifecycle { ignore_changes = [ ami, key_name, user_data, ebs_block_device ] }
+
+  # These device definitions are optional, but added for clarity.
+  root_block_device {
+      volume_type = "gp2"
+      #volume_size = "60"
+      delete_on_termination = true
+      encrypted = true
+      kms_key_id = data.aws_kms_key.ebs-key.arn
+  }
+
+  ebs_block_device {
+    # swap
+    device_name = "/dev/xvdm"
+    #volume_size = 48
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    # Snapshot IDs need to be grabbed from the ami, or it will replace every time. It's ugly.
+    # This may prompt replacement when the AMI is updated.
+    # See:
+    #   https://github.com/hashicorp/terraform/issues/19958
+    #   https://github.com/terraform-providers/terraform-provider-aws/issues/13118
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdm"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /home
+    device_name = "/dev/xvdn"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdn"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var
+    device_name = "/dev/xvdo"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdo"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/tmp
+    device_name = "/dev/xvdp"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdp"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/log
+    device_name = "/dev/xvdq"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdq"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/log/audit
+    device_name = "/dev/xvdr"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdr"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /tmp
+    device_name = "/dev/xvds"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvds"].ebs.snapshot_id
+  }
+
+  network_interface {
+    device_index = 0
+    network_interface_id = aws_network_interface.instance[count.index].id
+  }
+
+  user_data = data.template_cloudinit_config.cloud_init_config[count.index].rendered
+  tags = merge( var.standard_tags, var.tags, { Name = "rhsso-${count.index}" })
+  volume_tags = merge( var.standard_tags, var.tags, { Name = "rhsso-${count.index}" })
+}
+
+module "private_dns_record" {
+  count = var.rhsso_instance_count
+
+  source = "../../submodules/dns/private_A_record"
+
+  name = "rhsso-${count.index}"
+  ip_addresses = [ aws_instance.instance[count.index].private_ip ]
+  dns_info = var.dns_info
+  reverse_enabled = var.reverse_enabled
+
+  providers = {
+    aws.c2 = aws.c2
+  }
+}
+
+#module "public_dns_record" {
+#  source = "../../submodules/dns/public_A_record"
+#
+#  name = var.instance_name
+#  ip_addresses = [ aws_eip.instance.public_ip ]
+#  dns_info = var.dns_info
+#
+#  providers = {
+#    aws.mdr-common-services-commercial = aws.mdr-common-services-commercial
+#  }
+#}
+
+# The cloud-init data prepares the instance for use.
+data "template_file" "cloud_init" {
+  count = var.rhsso_instance_count
+
+  # Should these be in a common directory? I suspect they'd be reusable
+  template = file("${path.module}/cloud-init/cloud-init.tpl")
+
+  vars = {
+    hostname = "rhsso-${count.index}"
+    fqdn = "rhsso-${count.index}.${var.dns_info["private"]["zone"]}"
+    environment = var.environment
+    salt_master  = var.salt_master
+    proxy = var.proxy
+    aws_partition = var.aws_partition
+    aws_partition_alias = var.aws_partition_alias
+    aws_region = var.aws_region
+  }
+}
+
+# Render a multi-part cloud-init config making use of the part
+# above, and other source files
+data "template_cloudinit_config" "cloud_init_config" {
+  count = var.rhsso_instance_count
+  gzip          = true
+  base64_encode = true
+
+  # Main cloud-config configuration file.
+  part {
+    filename     = "init.cfg"
+    content_type = "text/cloud-config"
+    content      = data.template_file.cloud_init[count.index].rendered
+  }
+}
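
Note: local.ami_map and local.block_device_mappings come from the symlinked amis.tf, whose contents are not part of this diff. A plausible sketch of what that file provides — the owner and name filter below are assumptions for illustration only:

data "aws_ami" "minion" {
  most_recent = true
  owners      = ["self"]        # hypothetical; the real filter lives in amis.tf
  filter {
    name   = "name"
    values = ["xdr-minion-*"]   # hypothetical AMI name pattern
  }
}

locals {
  ami_map = {
    minion = data.aws_ami.minion.id
  }
  # Keyed by device_name so each ebs_block_device above can pin its
  # snapshot_id to the snapshot baked into the AMI.
  block_device_mappings = {
    minion = {
      for bdm in data.aws_ami.minion.block_device_mappings :
      bdm.device_name => bdm
    }
  }
}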

+ 75 - 0
base/rhsso/nlb.tf

@@ -0,0 +1,75 @@
+# KeyCloak needs an NLB:
+#   * ALB/ELB can't terminate SSL, because RHSSO needs the certificate
+#   * Because they don't terminate SSL, they can't provide X-Forwarded-For, and RHSSO needs the source IP
+#   * Therefore, we use an NLB and preserve the source IP.
+module "public_dns_record" {
+  source = "../../submodules/dns/public_ALIAS_record"
+
+  name = "auth.${var.dns_info["public"]["zone"]}"
+  target_dns_name = aws_lb.external.dns_name
+  target_zone_id  = aws_lb.external.zone_id
+  dns_info = var.dns_info
+
+  providers = {
+    aws.mdr-common-services-commercial = aws.mdr-common-services-commercial
+  }
+}
+
+resource "aws_lb" "external" {
+  name = "rhsso-external-nlb"
+  load_balancer_type = "network"
+  internal = false
+  subnets = var.public_subnets
+
+  access_logs {
+    bucket  = "xdr-elb-${ var.environment }"
+    enabled = true
+  }
+
+  enable_cross_zone_load_balancing = true
+  idle_timeout                = 300
+
+  tags = merge(var.standard_tags, var.tags)
+}
+
+resource "aws_lb_listener" "nlb_443" {
+  load_balancer_arn = aws_lb.external.arn
+  port              = "443"
+  protocol          = "TCP"
+
+  default_action {
+    type             = "forward"
+    target_group_arn = aws_lb_target_group.external.arn
+  }
+}
+
+resource "aws_lb_target_group" "external" {
+  name     = "rhsso-external-nlb"
+  port     = 8443
+  protocol = "TCP"
+  vpc_id   = var.vpc_id
+  target_type = "instance"
+
+  health_check {
+    enabled = true
+    #healthy_threshold   = 3
+    #unhealthy_threshold = 2
+    timeout = 10
+    interval = 10
+    #matcher = "200,302"
+    path = "/"
+    protocol = "HTTPS"
+  }
+
+  stickiness {
+    enabled = true
+    type = "source_ip" # only option for NLBs
+  }
+}
+
+# Create a new load balancer attachment
+resource "aws_lb_target_group_attachment" "external_attachment" {
+  count = var.rhsso_instance_count
+  target_group_arn = aws_lb_target_group.external.arn
+  target_id = aws_instance.instance[count.index].id
+}

+ 20 - 0
base/rhsso/outputs.tf

@@ -0,0 +1,20 @@
+output db_password {
+  value = random_password.password.result
+  sensitive = true # To get this output, request it specifically with `terragrunt output db_password`
+}
+
+output db_endpoint {
+  value = module.rhsso_db.db_instance_endpoint
+}
+
+#output instance_arn {
+#  value = aws_instance.instance.arn
+#}
+#
+#output instance_public_ip {
+#  value = aws_eip.instance.public_ip
+#}
+#
+output instance_private_ip {
+  value = aws_instance.instance[*].private_ip
+}
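
Since db_password is sensitive, it is masked in the full output listing and must be requested by name, per the inline comment. A usage sketch:

terragrunt output                # db_password shows as <sensitive>
terragrunt output db_password    # prints the actual value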

+ 19 - 0
base/rhsso/rds-key.tf

@@ -0,0 +1,19 @@
+locals {
+  # For the default EBS key, we allow the entire account access
+  root_arn = "arn:${var.aws_partition}:iam::${var.aws_account_id}:root"
+}
+
+module "rhsso_key" {
+  source = "../../submodules/kms/ebs-key"
+
+  name = "rhsso_key"
+  alias = "alias/rhsso"
+  description = "encrypt and decrypt the rhsso RDS" 
+  tags = merge(var.standard_tags, var.tags)
+  key_admin_arns = [ ]
+  key_user_arns = concat([ local.root_arn ], var.extra_key_users)
+  key_attacher_arns = concat([ local.root_arn ], var.extra_key_attachers)
+  standard_tags = var.standard_tags
+  aws_account_id = var.aws_account_id
+  aws_partition = var.aws_partition
+}

+ 83 - 0
base/rhsso/rds.tf

@@ -0,0 +1,83 @@
+data "aws_rds_certificate" "latest" {
+  latest_valid_till = true
+}
+
+locals {
+  # GovCloud and Commercial use different CA certs
+  ca_cert_identifier = var.aws_partition == "aws" ? "rds-ca-2019" : "rds-ca-2017"
+}
+
+output "ca_cert_identifier" {
+  value = {
+    "current": local.ca_cert_identifier,
+    "latest":  data.aws_rds_certificate.latest.id
+  }
+}
+
+resource "random_password" "password" {
+  keepers          = {
+    "version": 1 # increment to change the password
+    # n.b. you could add other stuff to make this change automatically, e.g.
+    # "instance_type": var.instance_type
+    # Would then change this password every time the instance type changes.
+  }
+  length           = 32
+  special          = true
+  min_lower = 1
+  min_numeric = 1
+  min_upper = 1
+  min_special = 1
+  override_special = "~!%^()-_+"
+}
+
+module "rhsso_db" {
+  source = "terraform-aws-modules/rds/aws"
+  version = "~> v3.0"
+
+  identifier = var.identifier # this is the RDS identifier, not the DB name
+  name = "rhsso" # the DB name
+
+  engine             = "postgres"
+  engine_version     = "12.5"
+  instance_class     = var.db_instance_type
+  allocated_storage  = var.rds_storage
+  storage_encrypted  = true
+  kms_key_id = module.rhsso_key.key_arn
+  apply_immediately  = true # do not wait for maintenance window for changes
+  ca_cert_identifier = local.ca_cert_identifier
+
+  # NOTE: Do NOT use 'user' as the value for 'username' as it throws:
+  # "Error creating DB Instance: InvalidParameterValue: MasterUsername
+  # user cannot be used as it is a reserved word used by the engine"
+  username = "rhsso"
+  password = random_password.password.result
+
+  port     = "5432"
+
+  vpc_security_group_ids = [ aws_security_group.rhsso_rds_sg.id ]
+
+  backup_window      = "00:00-03:00"
+  maintenance_window = "Mon:03:00-Mon:06:00"
+
+  # disable backups to create DB faster
+  backup_retention_period = var.environment == "test" ? 0 : 35
+
+  tags = merge(var.standard_tags, var.tags)
+
+  enabled_cloudwatch_logs_exports = ["postgresql", "upgrade"]
+
+  # DB subnet group
+  subnet_ids = var.private_subnets
+
+  # DB parameter group
+  family = "postgres12"
+
+  # DB option group
+  major_engine_version = "12"
+
+  # Snapshot name upon DB deletion
+  final_snapshot_identifier_prefix = "${var.identifier}-final-snapshot"
+
+  # Database Deletion Protection
+  deletion_protection = var.instance_termination_protection
+}
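
The keepers block on random_password above is the rotation lever: the password only regenerates when a keeper value changes, so rotating the RDS master password is a one-line edit plus an apply (a sketch, not part of this commit):

  keepers = {
    "version": 2 # was 1; any change yields a new random_password.result,
                 # which flows into module.rhsso_db's password argument
  }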

+ 46 - 0
base/rhsso/security-groups-elb.tf.skipped

@@ -0,0 +1,46 @@
+resource "aws_security_group" "elb_external" {
+  name = "Keycloak ELB"
+  description = "Keycloak Instances"
+  vpc_id = var.vpc_id
+  tags = merge(var.standard_tags, var.tags)
+}
+
+resource "aws_security_group_rule" "elb-http-in" {
+  description = "Inbound HTTP, for redirect only"
+  type = "ingress"
+  from_port = "80"
+  to_port = "80"
+  protocol = "tcp"
+  cidr_blocks = [ "0.0.0.0/0" ]
+  security_group_id = aws_security_group.elb_external.id
+}
+
+resource "aws_security_group_rule" "elb-https-in" {
+  description = "Inbound HTTPS, where the magic happens"
+  type = "ingress"
+  from_port = "443"
+  to_port = "443"
+  protocol = "tcp"
+  cidr_blocks = [ "0.0.0.0/0" ]
+  security_group_id = aws_security_group.elb_external.id
+}
+
+resource "aws_security_group_rule" "elb-alt-http-to-instances" {
+  description = ""
+  type = "egress"
+  from_port = "8080"
+  to_port = "8080"
+  protocol = "TCP"
+  security_group_id = aws_security_group.elb_external.id
+  source_security_group_id = aws_security_group.instance.id
+}
+
+resource "aws_security_group_rule" "elb-alt-https-to-instances" {
+  description = ""
+  type = "egress"
+  from_port = "8443"
+  to_port = "8443"
+  protocol = "TCP"
+  security_group_id = aws_security_group.elb_external.id
+  source_security_group_id = aws_security_group.instance.id
+}

+ 26 - 0
base/rhsso/security-groups-rds.tf

@@ -0,0 +1,26 @@
+resource "aws_security_group" "rhsso_rds_sg" {
+  name = "${var.identifier}_rds_sg"
+  description = "Security Group for KeyCloak RDS"
+  vpc_id = var.vpc_id
+  tags = merge(var.standard_tags, var.tags)
+}
+
+resource "aws_security_group_rule" "rhsso_rds_in" {
+  description = "Inbound Postgres"
+  type = "ingress"
+  from_port = 5432
+  to_port = 5432
+  protocol = "tcp"
+  cidr_blocks = var.cidr_map["vpc-public"]
+  security_group_id = aws_security_group.rhsso_rds_sg.id
+}
+
+resource "aws_security_group_rule" "rhsso_security_in" {
+  description = "Inbound From Scanners"
+  type = "ingress"
+  from_port =  0
+  to_port = 65535
+  protocol = "-1"
+  cidr_blocks = var.cidr_map["vpc-scanners"]
+  security_group_id = aws_security_group.rhsso_rds_sg.id
+}

+ 179 - 0
base/rhsso/security-groups.tf

@@ -0,0 +1,179 @@
+# Rather than pass in the aws security group, we just look it up. This will
+# probably be useful other places, as well.
+data "aws_security_group" "typical-host" {
+  name   = "typical-host"
+  vpc_id = var.vpc_id
+}
+
+data "aws_security_group" "aws_endpoints" {
+  name   = "aws_endpoints"
+  vpc_id = var.vpc_id
+}
+
+#   ajp port: 8009
+#   http: 8080
+#   https: 8443
+#   mgmt-http: 9990
+#   mgmt-https: 9993
+#   txn-recovery-environment: 4712
+#   txn-status-manager: 4713
+
+resource "aws_security_group" "instance" {
+  name = "RHSSO"
+  description = "RHSSO Instances"
+  vpc_id = var.vpc_id
+  tags = merge(var.standard_tags, var.tags)
+}
+
+resource "aws_security_group_rule" "cluster-connectivity-ingress" {
+  description = "Receive any from other cluster members"
+  type = "ingress"
+  from_port = -1
+  to_port = -1
+  protocol = -1
+  security_group_id = aws_security_group.instance.id
+  source_security_group_id = aws_security_group.instance.id
+}
+
+resource "aws_security_group_rule" "cluster-connectivity-egress" {
+  description = "send any to other cluster members"
+  type = "egress"
+  from_port = -1
+  to_port = -1
+  protocol = -1
+  security_group_id = aws_security_group.instance.id
+  source_security_group_id = aws_security_group.instance.id
+}
+
+
+#resource "aws_security_group_rule" "instance-http-in" {
+#  description = ""
+#  type = "ingress"
+#  from_port = "80"
+#  to_port = "80"
+#  protocol = "tcp"
+#  cidr_blocks = [ "0.0.0.0/0" ]
+#  security_group_id = aws_security_group.instance.id
+#}
+#
+#resource "aws_security_group_rule" "instance-https-in" {
+#  description = ""
+#  type = "ingress"
+#  from_port = "443"
+#  to_port = "443"
+#  protocol = "tcp"
+#  cidr_blocks = [ "0.0.0.0/0" ]
+#  security_group_id = aws_security_group.instance.id
+#}
+#
+#resource "aws_security_group_rule" "instance-ajp-in" {
+#  description = ""
+#  type = "ingress"
+#  from_port = "8009"
+#  to_port = "8009"
+#  protocol = "tcp"
+#  cidr_blocks = [ "0.0.0.0/0" ]
+#  security_group_id = aws_security_group.instance.id
+#}
+
+resource "aws_security_group_rule" "instance-alt-http-in-from-access" {
+  description = "Alt HTTP from access"
+  type = "ingress"
+  from_port = "8080"
+  to_port = "8080"
+  protocol = "tcp"
+  cidr_blocks = var.cidr_map["vpc-access"]
+  security_group_id = aws_security_group.instance.id
+}
+
+#resource "aws_security_group_rule" "instance-alt-http-in-from-elb" {
+#  description = "Alt HTTP from ELB"
+#  type = "ingress"
+#  from_port = "8080"
+#  to_port = "8080"
+#  protocol = "tcp"
+#  security_group_id = aws_security_group.instance.id
+#  source_security_group_id = aws_security_group.elb_external.id
+#}
+
+resource "aws_security_group_rule" "instance-alt-https-in-from-access" {
+  description = "Alt HTTPS from Access"
+  type = "ingress"
+  from_port = "8443"
+  to_port = "8443"
+  protocol = "tcp"
+  cidr_blocks = var.cidr_map["vpc-access"]
+  security_group_id = aws_security_group.instance.id
+}
+
+resource "aws_security_group_rule" "instance-alt-https-in-from-nlb" {
+  description = "Alt HTTPS from Internet"
+  type = "ingress"
+  from_port = "8443"
+  to_port = "8443"
+  protocol = "tcp"
+  cidr_blocks = [ "0.0.0.0/0" ]
+  security_group_id = aws_security_group.instance.id
+}
+
+resource "aws_security_group_rule" "instance-mgmt-in-from-access" {
+  description = "Management HTTPS from Access"
+  type = "ingress"
+  from_port = "9990"
+  to_port = "9990"
+  protocol = "tcp"
+  cidr_blocks = var.cidr_map["vpc-access"]
+  security_group_id = aws_security_group.instance.id
+}
+
+resource "aws_security_group_rule" "instance-db-outbound" {
+  description = "Postgres Outbound"
+  type = "egress"
+  from_port = "5432"
+  to_port = "5432"
+  protocol = "tcp"
+  security_group_id = aws_security_group.instance.id
+  source_security_group_id = aws_security_group.rhsso_rds_sg.id
+}
+
+
+#resource "aws_security_group_rule" "instance-mgmt-http-in" {
+#  description = ""
+#  type = "ingress"
+#  from_port = "9990"
+#  to_port = "9990"
+#  protocol = "tcp"
+#  cidr_blocks = [ "0.0.0.0/0" ]
+#  security_group_id = aws_security_group.instance.id
+#}
+#
+#resource "aws_security_group_rule" "instance-mgmt-https-in" {
+#  description = ""
+#  type = "ingress"
+#  from_port = "9993"
+#  to_port = "9993"
+#  protocol = "tcp"
+#  cidr_blocks = [ "0.0.0.0/0" ]
+#  security_group_id = aws_security_group.instance.id
+#}
+#
+#resource "aws_security_group_rule" "instance-txn-in" {
+#  description = ""
+#  type = "ingress"
+#  from_port = "4712"
+#  to_port = "4713"
+#  protocol = "tcp"
+#  cidr_blocks = [ "0.0.0.0/0" ]
+#  security_group_id = aws_security_group.instance.id
+#}
+#
+## lock down before production, but I couldn't get letsencrypt to work with the proxy
+#resource "aws_security_group_rule" "instance-all-out" {
+#  description = ""
+#  type = "egress"
+#  from_port = "-1"
+#  to_port = "-1"
+#  protocol = "-1"
+#  cidr_blocks = [ "0.0.0.0/0" ]
+#  security_group_id = aws_security_group.instance.id
+#}

+ 84 - 0
base/rhsso/vars.tf

@@ -0,0 +1,84 @@
+variable extra_key_users {
+  description = "Extra encryption key users."
+  type = list
+  default = [ ]
+}
+
+variable extra_key_attachers {
+  description = "Extra encryption key attachers."
+  type = list
+  default = [ ]
+}
+
+variable identifier {
+  description = "RDS Identifier"
+  type = string
+  default = "rhsso"
+}
+
+variable rds_storage {
+  type = number
+  default = 100
+}
+
+variable "rhsso_instance_count" {
+  description = "how many instances"
+  type = number
+}
+
+variable "azs" {
+  type = list(string)
+}
+
+variable "private_subnets" {
+  type = list(string)
+}
+
+variable "public_subnets" {
+  type = list(string)
+}
+
+variable "vpc_id" {
+  type = string
+}
+
+variable "tags" {
+  description = "Tags to add to the resource (in addition to global standard tags)"
+  type        = map
+  default     = { }
+}
+
+variable "instance_type" { 
+  type = string
+  default = "t3a.micro"
+}
+
+variable "db_instance_type" { 
+  type = string
+  default = "db.t3.micro"
+}
+
+variable "reverse_enabled" { 
+  description = "Whether to create the reverse DNS entry."
+  type = bool
+  default = true
+}
+
+variable "trusted_ips" { type = list(string) }
+variable "xdr_interconnect" { type = list(string) }
+variable "nga_pop" { type = list(string) }
+variable "afs_azure_pop" { type = list(string) }
+variable "afs_pop" { type = list(string) }
+variable "proxy" { type = string }
+variable "salt_master" { type = string }
+
+variable "cidr_map" { type = map }
+variable "dns_info" { type = map }
+variable "standard_tags" { type = map }
+variable "environment" { type = string }
+variable "aws_region" { type = string }
+variable "aws_partition" { type = string }
+variable "aws_partition_alias" { type = string }
+variable "aws_account_id" { type = string }
+variable "common_services_account" { type = string }
+variable "instance_termination_protection" { type = bool }

+ 3 - 0
base/rhsso/version.tf

@@ -0,0 +1,3 @@
+terraform {
+  required_version = "~> 0.13"
+}