瀏覽代碼

Next Version of GHE Systems

ghe-backup system added
dns entries added
misc fixes
Fred Damstra 4 年之前
父節點
當前提交
066ddcdb15

+ 173 - 0
base/github/backup_server.tf

@@ -0,0 +1,173 @@
+# Some instance variables
+locals {
+  ami_selection       = "minion" # master, minion, ...
+}
+
+resource "aws_network_interface" "ghe-backup-interface" {
+  subnet_id = var.private_subnets[0]
+  security_groups = [ data.aws_security_group.typical-host.id, aws_security_group.ghe_backup_server.id ]
+  description = "ghe-backup"
+  tags = merge(var.standard_tags, var.tags, { Name = "github-backup" })
+}
+
+resource "aws_instance" "ghe-backup-instance" {
+  tenancy = "default"
+  ebs_optimized = true
+  disable_api_termination = var.instance_termination_protection
+  instance_initiated_shutdown_behavior = "stop"
+  instance_type = var.backup_instance_type
+  key_name = "msoc-build"
+  monitoring = false
+  iam_instance_profile = "msoc-default-instance-profile"
+
+  ami = local.ami_map[local.ami_selection]
+  # We need to ignore ebs_block_device changes, because if the AMI changes, so does the snapshot_id.
+  # If they add a feature to block more specific changes (eg `ebs_block_devices[*].snapshot_id`), then
+  # that could be removed.
+  #lifecycle { ignore_changes = [ ami, key_name, user_data, ebs_block_device ] }
+  lifecycle { ignore_changes = [ ami, key_name, user_data ] }
+
+  # These device definitions are optional, but added for clarity.
+  root_block_device {
+      volume_type = "gp3"
+      #volume_size = "60"
+      delete_on_termination = true
+      encrypted = true
+      kms_key_id = data.aws_kms_key.ebs-key.arn
+  }
+
+  ebs_block_device {
+    # swap
+    device_name = "/dev/xvdm"
+    volume_type = "gp3"
+    volume_size = 8
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    # Snapshot IDs need to be grabbed from the ami, or it will replace every time. It's ugly.
+    # This may prompt replacement when the AMI is updated.
+    # See:
+    #   https://github.com/hashicorp/terraform/issues/19958
+    #   https://github.com/terraform-providers/terraform-provider-aws/issues/13118
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdm"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /home
+    device_name = "/dev/xvdn"
+    volume_type = "gp3"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdn"].ebs.snapshot_id
+
+  }
+  ebs_block_device {
+    # /var
+    device_name = "/dev/xvdo"
+    volume_type = "gp3"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdo"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/tmp
+    device_name = "/dev/xvdp"
+    volume_type = "gp3"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdp"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/log
+    device_name = "/dev/xvdq"
+    volume_type = "gp3"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdq"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/log/audit
+    device_name = "/dev/xvdr"
+    volume_type = "gp3"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvdr"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /tmp
+    device_name = "/dev/xvds"
+    volume_type = "gp3"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings[local.ami_selection]["/dev/xvds"].ebs.snapshot_id
+  }
+
+  network_interface {
+    device_index = 0
+    network_interface_id = aws_network_interface.ghe-backup-interface.id
+  }
+
+  user_data = data.template_cloudinit_config.cloud-init.rendered
+  tags = merge( var.standard_tags, var.tags, { Name = "github-backup" })
+  volume_tags = merge( var.standard_tags, var.tags, { Name = "github-backup" })
+}
+
+data "template_file" "cloud-init" {
+  # Should these be in a common directory? I suspect they'd be reusable
+  template = file("${path.module}/cloud-init/cloud-init.tpl")
+
+  vars = {
+    hostname = "ghe-backup"
+    fqdn = "ghe-backup.${var.dns_info["private"]["zone"]}"
+    environment = var.environment
+    salt_master  = var.salt_master
+    proxy = var.proxy
+    aws_partition = var.aws_partition
+    aws_partition_alias = var.aws_partition_alias
+    aws_region = var.aws_region
+  }
+}
+
+# Render a multi-part cloud-init config making use of the part
+# above, and other source files
+data "template_cloudinit_config" "cloud-init" {
+  gzip          = true
+  base64_encode = true
+
+  # Main cloud-config configuration file.
+  part {
+    filename     = "init.cfg"
+    content_type = "text/cloud-config"
+    content      = data.template_file.cloud-init.rendered
+  }
+
+  # Additional parts as needed
+  #part {
+  #  content_type = "text/x-shellscript"
+  #  content      = "ffbaz"
+  #}
+}
+
+module "private_dns_record_ghe_backup" {
+  source = "../../submodules/dns/private_A_record"
+
+  name = "ghe-backup"
+  ip_addresses = [ aws_instance.ghe-backup-instance.private_ip ]
+  dns_info = var.dns_info
+  reverse_enabled = var.reverse_enabled
+
+  providers = {
+    aws.c2 = aws.c2
+  }
+}

+ 73 - 0
base/github/cloud-init/cloud-init.tpl

@@ -0,0 +1,73 @@
+#cloud-config
+preserve_hostname: false
+hostname: ${hostname}
+salt-master: ${salt_master}
+fqdn: ${fqdn}
+
+# Write files happens early
+write_files:
+- content: |
+    proxy=http://${proxy}:80
+  path: /etc/yum.conf
+  append: true
+- content: |
+    [global]
+    proxy=${proxy}
+  path: /etc/pip.conf
+- content: |
+    export HTTPS_PROXY=http://${proxy}:80
+    export HTTP_PROXY=http://${proxy}:80
+    export NO_PROXY=localhost,127.0.0.1,169.254.169.254,pvt.xdrtest.accenturefederalcyber.com,pvt.xdr.accenturefederalcyber.com,reposerver.msoc.defpoint.local,jenkins.msoc.defpoint.local,pod1search-splunk-sh.msoc.defpoint.local,s3.amazonaws.com,ssm.${ aws_region }.amazonaws.com,ec2messages.${ aws_region }.amazonaws.com,ec2.${ aws_region }.amazonaws.com,ssmmessages.${ aws_region }.amazonaws.com,iratemoses.mdr.defpoint.com,jira.mdr.defpoint.com,reposerver.pvt.xdr.accenturefederalcyber.com,jenkins.pvt.xdr.accenturefederalcyber.com,pod1search-splunk-sh.pvt.xdr.accenturefederalcyber.com,reposerver.pvt.xdrtest.accenturefederalcyber.com,jenkins.pvt.xdrtest.accenturefederalcyber.com,pod1search-splunk-sh.pvt.xdrtest.accenturefederalcyber.com,iratemoses.xdr.accenturefederalcyber.com,jira.xdr.accenturefederalcyber.com,iratemoses.xdrtest.accenturefederalcyber.com,jira.xdrtest.accenturefederalcyber.com
+    export https_proxy=$HTTPS_PROXY
+    export http_proxy=$HTTP_PROXY
+    export no_proxy=$NO_PROXY
+  path: /etc/profile.d/proxy.sh
+- content: |
+    ${fqdn}
+  path: /etc/salt/minion_id
+- content: |
+    master: ${salt_master}
+  path: /etc/salt/minion
+- content: |
+    grains:
+      environment: ${ environment }
+      aws_partition: ${ aws_partition }
+      aws_partition_alias: ${ aws_partition_alias }
+      aws_region: ${ aws_region }
+  path: /etc/salt/minion.d/cloud_init_grains.conf
+
+#yum_repos:
+#  epel-release:
+#    baseurl: http://download.fedoraproject.org/pub/epel/7/$basearch
+#    enabled: false
+#    failovermethod: priority
+#    gpgcheck: true
+#    gpgkey: http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
+#    name: Extra Packages for Enterprise Linux 7 - Release
+
+packages:
+ - vim
+
+package_update: true # Always patch
+
+growpart:
+  mode: auto
+  devices: [ '/', '/var', '/var/log', '/var/log/audit', '/var/tmp', '/tmp', '/home' ]
+  ignore_growroot_disabled: false
+
+runcmd:
+ - /bin/systemctl restart salt-minion
+ - /bin/systemctl enable salt-minion
+ - /bin/systemctl start amazon-ssm-agent
+ - /bin/systemctl enable amazon-ssm-agent
+ - /usr/sbin/aide --update --verbose=0
+ - /bin/cp /var/lib/aide/aide.db.new.gz /var/lib/aide/aide.db.gz
+
+# Either final message or power state, but probably not both
+final_message: "The system is up after $UPTIME seconds"
+#power_state:
+#  delay: "+30"
+#  mode: reboot
+#  message: "System configured after $UPTIME seconds"
+#  timeout: 300
+#  condition: true

+ 21 - 0
base/github/efs.tf

@@ -0,0 +1,21 @@
+resource "aws_efs_file_system" "ghe_backup_data" {
+  creation_token = "ghe-backups"
+  encrypted      = true
+  kms_key_id     = aws_kms_key.ghe_backup_data.arn
+
+  # Customer-managed KMS key for this filesystem is defined in kms.tf (aws_kms_key.ghe_backup_data)
+
+
+  # Transition backups not accessed for 60 days to Infrequent Access storage to reduce cost.
+  lifecycle_policy {
+    transition_to_ia = "AFTER_60_DAYS"
+  }
+
+  tags = merge( var.standard_tags, var.tags, { Name = "GitHub Enterprise Backup Data" })
+}
+
+resource "aws_efs_mount_target" "ghe_backup_mount" {
+  file_system_id  = aws_efs_file_system.ghe_backup_data.id
+  subnet_id       = var.private_subnets[0]
+  security_groups = [ aws_security_group.ghe_backup_server.id ]
+}

+ 23 - 22
base/github/ec2.tf → base/github/github_servers.tf

@@ -29,38 +29,39 @@ resource "aws_instance" "ghe" {
   # single space to disable default module behavior
   root_block_device {
       volume_size           = 200
-      volume_type           = "io1"
-      iops                  = 1000
+      volume_type           = "gp3"
+      iops                  = 3000
       delete_on_termination = true
       encrypted             = true
       kms_key_id            = data.aws_kms_key.ebs-key.arn
     } 
 
+  ebs_block_device {
+    # github data
+    # Note: Not in AMI
+    device_name = "/dev/xvdf"
+    volume_size = var.github_data_volume_size
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    volume_type = "gp3"
+    iops = 3000
+  }
+
   tags = merge( var.standard_tags, var.tags, { Name = format("%s-%s", "github-enterprise", count.index) })
   volume_tags = merge( var.standard_tags, var.tags, { Name = format("%s-%s", "github-enterprise", count.index) })
 }
 
-resource "aws_ebs_volume" "ghe_data_volume" {
-  count             = var.instance_count
-
-  availability_zone = var.azs[count.index]
-  size              = var.github_data_volume_size
-  type              = "io1"
-  iops              = 1500
-  encrypted         = true
-  kms_key_id        = data.aws_kms_key.ebs-key.arn
+# Would need this a second time if count > 1 (this record covers instance 0)
+module "private_dns_record_ghe_backup_0" {
+  source = "../../submodules/dns/private_A_record"
 
-  tags = merge( var.standard_tags, var.tags, { Name = format("%s-%s", "github-enterprise-data", count.index) })
+  name = format("%s-%s", "github-enterprise", 0)
+  ip_addresses = [ aws_instance.ghe[0].private_ip ]
+  dns_info = var.dns_info
+  reverse_enabled = var.reverse_enabled
 
-  lifecycle {
-    ignore_changes = [ snapshot_id ]
+  providers = {
+    aws.c2 = aws.c2
   }
 }
-
-resource "aws_volume_attachment" "ghe_data_volume" {
-  count         = var.instance_count
-  device_name   = "/dev/xvdf"
-  volume_id     = aws_ebs_volume.ghe_data_volume[count.index].id
-  instance_id   = aws_instance.ghe[count.index].id
-  force_detach  = true
-}

+ 168 - 0
base/github/kms.tf

@@ -0,0 +1,168 @@
+resource "aws_kms_key" "ghe_backup_data" {
+  description = "EFS for Github Backup Server"
+  policy      = data.aws_iam_policy_document.ghe_backup_data_policy.json
+  enable_key_rotation = true
+}
+
+resource "aws_kms_alias" "ghe_backup_data" {
+  name          = "alias/ghe_backup_data"
+  target_key_id = aws_kms_key.ghe_backup_data.key_id
+}
+
+data "aws_iam_policy_document" "ghe_backup_data_policy" {
+  policy_id = "ghe_backup_policy"
+
+  statement {
+    sid    = "Enable IAM User Permissions"
+    effect = "Allow"
+
+    principals {
+      type        = "AWS"
+      identifiers = ["arn:${var.aws_partition}:iam::${var.aws_account_id}:root"]
+    }
+
+    actions   = ["kms:*"]
+    resources = ["*"]
+  }
+
+  statement {
+    sid    = "Allow access for Key Administrators"
+    effect = "Allow"
+
+    principals {
+      type = "AWS"
+
+      identifiers = [
+        "arn:${var.aws_partition}:iam::${var.aws_account_id}:role/user/mdr_terraformer",
+      ]
+    }
+
+    actions = [
+      "kms:Create*",
+      "kms:Describe*",
+      "kms:Enable*",
+      "kms:List*",
+      "kms:Put*",
+      "kms:Update*",
+      "kms:Revoke*",
+      "kms:Disable*",
+      "kms:Get*",
+      "kms:Delete*",
+      "kms:TagResource",
+      "kms:UntagResource",
+      "kms:ScheduleKeyDeletion",
+      "kms:CancelKeyDeletion",
+    ]
+
+    resources = ["*"]
+  }
+
+  statement {
+    sid    = "Allow use of the key"
+    effect = "Allow"
+
+    principals {
+      type = "AWS"
+
+      identifiers = [
+        "arn:${var.aws_partition}:iam::${var.aws_account_id}:role/user/mdr_terraformer",
+        "arn:${var.aws_partition}:iam::${var.aws_account_id}:role/msoc-default-instance-role",
+        "arn:${var.aws_partition}:iam::${var.aws_account_id}:role/portal-instance-role",
+        "arn:${var.aws_partition}:iam::${var.aws_account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling",
+      ]
+    }
+
+    actions = [
+      "kms:Encrypt",
+      "kms:Decrypt",
+      "kms:ReEncrypt*",
+      "kms:GenerateDataKey*",
+      "kms:DescribeKey",
+    ]
+
+    resources = ["*"]
+  }
+
+  statement {
+    sid    = "Allow attachment of persistent resources"
+    effect = "Allow"
+
+    principals {
+      type = "AWS"
+
+      identifiers = [
+        "arn:${var.aws_partition}:iam::${var.aws_account_id}:role/user/mdr_terraformer",
+        "arn:${var.aws_partition}:iam::${var.aws_account_id}:role/msoc-default-instance-role",
+        "arn:${var.aws_partition}:iam::${var.aws_account_id}:role/portal-instance-role",
+        "arn:${var.aws_partition}:iam::${var.aws_account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling",
+      ]
+    }
+
+    actions = [
+      "kms:CreateGrant",
+      "kms:ListGrants",
+      "kms:RevokeGrant",
+    ]
+
+    resources = ["*"]
+
+    condition {
+      test     = "Bool"
+      variable = "kms:GrantIsForAWSResource"
+      values   = ["true"]
+    }
+  }
+
+  # Basically copied from the default key AWS makes, hopefully improved to
+  # make it work in multiple AWS regions with a single policy
+  statement {
+    sid    = "elasticfilesystem"
+    effect = "Allow"
+
+    principals {
+      type        = "AWS"
+      identifiers = ["*"]
+    }
+
+    actions = [
+      "kms:Encrypt",
+      "kms:Decrypt",
+      "kms:ReEncrypt*",
+      "kms:GenerateDataKey*",
+      "kms:CreateGrant",
+      "kms:DescribeKey",
+    ]
+
+    resources = ["*"]
+
+    # https://docs.aws.amazon.com/efs/latest/ug/logging-using-cloudtrail.html#efs-encryption-cloudtrail
+    condition {
+      test     = "StringEquals"
+      variable = "kms:CallerAccount"
+
+      values = [
+        "055650462987", # US East (N. Virginia)
+        "771736226457", # US East (Ohio) 
+        "208867197265", # US West (N. California)
+        "736298361104", # US West (Oregon)
+        "167972735943", # US GovCloud (East)
+        "174619389399", # US GovCloud (West)
+      ]
+    }
+
+    # https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-via-service
+    condition {
+      test     = "StringEquals"
+      variable = "kms:ViaService"
+
+      values = [
+        "elasticfilesystem.us-east-1.amazonaws.com",
+        "elasticfilesystem.us-east-2.amazonaws.com",
+        "elasticfilesystem.us-west-1.amazonaws.com",
+        "elasticfilesystem.us-west-2.amazonaws.com",
+        "elasticfilesystem.us-gov-east-1.amazonaws.com",
+        "elasticfilesystem.us-gov-west-1.amazonaws.com",
+      ]
+    }
+  }
+}

+ 12 - 0
base/github/outputs.tf

@@ -1,3 +1,11 @@
+output backup_instance_arn {
+  value = aws_instance.ghe-backup-instance.arn
+}
+
+output backup_instance_private_ip {
+  value = aws_instance.ghe-backup-instance.private_ip
+}
+
 output instance_arn {
   value = [ for instance in aws_instance.ghe[*]: instance.arn ]
 }
@@ -13,3 +21,7 @@ output public_url {
 output private_url {
   value = "github.${var.dns_info["private"]["zone"]}"
 }
+
+output efs_id {
+  value = aws_efs_file_system.ghe_backup_data.id
+}

+ 40 - 0
base/github/securitygroup-backupserver.tf

@@ -0,0 +1,40 @@
+resource "aws_security_group" "ghe_backup_server" {
+  name = "ghe-backup"
+
+  # Merge the standard tag sets for consistency with every other resource in
+  # this commit (backup instance, ENI, EFS all tag via merge()).
+  tags = merge(var.standard_tags, var.tags, { Name = "ghe-backup" })
+
+  vpc_id      = var.vpc_id
+  description = "github backup server"
+}
+
+resource "aws_security_group_rule" "ghe_backup_server_122_to_github" {
+  security_group_id        = aws_security_group.ghe_backup_server.id
+  type                     = "egress"
+  source_security_group_id = aws_security_group.ghe_server.id
+  from_port                = 122
+  to_port                  = 122
+  protocol                 = "tcp"
+  description              = "Outbound ssh to GH mgmt"
+}
+
+resource "aws_security_group_rule" "ghe_backup_server_egress_nfs" {
+  security_group_id        = aws_security_group.ghe_backup_server.id
+  type                     = "egress"
+  source_security_group_id = aws_security_group.ghe_backup_server.id
+  from_port                = 2049
+  to_port                  = 2049
+  protocol                 = "tcp"
+  description              = "Outbound NFS"
+}
+
+resource "aws_security_group_rule" "ghe_backup_server_ingress_nfs" {
+  security_group_id        = aws_security_group.ghe_backup_server.id
+  type                     = "ingress"
+  source_security_group_id = aws_security_group.ghe_backup_server.id
+  from_port                = 2049
+  to_port                  = 2049
+  protocol                 = "tcp"
+  description              = "Inbound NFS"
+}

+ 20 - 0
base/github/securitygroup-server.tf

@@ -67,6 +67,16 @@ resource "aws_security_group_rule" "ghe_server_inbound_mgmt_ssh_sgs" {
   description              = "Inbound ssh (for mgmt)"
 }
 
+resource "aws_security_group_rule" "ghe_server_inbound_mgmt_ssh_backup_sgs" {
+  security_group_id        = aws_security_group.ghe_server.id
+  source_security_group_id = aws_security_group.ghe_backup_server.id
+  type                     = "ingress"
+  from_port                = 122
+  to_port                  = 122
+  protocol                 = "tcp"
+  description              = "Inbound ssh (for mgmt)"
+}
+
 resource "aws_security_group_rule" "ghe_server_inbound_https_cidr" {
   security_group_id        = aws_security_group.ghe_server.id
   type                     = "ingress"
@@ -117,6 +127,16 @@ resource "aws_security_group_rule" "ghe_server_inbound_mgmt_https_sgs" {
   description              = "Inbound ssh (for mgmt)"
 }
 
+resource "aws_security_group_rule" "ghe_server_inbound_mgmt_https_backup_sgs" {
+  security_group_id        = aws_security_group.ghe_server.id
+  source_security_group_id = aws_security_group.ghe_backup_server.id
+  type                     = "ingress"
+  from_port                = 8443
+  to_port                  = 8443
+  protocol                 = "tcp"
+  description              = "Inbound https (for mgmt)"
+}
+
 resource "aws_security_group_rule" "ghe_server_inbound_https_internal_elb_8444" {
   security_group_id        = aws_security_group.ghe_server.id
   source_security_group_id = aws_security_group.ghe_elb_internal.id

+ 5 - 0
base/github/vars.tf

@@ -33,6 +33,11 @@ variable "instance_type" {
   default = "t3a.micro"
 }
 
+variable "backup_instance_type" { 
+  type = string
+  default = "t3a.micro"
+}
+
 variable "reverse_enabled" { 
   description = "Whether to create the reverse DNS entry."
   type = bool