
Adds Salt Master to Test Env

Brad Poulton, 4 years ago
parent
commit
415a177371

+ 1 - 0
base/account_standards/iam.tf

@@ -264,3 +264,4 @@ resource "aws_iam_role_policy" "splunk_addon_for_aws" {
 }
 EOF
 }
+

+ 1 - 0
base/salt_master/amis.tf

@@ -0,0 +1 @@
+../amis.tf

+ 72 - 0
base/salt_master/cloud-init/cloud_init_salt_master.tpl

@@ -0,0 +1,72 @@
+#cloud-config
+preserve_hostname: false
+hostname: ${hostname}
+salt-master: ${salt_master}
+fqdn: ${fqdn}
+
+# Writing files happens early in boot
+write_files:
+- content: |
+    proxy=http://${proxy}:80
+  path: /etc/yum.conf
+  append: true
+- content: |
+    [global]
+    proxy=${proxy}
+  path: /etc/pip.conf
+- content: |
+    export HTTPS_PROXY=http://${proxy}:80
+    export HTTP_PROXY=http://${proxy}:80
+    export NO_PROXY=localhost,127.0.0.1,169.254.169.254,pvt.xdrtest.accenturefederalcyber.com,pvt.xdr.accenturefederalcyber.com,reposerver.msoc.defpoint.local,jenkins.msoc.defpoint.local,pod1search-splunk-sh.msoc.defpoint.local,s3.amazonaws.com,ssm.us-east-1.amazonaws.com,ec2messages.us-east-1.amazonaws.com,ec2.us-east-1.amazonaws.com,ssmmessages.us-east-1.amazonaws.com,iratemoses.mdr.defpoint.com,jira.mdr.defpoint.com,reposerver.pvt.xdr.accenturefederalcyber.com,jenkins.pvt.xdr.accenturefederalcyber.com,pod1search-splunk-sh.pvt.xdr.accenturefederalcyber.com,reposerver.pvt.xdrtest.accenturefederalcyber.com,jenkins.pvt.xdrtest.accenturefederalcyber.com,pod1search-splunk-sh.pvt.xdrtest.accenturefederalcyber.com,iratemoses.xdr.accenturefederalcyber.com,jira.xdr.accenturefederalcyber.com,iratemoses.xdrtest.accenturefederalcyber.com,jira.xdrtest.accenturefederalcyber.com
+    export https_proxy=$HTTPS_PROXY
+    export http_proxy=$HTTP_PROXY
+    export no_proxy=$NO_PROXY
+  path: /etc/profile.d/proxy.sh
+- content: |
+    ${fqdn}
+  path: /etc/salt/minion_id
+- content: |
+    master: 127.0.0.1
+  path: /etc/salt/minion
+- content: |
+    grains:
+      environment: ${ environment }
+      aws_partition: ${ aws_partition }
+      aws_partition_alias: ${ aws_partition_alias }
+  path: /etc/salt/minion.d/cloud_init_grains.conf
+
+#yum_repos:
+#  epel-release:
+#    baseurl: http://download.fedoraproject.org/pub/epel/7/$basearch
+#    enabled: false
+#    failovermethod: priority
+#    gpgcheck: true
+#    gpgkey: http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
+#    name: Extra Packages for Enterprise Linux 7 - Release
+
+packages:
+ - vim
+
+package_update: true # Always patch
+
+growpart:
+  mode: auto
+  devices: [ '/', '/var', '/var/log', '/var/log/audit', '/var/tmp', '/tmp', '/home' ]
+  ignore_growroot_disabled: false
+
+runcmd:
+ - /bin/systemctl restart salt-minion
+ - /bin/systemctl enable salt-minion
+ - /bin/systemctl start amazon-ssm-agent
+ - /bin/systemctl enable amazon-ssm-agent
+ - /usr/sbin/aide --update --verbose=0
+ - /bin/cp /var/lib/aide/aide.db.new.gz /var/lib/aide/aide.db.gz
+
+# Either final message or power state, but probably not both
+final_message: "The system is up after $UPTIME seconds"
+#power_state:
+#  delay: "+30"
+#  mode: reboot
+#  message: "System configured after $UPTIME seconds"
+#  timeout: 300
+#  condition: true

+ 140 - 0
base/salt_master/cloud-init/provision_salt_master.sh

@@ -0,0 +1,140 @@
+#!/bin/bash
+
+SUDO=""
+LOG_FILE=/var/log/cloud-init-provision-salt-master-output.log
+exec > >(tee ${LOG_FILE}) 2>&1
+
+echo "Started provision_salt_master.sh" 
+
+#Install dependencies. The virtualenv is used to reduce Python module conflicts between rpm- and pip-installed packages.
+echo "Install dependencies" 
+yum install GitPython --enablerepo=epel -y
+yum install python-virtualenv -y
+virtualenv ~/awscli
+
+#check if proxy settings are ready
+if [ -s "/etc/pip.conf" ]; then
+    ~/awscli/bin/pip install awscli
+else
+    echo "pip proxy not ready"
+fi
+
+chmod +x ~/awscli/bin/aws
+
+#we need to refresh our bash session to pick up the proxy settings. 
+if [ -s "/etc/profile.d/proxy.sh" ]; then
+    source /etc/profile.d/proxy.sh
+else
+    echo "System proxy not ready"
+fi
+
+~/awscli/bin/aws secretsmanager get-secret-value --region us-gov-east-1  --secret-id saltmaster/ssh_key --query SecretString --output text > ~root/.ssh/github_read_only
+chmod 0600 ~root/.ssh/github_read_only
+
+#GPG Keys
+echo "GPG Keys" 
+mkdir -p /etc/salt/gpgkeys
+chmod 0700 /etc/salt/gpgkeys
+~/awscli/bin/aws secretsmanager get-secret-value --region us-gov-east-1  --secret-id saltmaster/gpg/private --query SecretString --output text > /etc/salt/gpgkeys/private
+~/awscli/bin/aws secretsmanager get-secret-value --region us-gov-east-1  --secret-id saltmaster/gpg/ownertrust --query SecretString --output text > /etc/salt/gpgkeys/ownertrust
+chmod 0600 /etc/salt/gpgkeys/private
+chmod 0600 /etc/salt/gpgkeys/ownertrust
+gpg --import --yes --batch -q --homedir /etc/salt/gpgkeys/ /etc/salt/gpgkeys/private
+gpg --import-ownertrust --homedir /etc/salt/gpgkeys/ /etc/salt/gpgkeys/ownertrust
+
+#Salt Master Pub/Private
+~/awscli/bin/aws secretsmanager get-secret-value --region us-gov-east-1  --secret-id saltmaster/master.pem --query SecretString --output text > /etc/salt/pki/master/master.pem
+~/awscli/bin/aws secretsmanager get-secret-value --region us-gov-east-1  --secret-id saltmaster/master.pub --query SecretString --output text > /etc/salt/pki/master/master.pub
+chmod 0400 /etc/salt/pki/master/master.pem
+
+#clean up. These are not needed after initial bootstrapping. 
+yum remove python-virtualenv -y
+rm -rf ~/awscli
+
+cat > ~/.ssh/config << 'EOF'
+
+Host github.mdr.defpoint.com
+  IdentityFile ~/.ssh/github_read_only
+
+EOF
+chmod 0400 ~/.ssh/config
+
+cat - > ~/.ssh/known_hosts << 'EOF'
+github.mdr.defpoint.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBObDNqJCB+iXyR2hm0GRERmfEl33E7Kiu+UGmSHHC878NQjsvOtLxoRAPIU07bCzKutFNZCi+8bRkQWXtOT2InA=
+EOF
+
+cat > /etc/salt/master.d/gpg_pillar.conf << 'EOF'
+
+decrypt_pillar:
+  - 'secrets' : gpg
+
+EOF
+
+#Both Test and Prod start out pointing to the master branch. After initial startup, a highstate is expected to run, which changes the branch in Test to develop.
+cat - > /etc/salt/master.d/gitfs.conf << 'EOF'
+
+fileserver_backend:
+  - gitfs
+  - roots
+
+gitfs_saltenv_whitelist:
+  - base
+  - master
+  - develop
+
+# File roots via Git
+gitfs_provider: gitpython
+gitfs_update_interval: 600
+gitfs_base: master
+gitfs_remotes:
+  - git@github.mdr.defpoint.com:mdr-engineering/msoc-infrastructure.git:
+    - name: gitfs-base-msoc
+    - base: master
+    - root: salt/fileroots
+  # File roots for the CMs
+  - git@github.mdr.defpoint.com:mdr-engineering/msoc-moose-cm.git:
+    - name: msoc-moose-cm
+    - base: master
+    - mountpoint: salt://customer_repos/msoc-moose-cm
+  - git@github.mdr.defpoint.com:mdr-engineering/msoc-afs-cm.git:
+    - name: msoc-afs-cm
+    - base: master
+    - mountpoint: salt://customer_repos/msoc-afs-cm
+  - git@github.mdr.defpoint.com:mdr-engineering/msoc-nga-cm.git:
+    - name: msoc-nga-cm
+    - base: master
+    - mountpoint: salt://customer_repos/msoc-nga-cm
+  # File roots for the deployment servers
+  - git@github.mdr.defpoint.com:mdr-engineering/msoc-nga-pop.git:
+    - name: msoc-nga-pop
+    - base: master
+    - mountpoint: salt://deployment_servers/msoc-nga-pop
+  - git@github.mdr.defpoint.com:mdr-engineering/msoc-afs-pop.git:
+    - name: msoc-afs-pop
+    - base: master
+    - mountpoint: salt://deployment_servers/msoc-afs-pop
+
+
+# Pillar via Git configs
+git_pillar_provider: gitpython
+git_pillar_root:    salt/pillar
+git_pillar_base:    master
+git_pillar_branch: master
+ext_pillar:
+  - git:
+    - git@github.mdr.defpoint.com:mdr-engineering/msoc-infrastructure.git:
+      - name: salt-piller-base
+      - env: base
+
+EOF
+
+systemctl restart salt-master
+systemctl enable salt-master
+
+#This attempts to help out with accepting the minion key.
+sleep 60
+salt-key -A -y
+
+salt-call state.highstate
+
+echo "Ending provision_salt_master.sh" 

+ 85 - 0
base/salt_master/iam.tf

@@ -0,0 +1,85 @@
+#############################
+# Salt Master instance profile
+#
+# Salt Master got needs for some sweet sweet passwords
+
+resource "aws_iam_instance_profile" "salt_master_instance_profile" {
+  name     = "salt-master-instance-profile"
+  role     = aws_iam_role.salt_master_instance_role.name
+}
+
+resource "aws_iam_role" "salt_master_instance_role" {
+  name     = "salt-master-instance-role"
+  assume_role_policy = <<EOF
+{   
+    "Version": "2012-10-17",
+    "Statement": [
+      {   
+        "Sid": "", 
+        "Effect": "Allow",
+        "Principal": {
+          "Service": [
+            "ec2.amazonaws.com",
+            "ssm.amazonaws.com"
+            ]
+        },
+        "Action": "sts:AssumeRole"
+      }
+    ]
+  }
+EOF
+}
+
+data "aws_iam_policy_document" "salt_master_policy_doc" {
+  statement {
+    sid    = "AllowSaltSecretsCommunication"
+    effect = "Allow"
+
+    actions = [
+      "secretsmanager:GetResourcePolicy",
+      "secretsmanager:GetSecretValue",
+      "secretsmanager:DescribeSecret",
+      "secretsmanager:ListSecretVersionIds"
+    ]
+
+    resources = [
+      "arn:${var.aws_partition}:secretsmanager:*:*:secret:saltmaster/*"
+    ]
+  }
+
+  statement {
+    sid    = "AllowAssumeRole"
+    effect = "Allow"
+
+    actions = [
+      "sts:AssumeRole"
+    ]
+
+    resources = [
+      "arn:${var.aws_partition}:iam::*:role/service/salt-master-inventory-role"
+    ]
+  }
+}
+
+resource "aws_iam_policy" "salt_master_policy" {
+  name        = "salt_master_sm"
+  path        = "/"
+  policy      = data.aws_iam_policy_document.salt_master_policy_doc.json
+}
+
+resource "aws_iam_role_policy_attachment" "salt_master_sm_attach" {
+  role       = aws_iam_role.salt_master_instance_role.name
+  policy_arn = aws_iam_policy.salt_master_policy.arn
+}
+
+resource "aws_iam_role_policy_attachment" "salt_master_AmazonEC2RoleforSSM" {
+  role       = aws_iam_role.salt_master_instance_role.name
+  policy_arn = "arn:${var.aws_partition}:iam::aws:policy/service-role/AmazonEC2RoleforSSM"
+
+}
+
+#This policy needs to be created prior to creating the Salt Master
+resource "aws_iam_role_policy_attachment" "salt_master_policy_attach" {
+  role       = aws_iam_role.salt_master_instance_role.name
+  policy_arn = "arn:${var.aws_partition}:iam::${var.aws_account_id}:policy/launchroles/default_instance_tag_read"
+}
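
Note: the AllowAssumeRole statement above expects a salt-master-inventory-role (under the /service/ path) to exist in each target account. That role is not part of this commit; purely as a hedged sketch, its trust policy would need to allow the instance role created here to assume it, roughly like the following (the account ID and partition are placeholders):

resource "aws_iam_role" "salt_master_inventory_role" {
  # Hypothetical counterpart role in a target account; the real definition lives outside this commit.
  name = "salt-master-inventory-role"
  path = "/service/"

  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [{
      Effect = "Allow"
      Action = "sts:AssumeRole"
      # Placeholder account ID; in practice this is the account that owns salt-master-instance-role.
      Principal = { AWS = "arn:aws-us-gov:iam::123456789012:role/salt-master-instance-role" }
    }]
  })
}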

+ 286 - 0
base/salt_master/main.tf

@@ -0,0 +1,286 @@
+# Rather than pass in the aws security group, we just look it up. This will
+# probably be useful other places, as well.
+data "aws_security_group" "typical-host" {
+  name   = "typical-host"
+  vpc_id = var.vpc_id
+}
+
+# Use the default EBS key
+data "aws_kms_key" "ebs-key" {
+  key_id = "alias/ebs_root_encrypt_decrypt"
+}
+
+resource "aws_network_interface" "instance" {
+  subnet_id = var.subnets[0]
+  security_groups = [ data.aws_security_group.typical-host.id, aws_security_group.salt_master_security_group.id ]
+  description = var.instance_name
+  tags = merge(var.standard_tags, var.tags, { Name = var.instance_name })
+}
+
+resource "aws_eip" "instance" {
+  vpc = true
+  tags = merge(var.standard_tags, var.tags, { Name = var.instance_name })
+}
+
+resource "aws_eip_association" "instance" {
+  network_interface_id = aws_network_interface.instance.id
+  allocation_id = aws_eip.instance.id
+}
+
+resource "aws_instance" "instance" {
+  #availability_zone = var.azs[count.index % 2]
+  tenancy = "default"
+  ebs_optimized = true
+  disable_api_termination = var.instance_termination_protection
+  instance_initiated_shutdown_behavior = "stop"
+  instance_type = var.instance_type
+  key_name = "msoc-build"
+  monitoring = false
+  iam_instance_profile = "salt-master-instance-profile"
+
+  ami = local.ami_map["master"]
+  # We need to ignore ebs_block_device changes, because if the AMI changes, so does the snapshot_id.
+  # If they add a feature to block more specific changes (eg `ebs_block_devices[*].snapshot_id`), then
+  # that could be removed.
+  lifecycle { ignore_changes = [ ami, key_name, user_data, ebs_block_device ] }
+
+  # These device definitions are optional, but added for clarity.
+  root_block_device {
+      volume_type = "gp2"
+      #volume_size = "60"
+      delete_on_termination = true
+      encrypted = true
+      kms_key_id = data.aws_kms_key.ebs-key.arn
+  }
+
+  ebs_block_device {
+    # swap
+    device_name = "/dev/xvdm"
+    volume_size = 48
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    # Snapshot IDs need to be grabbed from the ami, or it will replace every time. It's ugly.
+    # This may prompt replacement when the AMI is updated.
+    # See:
+    #   https://github.com/hashicorp/terraform/issues/19958
+    #   https://github.com/terraform-providers/terraform-provider-aws/issues/13118
+    snapshot_id = local.block_device_mappings["master"]["/dev/xvdm"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /home
+    device_name = "/dev/xvdn"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings["master"]["/dev/xvdn"].ebs.snapshot_id
+
+  }
+  ebs_block_device {
+    # /var
+    device_name = "/dev/xvdo"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings["master"]["/dev/xvdo"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/tmp
+    device_name = "/dev/xvdp"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings["master"]["/dev/xvdp"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/log
+    device_name = "/dev/xvdq"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings["master"]["/dev/xvdq"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /var/log/audit
+    device_name = "/dev/xvdr"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings["master"]["/dev/xvdr"].ebs.snapshot_id
+  }
+  ebs_block_device {
+    # /tmp
+    device_name = "/dev/xvds"
+    # volume_size = xx
+    delete_on_termination = true
+    encrypted = true
+    kms_key_id = data.aws_kms_key.ebs-key.arn
+    snapshot_id = local.block_device_mappings["master"]["/dev/xvds"].ebs.snapshot_id
+  }
+
+  network_interface {
+    device_index = 0
+    network_interface_id = aws_network_interface.instance.id
+  }
+
+  user_data = data.template_cloudinit_config.salt_master_cloud_init_config.rendered
+  tags = merge( var.standard_tags, var.tags, { Name = var.instance_name })
+}
+
+#Uncomment this when we are ready to make the change.
+# module "private_dns_record" {
+#   source = "../../submodules/dns/private_A_record"
+
+#   name = var.instance_name
+#   ip_addresses = [ aws_instance.instance.private_ip ]
+#   dns_info = var.dns_info
+#   reverse_enabled = var.reverse_enabled
+
+#   providers = {
+#     aws.c2 = aws.c2
+#   }
+# }
+
+# module "public_dns_record" {
+#   source = "../../submodules/dns/public_A_record"
+
+#   name = var.instance_name
+#   ip_addresses = [ aws_eip.instance.public_ip ]
+#   dns_info = var.dns_info
+
+#   providers = {
+#     aws.mdr-common-services-commercial = aws.mdr-common-services-commercial
+#   }
+# }
+
+#The cloud-init data prepares the Salt Master for use.
+#This includes secrets from AWS Secrets Manager, GitHub connectivity via SSH, and
+#prepopulating the Salt Master private key. May history judge me kindly.
+data "template_file" "salt_master_cloud_init" {
+  # Should these be in a common directory? I suspect they'd be reusable
+  template = "${file("${path.module}/cloud-init/cloud_init_salt_master.tpl")}"
+
+  vars = {
+    hostname = var.instance_name
+    fqdn = "${var.instance_name}.${var.dns_info["private"]["zone"]}"
+    environment = var.environment
+    salt_master  = var.salt_master
+    proxy = var.proxy
+    aws_partition = var.aws_partition
+    aws_partition_alias = var.aws_partition_alias
+  }
+}
+
+# Render a multi-part cloud-init config making use of the part
+# above, and other source files
+data "template_cloudinit_config" "salt_master_cloud_init_config" {
+  gzip          = true
+  base64_encode = true
+
+  # Main cloud-config configuration file.
+  part {
+    filename     = "init.cfg"
+    content_type = "text/cloud-config"
+    content      = "${data.template_file.salt_master_cloud_init.rendered}"
+  }
+
+  # Additional parts as needed
+  part {
+    content_type = "text/x-shellscript"
+    content      = "${file("${path.module}/cloud-init/provision_salt_master.sh")}"
+  }
+}
+
+resource "aws_security_group" "salt_master_security_group" {
+  name = "salt_master_security_group"
+  description = "Security Group for Salt Master(s)"
+  vpc_id = var.vpc_id
+  tags = merge(var.standard_tags, var.tags)
+}
+
+resource "aws_security_group_rule" "http-out" {
+  description = "For endpoints and troubleshooting"
+  type = "egress"
+  from_port = 80
+  to_port = 80
+  protocol = "tcp"
+  cidr_blocks = [ "10.0.0.0/8" ]
+  security_group_id = aws_security_group.salt_master_security_group.id
+}
+
+resource "aws_security_group_rule" "https-out" {
+  description = "For endpoints and troubleshooting"
+  type = "egress"
+  from_port = 443
+  to_port = 443
+  protocol = "tcp"
+  cidr_blocks = [ "10.0.0.0/8" ]
+  security_group_id = aws_security_group.salt_master_security_group.id
+}
+
+resource "aws_security_group_rule" "saltstack" {
+  description = "SaltStack"
+  type = "ingress"
+  from_port = "4505"
+  to_port = "4506"
+  protocol = "tcp"
+  cidr_blocks = [ "10.0.0.0/8" ]
+  security_group_id = aws_security_group.salt_master_security_group.id
+}
+
+resource "aws_security_group_rule" "saltstack-afs-pop" {
+  description = "SaltStack - AFS POP"
+  type = "ingress"
+  from_port = "4505"
+  to_port = "4506"
+  protocol = "tcp"
+  cidr_blocks = var.afs_pop
+  security_group_id = aws_security_group.salt_master_security_group.id
+}
+
+resource "aws_security_group_rule" "saltstack-afs-azure-pop" {
+  description = "SaltStack - AFS Azure POP"
+  type = "ingress"
+  from_port = "4505"
+  to_port = "4506"
+  protocol = "tcp"
+  cidr_blocks = var.afs_azure_pop
+  security_group_id = aws_security_group.salt_master_security_group.id
+}
+
+resource "aws_security_group_rule" "saltstack-nga-pop" {
+  description = "SaltStack - NGA POP"
+  type = "ingress"
+  from_port = "4505"
+  to_port = "4506"
+  protocol = "tcp"
+  cidr_blocks = var.nga_pop
+  security_group_id = aws_security_group.salt_master_security_group.id
+}
+
+resource "aws_security_group_rule" "saltstack-xdr-interconnects" {
+  description = "SaltStack - XDR Interconnects"
+  type = "ingress"
+  from_port = "4505"
+  to_port = "4506"
+  protocol = "tcp"
+  cidr_blocks = var.xdr_interconnect
+  security_group_id = aws_security_group.salt_master_security_group.id
+}
+
+#TODO: make this better
+#for now, just allow 22 outbound anywhere
+resource "aws_security_group_rule" "saltstack-github" {
+  description = "SaltStack - Github Access"
+  type = "egress"
+  from_port = "22"
+  to_port = "22"
+  protocol = "tcp"
+  cidr_blocks = [ "0.0.0.0/0" ]
+  security_group_id = aws_security_group.salt_master_security_group.id
+}
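
Note: the TODO above leaves port 22 egress open to 0.0.0.0/0 so the master can reach GitHub. As a sketch only, assuming a hypothetical github_cidrs list variable were later added to vars.tf, the rule could be narrowed like this:

resource "aws_security_group_rule" "saltstack-github-restricted" {
  description       = "SaltStack - Github Access (restricted)"
  type              = "egress"
  from_port         = 22
  to_port           = 22
  protocol          = "tcp"
  cidr_blocks       = var.github_cidrs # hypothetical list(string) variable, not defined in this commit
  security_group_id = aws_security_group.salt_master_security_group.id
}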

+ 11 - 0
base/salt_master/outputs.tf

@@ -0,0 +1,11 @@
+output "instance_arn" {
+  value = aws_instance.instance.arn
+}
+
+output "instance_public_ip" {
+  value = aws_eip.instance.public_ip
+}
+
+output "instance_private_ip" {
+  value = aws_instance.instance.private_ip
+}

+ 52 - 0
base/salt_master/vars.tf

@@ -0,0 +1,52 @@
+variable "instance_name" {
+  description = "Hostname, DNS entry, etc."
+  type = string
+}
+
+variable "azs" {
+  type = list(string)
+}
+
+variable "subnets" {
+  type = list(string)
+}
+
+variable "vpc_id" {
+  type = string
+}
+
+variable "tags" {
+  description = "Tags to add to the resource (in addition to global standard tags)"
+  type        = map
+  default     = { }
+}
+
+variable "instance_type" { 
+  type = string
+  default = "t3a.micro"
+}
+
+variable "reverse_enabled" { 
+  description = "Whether to create the reverse DNS entry."
+  type = bool
+  default = true
+}
+
+variable "trusted_ips" { type = list(string) }
+variable "xdr_interconnect" { type = list(string) }
+variable "nga_pop" { type = list(string) }
+variable "afs_azure_pop" { type = list(string) }
+variable "afs_pop" { type = list(string) }
+variable "proxy" { type = string }
+variable "salt_master" { type = string }
+
+variable "cidr_map" { type = map }
+variable "dns_info" { type = map }
+variable "standard_tags" { type = map }
+variable "environment" { type = string }
+variable "aws_region" { type = string }
+variable "aws_partition" { type = string }
+variable "aws_partition_alias" { type = string }
+variable "aws_account_id" { type = string }
+variable "common_services_account" { type = string }
+variable "instance_termination_protection" { type = bool }

+ 3 - 0
base/salt_master/version.tf

@@ -0,0 +1,3 @@
+terraform {
+  required_version = "~> 0.13"
+}
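
Note: taken together, the module would presumably be instantiated from an environment root module roughly as follows. This is an illustrative sketch only; every value and reference below is a placeholder, not the actual test-environment configuration.

module "salt_master" {
  source = "../../base/salt_master"

  instance_name                   = "salt-master"
  instance_type                   = "t3a.large"              # placeholder size
  instance_termination_protection = true
  vpc_id                          = module.vpc.vpc_id        # placeholder references
  subnets                         = module.vpc.private_subnets
  azs                             = var.azs
  proxy                           = var.proxy
  salt_master                     = var.salt_master
  trusted_ips                     = var.trusted_ips
  xdr_interconnect                = var.xdr_interconnect
  nga_pop                         = var.nga_pop
  afs_pop                         = var.afs_pop
  afs_azure_pop                   = var.afs_azure_pop
  cidr_map                        = var.cidr_map
  dns_info                        = var.dns_info
  standard_tags                   = var.standard_tags
  environment                     = var.environment
  aws_region                      = var.aws_region
  aws_partition                   = var.aws_partition
  aws_partition_alias             = var.aws_partition_alias
  aws_account_id                  = var.aws_account_id
  common_services_account         = var.common_services_account
}

The values exposed in outputs.tf (for example module.salt_master.instance_private_ip) could then be re-exported or fed into DNS records once the commented-out DNS modules in main.tf are enabled.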