#cloud-config
# Cloud-init user-data template (rendered by Terraform templatefile) for
# Splunk indexer nodes: configures proxy env, salt-minion, and runs highstate.
preserve_hostname: false
salt-master: ${salt_master}
# Write files happens early
write_files:
  # Route yum through the proxy.
  - content: |
      proxy=http://${proxy}:80
    path: /etc/yum.conf
    append: true
  # Route salt-minion traffic through the proxy.
  - content: |
      proxy_host: ${proxy}
      proxy_port: 80
    path: /etc/salt/minion.d/proxy.conf
  # Route pip through the proxy.
  - content: |
      [global]
      proxy=${proxy}
    path: /etc/pip.conf
  # Shell proxy environment for all login sessions.
  - content: |
      export HTTPS_PROXY=http://${proxy}:80
      export HTTP_PROXY=http://${proxy}:80
      export NO_PROXY=localhost,127.0.0.1,169.254.169.254,pvt.xdrtest.accenturefederalcyber.com,pvt.xdr.accenturefederalcyber.com,reposerver.msoc.defpoint.local,jenkins.msoc.defpoint.local,pod1search-splunk-sh.msoc.defpoint.local,s3.amazonaws.com,ssm.${ aws_region }.amazonaws.com,ec2messages.${ aws_region }.amazonaws.com,ec2.${ aws_region }.amazonaws.com,ssmmessages.${ aws_region }.amazonaws.com,iratemoses.mdr.defpoint.com,jira.mdr.defpoint.com,reposerver.pvt.xdr.accenturefederalcyber.com,jenkins.pvt.xdr.accenturefederalcyber.com,pod1search-splunk-sh.pvt.xdr.accenturefederalcyber.com,reposerver.pvt.xdrtest.accenturefederalcyber.com,jenkins.pvt.xdrtest.accenturefederalcyber.com,pod1search-splunk-sh.pvt.xdrtest.accenturefederalcyber.com,iratemoses.xdr.accenturefederalcyber.com,jira.xdr.accenturefederalcyber.com,iratemoses.xdrtest.accenturefederalcyber.com,jira.xdrtest.accenturefederalcyber.com
      export https_proxy=$HTTPS_PROXY
      export http_proxy=$HTTP_PROXY
      export no_proxy=$NO_PROXY
    path: /etc/profile.d/proxy.sh
  # indexers don't know their fqdn until boot, so this is created below in bootcmd
  # NOTE: "$ {fqdn}" is spaced deliberately so Terraform does not interpolate it.
  #- content: |
  #    $ {fqdn}
  #  path: /etc/salt/minion_id
  - content: |
      master: ${salt_master}
      #log_level: debug
    path: /etc/salt/minion
  # Static grains baked in at provision time.
  - content: |
      grains:
        environment: ${ environment }
        aws_partition: ${ aws_partition }
        aws_partition_alias: ${ aws_partition_alias }
        splunk_prefix: ${ splunk_prefix }
        aws_region: ${ aws_region }
    path: /etc/salt/minion.d/cloud_init_grains.conf
#yum_repos:
#  epel-release:
#    baseurl: http://download.fedoraproject.org/pub/epel/7/$basearch
#    enabled: false
#    failovermethod: priority
#    gpgcheck: true
#    gpgkey: http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
#    name: Extra Packages for Enterprise Linux 7 - Release
packages:
  - vim
package_update: true  # Always patch
growpart:
  mode: auto
  devices: ['/', '/var', '/var/log', '/var/log/audit', '/var/tmp', '/tmp', '/home']
  ignore_growroot_disabled: false
bootcmd:
  # Derive a short instance suffix from the EC2 instance-id (last 3 chars),
  # then set hostname and salt minion_id before the minion starts.
  - "INSTANCE_ID=`/usr/bin/curl -f --connect-timeout 1 --silent http://169.254.169.254/latest/meta-data/instance-id | tail -c 3`"
  - "/bin/hostnamectl set-hostname ${prefix}-splunk-idx-$INSTANCE_ID'.${zone}'"
  - "echo ${prefix}-splunk-idx-$INSTANCE_ID'.${zone}' > /etc/salt/minion_id"
runcmd:
  - /bin/systemctl restart salt-minion
  - /bin/systemctl enable salt-minion
  - /bin/systemctl start amazon-ssm-agent
  - /bin/systemctl enable amazon-ssm-agent
  # Rebuild the AIDE integrity database after first-boot changes.
  - /usr/sbin/aide --update --verbose=0
  - /bin/cp /var/lib/aide/aide.db.new.gz /var/lib/aide/aide.db.gz
  # For indexers only
  # legacy slept for 20 seconds, but I think aide update will take care of that delay
  - /bin/salt-call saltutil.sync_all
  # Chicken/egg problem. We need pillars to get correct grains, and grains to get correct pillars.
  - /bin/salt-call --refresh-grains-cache saltutil.refresh_pillar
  - /bin/sleep 1
  - /bin/salt-call --refresh-grains-cache saltutil.refresh_grains
  - /bin/sleep 1
  - /bin/salt-call --refresh-grains-cache saltutil.refresh_pillar
  - /bin/sleep 1
  - /bin/salt-call --refresh-grains-cache saltutil.refresh_grains
  - /bin/sleep 1
  # Recording our initial values is useful for troubleshooting
  - /bin/salt-call pillar.items > /root/pillars.initial_highstate.yml
  - /bin/salt-call grains.items > /root/grains.initial_highstate.yml
  - "/bin/echo MARKER: START FIRST HIGHSTATE"
  - /bin/salt-call state.highstate
  - "/bin/echo MARKER: END FIRST HIGHSTATE"
  - "/bin/echo MARKER: START SECOND HIGHSTATE"
  - /bin/salt-call state.highstate
  - "/bin/echo MARKER: END SECOND HIGHSTATE"
# Either final message or power state, but probably not both
final_message: "The system is up after $UPTIME seconds"
#power_state:
#  delay: "+30"
#  mode: reboot
#  message: "System configured after $UPTIME seconds"
#  timeout: 300
#  condition: true