cloud-init.tpl

#cloud-config
preserve_hostname: false
salt-master: ${salt_master}
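# NOTE (illustrative, not part of the original template): the interpolation placeholders in this
# file (salt_master, proxy, aws_region, environment, aws_partition, aws_partition_alias,
# splunk_prefix, prefix, zone) suggest it is rendered as a Terraform template and passed to the
# instance as user data, roughly along the lines of
#   templatefile("cloud-init.tpl", { salt_master = "...", proxy = "...", aws_region = "...", ... })
# The call site and variable values are assumptions; only the variable names come from this file.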
# write_files runs early in boot (before packages are installed and before runcmd)
write_files:
- content: |
    proxy=http://${proxy}:80
  path: /etc/yum.conf
  append: true
- content: |
    proxy_host: ${proxy}
    proxy_port: 80
  path: /etc/salt/minion.d/proxy.conf
- content: |
    [global]
    proxy=${proxy}
  path: /etc/pip.conf
- content: |
    export HTTPS_PROXY=http://${proxy}:80
    export HTTP_PROXY=http://${proxy}:80
    export NO_PROXY=localhost,127.0.0.1,169.254.169.254,pvt.xdrtest.accenturefederalcyber.com,pvt.xdr.accenturefederalcyber.com,reposerver.msoc.defpoint.local,jenkins.msoc.defpoint.local,pod1search-splunk-sh.msoc.defpoint.local,s3.amazonaws.com,ssm.${ aws_region }.amazonaws.com,ec2messages.${ aws_region }.amazonaws.com,ec2.${ aws_region }.amazonaws.com,ssmmessages.${ aws_region }.amazonaws.com,iratemoses.mdr.defpoint.com,jira.mdr.defpoint.com,reposerver.pvt.xdr.accenturefederalcyber.com,jenkins.pvt.xdr.accenturefederalcyber.com,pod1search-splunk-sh.pvt.xdr.accenturefederalcyber.com,reposerver.pvt.xdrtest.accenturefederalcyber.com,jenkins.pvt.xdrtest.accenturefederalcyber.com,pod1search-splunk-sh.pvt.xdrtest.accenturefederalcyber.com,iratemoses.xdr.accenturefederalcyber.com,jira.xdr.accenturefederalcyber.com,iratemoses.xdrtest.accenturefederalcyber.com,jira.xdrtest.accenturefederalcyber.com
    export https_proxy=$HTTPS_PROXY
    export http_proxy=$HTTP_PROXY
    export no_proxy=$NO_PROXY
  path: /etc/profile.d/proxy.sh
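# The NO_PROXY list above keeps the EC2 instance metadata service (169.254.169.254), S3, and the
# regional ssm/ec2/ec2messages/ssmmessages endpoints off the proxy, along with the internal
# repo/Jenkins/Splunk/Jira hosts, so that traffic reaches them directly.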
# Indexers don't know their FQDN until boot, so /etc/salt/minion_id is created below in bootcmd
#- content: |
#    $ {fqdn}
#  path: /etc/salt/minion_id
- content: |
    master: ${salt_master}
    #log_level: debug
  path: /etc/salt/minion
- content: |
    grains:
      environment: ${ environment }
      aws_partition: ${ aws_partition }
      aws_partition_alias: ${ aws_partition_alias }
      splunk_prefix: ${ splunk_prefix }
      aws_region: ${ aws_region }
  path: /etc/salt/minion.d/cloud_init_grains.conf
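# These grains are presumably consumed by Salt state and pillar top files to target
# environment-, region-, and partition-specific configuration for this minion.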
#yum_repos:
#  epel-release:
#    baseurl: http://download.fedoraproject.org/pub/epel/7/$basearch
#    enabled: false
#    failovermethod: priority
#    gpgcheck: true
#    gpgkey: http://download.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
#    name: Extra Packages for Enterprise Linux 7 - Release
packages:
- vim
package_update: true  # Always patch
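# growpart expands the partitions backing the filesystems listed below so they fill their
# block devices on first boot.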
growpart:
  mode: auto
  devices: [ '/', '/var', '/var/log', '/var/log/audit', '/var/tmp', '/tmp', '/home' ]
  ignore_growroot_disabled: false
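# cloud-init runs the bootcmd list as a single shell script early in boot (and on every boot),
# so the INSTANCE_ID variable set on the first line is still visible to the following lines.
# The hostname and Salt minion_id are derived from the last three characters of the instance ID,
# e.g. <prefix>-splunk-idx-abc.<zone> (example value only).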
bootcmd:
- "INSTANCE_ID=`/usr/bin/curl -f --connect-timeout 1 --silent http://169.254.169.254/latest/meta-data/instance-id | tail -c 3`"
- "/bin/hostnamectl set-hostname ${prefix}-splunk-idx-$INSTANCE_ID'.${zone}'"
- "echo ${prefix}-splunk-idx-$INSTANCE_ID'.${zone}' > /etc/salt/minion_id"
runcmd:
- /bin/systemctl restart salt-minion
- /bin/systemctl enable salt-minion
- /bin/systemctl start amazon-ssm-agent
- /bin/systemctl enable amazon-ssm-agent
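# aide --update rebuilds the AIDE integrity database so it reflects the files written above;
# copying aide.db.new.gz over aide.db.gz makes that snapshot the new baseline.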
- /usr/sbin/aide --update --verbose=0
- /bin/cp /var/lib/aide/aide.db.new.gz /var/lib/aide/aide.db.gz
# For indexers only
# The legacy config slept for 20 seconds here, but the aide update above should take care of that delay
- /bin/salt-call saltutil.sync_all
# Chicken/egg problem: we need pillars to get correct grains, and grains to get correct pillars.
- /bin/salt-call --refresh-grains-cache saltutil.refresh_pillar
- /bin/sleep 1
- /bin/salt-call --refresh-grains-cache saltutil.refresh_grains
- /bin/sleep 1
- /bin/salt-call --refresh-grains-cache saltutil.refresh_pillar
- /bin/sleep 1
- /bin/salt-call --refresh-grains-cache saltutil.refresh_grains
- /bin/sleep 1
# Recording our initial values is useful for troubleshooting
- /bin/salt-call pillar.items > /root/pillars.initial_highstate.yml
- /bin/salt-call grains.items > /root/grains.initial_highstate.yml
- "/bin/echo MARKER: START FIRST HIGHSTATE"
- /bin/salt-call state.highstate
- "/bin/echo MARKER: END FIRST HIGHSTATE"
- "/bin/echo MARKER: START SECOND HIGHSTATE"
- /bin/salt-call state.highstate
- "/bin/echo MARKER: END SECOND HIGHSTATE"
# Either final_message or power_state, but probably not both
final_message: "The system is up after $UPTIME seconds"
#power_state:
#  delay: "+30"
#  mode: reboot
#  message: "System configured after $UPTIME seconds"
#  timeout: 300
#  condition: true