
Saving progress on jupyterhub and humio

Fred Damstra (k8s1) committed 2 years ago
commit 57366bb854

+ 44 - 0
Workloads/jupyterhub/config.yaml

@@ -0,0 +1,44 @@
+### Example config.yaml
+#proxy:
+#  https:
+#    enabled: true
+#    hosts:
+#      - jupyter.myschool.edu
+#    letsencrypt:
+#      contactEmail: me@myschool.edu
+#  service:
+#    loadBalancerIP: 10.0.0.150
+singleuser:
+  storage:
+    capacity: 2Gi
+    dynamic:
+      storageClass: managed-nfs-storage
+  extraEnv:
+    GRANT_SUDO: "yes"
+  memory:
+    guarantee: 256M
+  cpu:
+    guarantee: 0.25
+proxy:
+  #https:
+  #  enabled: true
+  #  type: offload
+  secretToken: "72C1E2FFA60E400D8D80D71E1D48354D"
+  service:
+    type: ClusterIP
+hub:
+  db:
+    pvc:
+      annotations:
+        nfs.io/storage-path: "user-shared"
+      storageClassName: managed-nfs-storage
+  config:
+    Authenticator:
+      admin_users:
+        - fdamstra
+      allowed_users:
+        - fdamstra
+    DummyAuthenticator:
+      password: Bluem00n
+    JupyterHub:
+      authenticator_class: dummy
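
Changes to these values take effect by re-running the chart install/upgrade against this file; roughly:

```
helm upgrade --cleanup-on-fail --install jhub jupyterhub/jupyterhub \
  --namespace jhub --create-namespace --version=2.0.0 --values config.yaml
```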

+ 31 - 0
Workloads/jupyterhub/jhub-ingress.yaml

@@ -0,0 +1,31 @@
+# Hosting
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: jupyterhub
+  namespace: jhub
+  annotations:
+    cert-manager.io/cluster-issuer: "letsencrypt-prod"
+    ## basic auth for index
+    # type of authentication
+    #nginx.ingress.kubernetes.io/auth-type: basic
+    # name of the secret that contains the user/password definitions
+    #nginx.ingress.kubernetes.io/auth-secret: basic-auth
+    # message to display with an appropriate context why the authentication is required
+    #nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - Jupyterhub'
+spec:
+  tls:
+  - hosts:
+    - jupyterhub.monkeybox.org
+    secretName: jupyterhub-tls
+  rules:
+  - host: jupyterhub.monkeybox.org
+    http:
+      paths:
+      - path: /
+        pathType: Prefix
+        backend:
+          service:
+            name: proxy-public
+            port:
+              number: 80
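
A minimal sketch of applying this ingress and watching the certificate get issued, assuming the nginx ingress controller and cert-manager (with the `letsencrypt-prod` ClusterIssuer) are already running in the cluster:

```
# Apply the ingress; cert-manager should create a Certificate named after the
# secretName above (jupyterhub-tls) and populate the TLS secret once issued.
kubectl apply -f Workloads/jupyterhub/jhub-ingress.yaml
kubectl -n jhub get ingress jupyterhub
kubectl -n jhub get certificate jupyterhub-tls
```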

+ 103 - 0
Workloads/jupyterhub/readme.md

@@ -0,0 +1,103 @@
+Ran:
+
+```
+$ helm upgrade --cleanup-on-fail --install jhub jupyterhub/jupyterhub --namespace jhub --create-namespace --version=2.0.0 --values config.yaml
+Release "jhub" does not exist. Installing it now.
+W0324 14:00:31.487881 2324251 reflector.go:347] k8s.io/client-go@v0.25.2/tools/cache/reflector.go:169: watch of *unstructured.Unstructured ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection lost") has prevented the request from succeeding
+W0324 14:00:43.154150 2324251 reflector.go:424] k8s.io/client-go@v0.25.2/tools/cache/reflector.go:169: failed to list *unstructured.Unstructured: Get "https://10.42.42.201:16443/apis/batch/v1/namespaces/jhub/jobs?fieldSelector=metadata.name%3Dhook-image-awaiter&resourceVersion=135172114": net/http: TLS handshake timeout
+I0324 14:00:43.154534 2324251 trace.go:205] Trace[513888607]: "Reflector ListAndWatch" name:k8s.io/client-go@v0.25.2/tools/cache/reflector.go:169 (24-Mar-2023 14:00:32.754) (total time: 10400ms):
+Trace[513888607]: ---"Objects listed" error:Get "https://10.42.42.201:16443/apis/batch/v1/namespaces/jhub/jobs?fieldSelector=metadata.name%3Dhook-image-awaiter&resourceVersion=135172114": net/http: TLS handshake timeout 10399ms (14:00:43.154)
+Trace[513888607]: [10.40025853s] [10.40025853s] END
+E0324 14:00:43.160675 2324251 reflector.go:140] k8s.io/client-go@v0.25.2/tools/cache/reflector.go:169: Failed to watch *unstructured.Unstructured: failed to list *unstructured.Unstructured: Get "https://10.42.42.201:16443/apis/batch/v1/namespaces/jhub/jobs?fieldSelector=metadata.name%3Dhook-image-awaiter&resourceVersion=135172114": net/http: TLS handshake timeout
+W0324 14:00:55.488889 2324251 reflector.go:424] k8s.io/client-go@v0.25.2/tools/cache/reflector.go:169: failed to list *unstructured.Unstructured: Get "https://10.42.42.201:16443/apis/batch/v1/namespaces/jhub/jobs?fieldSelector=metadata.name%3Dhook-image-awaiter&resourceVersion=135172114": net/http: TLS handshake timeout
+I0324 14:00:55.489068 2324251 trace.go:205] Trace[1860942638]: "Reflector ListAndWatch" name:k8s.io/client-go@v0.25.2/tools/cache/reflector.go:169 (24-Mar-2023 14:00:45.476) (total time: 10012ms):
+Trace[1860942638]: ---"Objects listed" error:Get "https://10.42.42.201:16443/apis/batch/v1/namespaces/jhub/jobs?fieldSelector=metadata.name%3Dhook-image-awaiter&resourceVersion=135172114": net/http: TLS handshake timeout 10011ms (14:00:55.488)
+Trace[1860942638]: [10.012014173s] [10.012014173s] END
+E0324 14:00:55.489139 2324251 reflector.go:140] k8s.io/client-go@v0.25.2/tools/cache/reflector.go:169: Failed to watch *unstructured.Unstructured: failed to list *unstructured.Unstructured: Get "https://10.42.42.201:16443/apis/batch/v1/namespaces/jhub/jobs?fieldSelector=metadata.name%3Dhook-image-awaiter&resourceVersion=135172114": net/http: TLS handshake timeout
+W0324 14:01:11.607181 2324251 reflector.go:424] k8s.io/client-go@v0.25.2/tools/cache/reflector.go:169: failed to list *unstructured.Unstructured: Get "https://10.42.42.201:16443/apis/batch/v1/namespaces/jhub/jobs?fieldSelector=metadata.name%3Dhook-image-awaiter&resourceVersion=135172114": net/http: TLS handshake timeout
+I0324 14:01:11.607460 2324251 trace.go:205] Trace[1449464739]: "Reflector ListAndWatch" name:k8s.io/client-go@v0.25.2/tools/cache/reflector.go:169 (24-Mar-2023 14:01:01.600) (total time: 10007ms):
+Trace[1449464739]: ---"Objects listed" error:Get "https://10.42.42.201:16443/apis/batch/v1/namespaces/jhub/jobs?fieldSelector=metadata.name%3Dhook-image-awaiter&resourceVersion=135172114": net/http: TLS handshake timeout 10006ms (14:01:11.607)
+Trace[1449464739]: [10.007141276s] [10.007141276s] END
+E0324 14:01:11.607561 2324251 reflector.go:140] k8s.io/client-go@v0.25.2/tools/cache/reflector.go:169: Failed to watch *unstructured.Unstructured: failed to list *unstructured.Unstructured: Get "https://10.42.42.201:16443/apis/batch/v1/namespaces/jhub/jobs?fieldSelector=metadata.name%3Dhook-image-awaiter&resourceVersion=135172114": net/http: TLS handshake timeout
+NAME: jhub
+LAST DEPLOYED: Fri Mar 24 13:58:04 2023
+NAMESPACE: jhub
+STATUS: deployed
+REVISION: 1
+TEST SUITE: None
+NOTES:
+.      __                          __                  __  __          __
+      / / __  __  ____    __  __  / /_  ___    _____  / / / / __  __  / /_
+ __  / / / / / / / __ \  / / / / / __/ / _ \  / ___/ / /_/ / / / / / / __ \
+/ /_/ / / /_/ / / /_/ / / /_/ / / /_  /  __/ / /    / __  / / /_/ / / /_/ /
+\____/  \__,_/ / .___/  \__, /  \__/  \___/ /_/    /_/ /_/  \__,_/ /_.___/
+              /_/      /____/
+
+       You have successfully installed the official JupyterHub Helm chart!
+
+### Installation info
+
+  - Kubernetes namespace: jhub
+  - Helm release name:    jhub
+  - Helm chart version:   2.0.0
+  - JupyterHub version:   3.0.0
+  - Hub pod packages:     See https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/2.0.0/images/hub/requirements.txt
+
+### Followup links
+
+  - Documentation:  https://z2jh.jupyter.org
+  - Help forum:     https://discourse.jupyter.org
+  - Social chat:    https://gitter.im/jupyterhub/jupyterhub
+  - Issue tracking: https://github.com/jupyterhub/zero-to-jupyterhub-k8s/issues
+
+### Post-installation checklist
+
+  - Verify that created Pods enter a Running state:
+
+      kubectl --namespace=jhub get pod
+
+    If a pod is stuck with a Pending or ContainerCreating status, diagnose with:
+
+      kubectl --namespace=jhub describe pod <name of pod>
+
+    If a pod keeps restarting, diagnose with:
+
+      kubectl --namespace=jhub logs --previous <name of pod>
+
+  - Verify an external IP is provided for the k8s Service proxy-public.
+
+      kubectl --namespace=jhub get service proxy-public
+
+    If the external ip remains <pending>, diagnose with:
+
+      kubectl --namespace=jhub describe service proxy-public
+
+  - Verify web based access:
+
+    You have not configured a k8s Ingress resource so you need to access the k8s
+    Service proxy-public directly.
+
+    If your computer is outside the k8s cluster, you can port-forward traffic to
+    the k8s Service proxy-public with kubectl to access it from your
+    computer.
+
+      kubectl --namespace=jhub port-forward service/proxy-public 8080:http
+
+    Try insecure HTTP access: http://localhost:8080
+```
+
+Then I reworked the storage, but Kubernetes wouldn't let me modify the existing PersistentVolumeClaims in place, so I uninstalled the release, redid the storage, and reinstalled:
+
+```
+helm uninstall jhub jupyterhub/jupyterhub --namespace jhub
+kubectl delete -f storage.yaml
+kubectl -n jhub get pv
+kubectl -n jhub delete pv pvc-1aa270e4-e14f-4425-b10d-e1b18861bb1f
+kubectl apply -f storage.yaml
+```
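+
+A quick sanity check before re-running the install, if needed: the old release should be gone and nothing from the deleted claim should still show up as bound.
+
+```
+helm list --namespace jhub
+kubectl -n jhub get pvc
+kubectl get pv | grep jhub
+```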
+
+Then reran the apply:
+
+```
+$ helm upgrade --cleanup-on-fail --install jhub jupyterhub/jupyterhub --namespace jhub --create-namespace --version=2.0.0 --values config.yaml
+```

+ 117 - 0
Workloads/jupyterhub/storage.yaml

@@ -0,0 +1,117 @@
+#---
+#apiVersion: v1
+#kind: PersistentVolume
+#metadata:
+#  name: hub-db-dir
+#  namespace: jhub
+#spec:
+#  capacity:
+#    storage: 1Gi
+#  volumeMode: Filesystem
+#  accessModes:
+#    - ReadWriteMany
+#  persistentVolumeReclaimPolicy: Retain # Keep 4eva
+#  storageClassName: managed-nfs-storage
+#  mountOptions:
+#    - hard
+#    - nfsvers=3
+#  nfs:
+#    path: /mnt/DroboFS/Shares/Kubernetes/volumes/static/hub-db-dir
+#    server: 10.42.42.10
+#  claimRef:
+#    name: hub-db-dir
+#    namespace: jhub
+#---
+#apiVersion: v1
+#kind: PersistentVolumeClaim
+#metadata:
+#  name: hub-db-dir
+#  namespace: jhub
+#  annotations:
+#    nfs.io/storage-path: "hub-db-dir"
+#spec:
+#  storageClassName: managed-nfs-storage
+#  accessModes:
+#    - ReadWriteMany
+#  resources:
+#    requests:
+#      storage: 1Gi
+#status: {}
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: claim-admin
+  namespace: jhub
+spec:
+  capacity:
+    storage: 10Gi
+  volumeMode: Filesystem
+  accessModes:
+    - ReadWriteMany
+  persistentVolumeReclaimPolicy: Retain # Keep 4eva
+  storageClassName: managed-nfs-storage
+  mountOptions:
+    - hard
+    - nfsvers=3
+  nfs:
+    path: /mnt/DroboFS/Shares/Kubernetes/volumes/static/claim-admin
+    server: 10.42.42.10
+  claimRef:
+    name: claim-admin
+    namespace: jhub
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: claim-admin
+  namespace: jhub
+  annotations:
+    nfs.io/storage-path: "claim-admin"
+spec:
+  storageClassName: managed-nfs-storage
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 1Gi
+status: {}
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: user-shared
+  namespace: jhub
+spec:
+  capacity:
+    storage: 10Gi
+  volumeMode: Filesystem
+  accessModes:
+    - ReadWriteMany
+  persistentVolumeReclaimPolicy: Retain # Keep 4eva
+  storageClassName: managed-nfs-storage
+  mountOptions:
+    - hard
+    - nfsvers=3
+  nfs:
+    path: /mnt/DroboFS/Shares/Kubernetes/volumes/static/jhub-user-shared
+    server: 10.42.42.10
+  claimRef:
+    name: user-shared
+    namespace: jhub
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: user-shared
+  namespace: jhub
+  annotations:
+    nfs.io/storage-path: "user-shared"
+spec:
+  storageClassName: managed-nfs-storage
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 1Gi
+status: {}

+ 257 - 0
Workloads/testing/humio.yaml

@@ -0,0 +1,257 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: humio
+  labels:
+    run: humio
+spec:
+  ports:
+    - name: zookeeper
+      port: 2181
+      targetPort: 2181
+    - name: kafka
+      port: 9092
+      targetPort: 9092
+    - name: http
+      port: 9081
+      targetPort: 9081
+  selector:
+    run: humio
+#  type: LoadBalancer
+#status:
+#  loadBalancer: {}
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: humio-data
+spec:
+  capacity:
+    storage: 10Ti
+  volumeMode: Filesystem
+  accessModes:
+    - ReadWriteMany
+  persistentVolumeReclaimPolicy: Retain # Keep 4eva
+  storageClassName: default
+  mountOptions:
+    - hard
+    - nfsvers=3
+  nfs:
+    path: /mnt/DroboFS/Shares/Kubernetes/volumes/static/humio-data
+    server: 10.42.42.10
+  claimRef:
+    name: humio-data
+    namespace: default
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: humio-data
+spec:
+  storageClassName: default
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 10Ti
+status: {}
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: humio-config
+spec:
+  capacity:
+    storage: 5Mi
+  volumeMode: Filesystem
+  accessModes:
+    - ReadWriteMany
+  persistentVolumeReclaimPolicy: Retain # Keep 4eva
+  storageClassName: default
+  mountOptions:
+    - hard
+    - nfsvers=3
+  nfs:
+    path: /mnt/DroboFS/Shares/Kubernetes/volumes/static/humio-config
+    server: 10.42.42.10
+  claimRef:
+    name: humio-config
+    namespace: default
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: humio-config
+  annotations:
+    nfs.io/storage-path: "humio-config"
+spec:
+  storageClassName: default
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 5Mi
+status: {}
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: humio-kafka-data
+spec:
+  capacity:
+    storage: 5Mi
+  volumeMode: Filesystem
+  accessModes:
+    - ReadWriteMany
+  persistentVolumeReclaimPolicy: Retain # Keep 4eva
+  storageClassName: default
+  mountOptions:
+    - hard
+    - nfsvers=3
+  nfs:
+    path: /mnt/DroboFS/Shares/Kubernetes/volumes/static/humio-kafka-data
+    server: 10.42.42.10
+  claimRef:
+    name: humio-kafka-data
+    namespace: default
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: humio-kafka-data
+  annotations:
+    nfs.io/storage-path: "humio-kafka-data"
+spec:
+  storageClassName: default
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 5Mi
+status: {}
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: humio
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      run: humio
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      labels:
+        run: humio
+    spec:
+      terminationGracePeriodSeconds: 30
+      containers:
+        - image: krishnarajeshs/humio_arm:v1.0
+          imagePullPolicy: "Always"
+          name: humio
+          env:
+            - name: TZ
+              value: US/Michigan
+            - name: user
+              value: root
+            - name: HUMIO_PORT
+              value: "9081"
+            - name: ELASTIC_PORT
+              value: "9202"
+            - name: ZOOKEEPER_URL
+              value: "127.0.0.1:2181"
+            - name: KAFKA_SERVERS
+              value: "127.0.0.1:9092"
+            - name: EXTERNAL_URL
+              value: "http://127.0.0.1:9081"
+            - name: PUBLIC_URL
+              value: "https://humio.monkeybox.org"
+            - name: HUMIO_SOCKET_BIND
+              value: "0.0.0.0"
+            - name: HUMIO_HTTP_BIND
+              value: "0.0.0.0"
+            - name: HUMIO_JVM_ARGS
+              value: "-Xss2M"
+            - name: AUTHENTICATION_METHOD
+              value: "single-user"
+            - name: SINGLE_USER_USERNAME
+              value: "fdamstra"
+            - name: SINGLE_USER_PASSWORD
+              value: "Bluem00n"
+          ports:
+            - containerPort: 2181
+            - containerPort: 9092
+            - containerPort: 9081
+          resources:
+            limits:
+              memory: "4096Mi"
+              cpu: "2000m"
+            requests:
+              memory: "500Mi"
+              cpu: "1000m"
+          volumeMounts:
+            - mountPath: /data
+              name: humio-data
+            - mountPath: /etc/humio
+              name: humio-config
+            - mountPath: /data/kafka-data
+              name: humio-kafka-data
+      restartPolicy: Always
+      volumes:
+        - name: humio-config
+          persistentVolumeClaim:
+            claimName: humio-config
+        - name: humio-data
+          persistentVolumeClaim:
+            claimName: humio-data
+        - name: humio-kafka-data
+          persistentVolumeClaim:
+            claimName: humio-kafka-data
+#      dnsPolicy: "None"
+#      dnsConfig:
+#        nameservers:
+#          - 10.42.42.239
+#          - 10.42.42.1
+#        searches:
+#          - default.svc.cluster.local
+#          - svc.cluster.local 
+#          - cluster.local
+#        options:
+#          - name: ndots
+#            value: "2"
+#          - name: edns0
+#          - name: trust-ad
+status: {}
+---
+# Hosting
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: humio-ingress
+  annotations:
+    cert-manager.io/cluster-issuer: "letsencrypt-prod"
+    ## No basic auth for humio
+    ## type of authentication
+    #nginx.ingress.kubernetes.io/auth-type: basic
+    ## name of the secret that contains the user/password definitions
+    #nginx.ingress.kubernetes.io/auth-secret: basic-auth
+    ## message to display with an appropriate context why the authentication is required
+    #nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - Octoprint'
+spec:
+  tls:
+  - hosts:
+    - humio.monkeybox.org
+    secretName: humio-tls
+  rules:
+  - host: humio.monkeybox.org
+    http:
+      paths:
+      - path: /
+        pathType: Prefix
+        backend:
+          service:
+            name: humio
+            port:
+              number: 9081

+ 251 - 0
Workloads/testing/humio.yaml.originaldirectories

@@ -0,0 +1,251 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: humio
+  labels:
+    run: humio
+spec:
+  ports:
+    - name: zookeeper
+      port: 2181
+      targetPort: 2181
+    - name: kafka
+      port: 9092
+      targetPort: 9092
+    - name: http
+      port: 9081
+      targetPort: 9081
+  selector:
+    run: humio
+#  type: LoadBalancer
+#status:
+#  loadBalancer: {}
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: humio-data
+spec:
+  capacity:
+    storage: 10Ti
+  volumeMode: Filesystem
+  accessModes:
+    - ReadWriteMany
+  persistentVolumeReclaimPolicy: Retain # Keep 4eva
+  storageClassName: default
+  mountOptions:
+    - hard
+    - nfsvers=3
+  nfs:
+    path: /mnt/DroboFS/Shares/Kubernetes/volumes/static/humio-data
+    server: 10.42.42.10
+  claimRef:
+    name: humio-data
+    namespace: default
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: humio-data
+spec:
+  storageClassName: standard
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 10Ti
+status: {}
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: humio-config
+spec:
+  capacity:
+    storage: 5Mi
+  volumeMode: Filesystem
+  accessModes:
+    - ReadWriteMany
+  persistentVolumeReclaimPolicy: Retain # Keep 4eva
+  storageClassName: default
+  mountOptions:
+    - hard
+    - nfsvers=3
+  nfs:
+    path: /mnt/DroboFS/Shares/Kubernetes/volumes/static/humio-config
+    server: 10.42.42.10
+  claimRef:
+    name: humio-config
+    namespace: default
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: humio-config
+  annotations:
+    nfs.io/storage-path: "humio-config"
+spec:
+  storageClassName: default
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 5Mi
+status: {}
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: humio-kafka-data
+spec:
+  capacity:
+    storage: 5Mi
+  volumeMode: Filesystem
+  accessModes:
+    - ReadWriteMany
+  persistentVolumeReclaimPolicy: Retain # Keep 4eva
+  storageClassName: default
+  mountOptions:
+    - hard
+    - nfsvers=3
+  nfs:
+    path: /mnt/DroboFS/Shares/Kubernetes/volumes/static/humio-kafka-data
+    server: 10.42.42.10
+  claimRef:
+    name: humio-kafka-data
+    namespace: default
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: humio-kafka-data
+  annotations:
+    nfs.io/storage-path: "humio-kafka-data"
+spec:
+  storageClassName: default
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 5Mi
+status: {}
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: humio
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      run: humio
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      labels:
+        run: humio
+    spec:
+      terminationGracePeriodSeconds: 30
+      containers:
+        - image: krishnarajeshs/humio_arm:v1.0
+          imagePullPolicy: "Always"
+          name: humio
+          env:
+            - name: TZ
+              value: US/Michigan
+            - name: user
+              value: root
+            - name: HUMIO_PORT
+              value: "9081"
+            - name: ELASTIC_PORT
+              value: "9202"
+            - name: ZOOKEEPER_URL
+              value: "127.0.0.1:2181"
+            - name: KAFKA_SERVERS
+              value: "127.0.0.1:9092"
+            - name: EXTERNAL_URL
+              value: "http://127.0.0.1:9081"
+            - name: PUBLIC_URL
+              value: "https://humio.monkeybox.org"
+            - name: HUMIO_SOCKET_BIND
+              value: "0.0.0.0"
+            - name: HUMIO_HTTP_BIND
+              value: "0.0.0.0"
+            - name: HUMIO_JVM_ARGS
+              value: "-Xss2M"
+          ports:
+            - containerPort: 2181
+            - containerPort: 9092
+            - containerPort: 9081
+          resources:
+            limits:
+              memory: "4096Mi"
+              cpu: "2000m"
+            requests:
+              memory: "500Mi"
+              cpu: "1000m"
+          volumeMounts:
+            - mountPath: /opt/humio/data
+              name: humio-data
+            - mountPath: /opt/humio/config
+              name: humio-config
+            - mountPath: /opt/humio/data/kafka-data
+              name: humio-kafka-data
+      restartPolicy: Always
+      volumes:
+        - name: humio-config
+          persistentVolumeClaim:
+            claimName: humio-config
+        - name: humio-data
+          persistentVolumeClaim:
+            claimName: humio-data
+        - name: humio-kafka-data
+          persistentVolumeClaim:
+            claimName: humio-kafka-data
+#      dnsPolicy: "None"
+#      dnsConfig:
+#        nameservers:
+#          - 10.42.42.239
+#          - 10.42.42.1
+#        searches:
+#          - default.svc.cluster.local
+#          - svc.cluster.local 
+#          - cluster.local
+#        options:
+#          - name: ndots
+#            value: "2"
+#          - name: edns0
+#          - name: trust-ad
+status: {}
+---
+# Hosting
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: humio-ingress
+  annotations:
+    cert-manager.io/cluster-issuer: "letsencrypt-stage"
+    ## No basic auth for humio
+    ## type of authentication
+    #nginx.ingress.kubernetes.io/auth-type: basic
+    ## name of the secret that contains the user/password definitions
+    #nginx.ingress.kubernetes.io/auth-secret: basic-auth
+    ## message to display with an appropriate context why the authentication is required
+    #nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - Octoprint'
+spec:
+  tls:
+  - hosts:
+    - humio.monkeybox.org
+    secretName: humio-tls
+  rules:
+  - host: humio.monkeybox.org
+    http:
+      paths:
+      - path: /
+        pathType: Prefix
+        backend:
+          service:
+            name: humio
+            port:
+              number: 9081