
Flink work. Minio added.

Fred Damstra (Macbook 2015) 2 years ago
parent
commit
8ec77dacc3

+ 63 - 0
Workloads/flink/1.flink-configuration-configmap.yaml

@@ -0,0 +1,63 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: flink-config
+  labels:
+    app: flink
+data:
+  flink-conf.yaml: |+
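+    # These values mirror the reference configuration from the official
+    # Flink standalone-Kubernetes guide linked in the readme.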
+    jobmanager.rpc.address: flink-jobmanager
+    taskmanager.numberOfTaskSlots: 2
+    blob.server.port: 6124
+    jobmanager.rpc.port: 6123
+    taskmanager.rpc.port: 6122
+    queryable-state.proxy.ports: 6125
+    jobmanager.memory.process.size: 1600m
+    taskmanager.memory.process.size: 1728m
+    parallelism.default: 2    
+  log4j-console.properties: |+
+    # This affects logging for both user code and Flink
+    rootLogger.level = INFO
+    rootLogger.appenderRef.console.ref = ConsoleAppender
+    rootLogger.appenderRef.rolling.ref = RollingFileAppender
+
+    # Uncomment this if you want to _only_ change Flink's logging
+    #logger.flink.name = org.apache.flink
+    #logger.flink.level = INFO
+
+    # The following lines keep the log level of common libraries/connectors at
+    # INFO. The root logger does not override this; you have to change the log
+    # levels here manually.
+    logger.akka.name = akka
+    logger.akka.level = INFO
+    logger.kafka.name = org.apache.kafka
+    logger.kafka.level = INFO
+    logger.hadoop.name = org.apache.hadoop
+    logger.hadoop.level = INFO
+    logger.zookeeper.name = org.apache.zookeeper
+    logger.zookeeper.level = INFO
+
+    # Log all info-level messages to the console
+    appender.console.name = ConsoleAppender
+    appender.console.type = CONSOLE
+    appender.console.layout.type = PatternLayout
+    appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
+
+    # Log all info-level messages to the given rolling file
+    appender.rolling.name = RollingFileAppender
+    appender.rolling.type = RollingFile
+    appender.rolling.append = false
+    appender.rolling.fileName = ${sys:log.file}
+    appender.rolling.filePattern = ${sys:log.file}.%i
+    appender.rolling.layout.type = PatternLayout
+    appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
+    appender.rolling.policies.type = Policies
+    appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
+    appender.rolling.policies.size.size = 100MB
+    appender.rolling.strategy.type = DefaultRolloverStrategy
+    appender.rolling.strategy.max = 10
+
+    # Suppress the irrelevant (wrong) warnings from the Netty channel handler
+    logger.netty.name = org.jboss.netty.channel.DefaultChannelPipeline
+    logger.netty.level = OFF    
+

+ 47 - 0
Workloads/flink/2.jobmanager-service.yaml

@@ -0,0 +1,47 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: flink-jobmanager
+spec:
+  type: LoadBalancer
+  loadBalancerIP: 10.42.42.236
+  ports:
+  - name: rpc
+    port: 6123
+  - name: blob-server
+    port: 6124
+  - name: webui
+    port: 8081
+  selector:
+    app: flink
+    component: jobmanager
+---
+# Hosting
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: flink-jobmanager-auth
+  annotations:
+    #nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
+    #nginx.ingress.kubernetes.io/proxy-ssl-verify: "off"
+    cert-manager.io/cluster-issuer: "letsencrypt-prod"
+    # type of authentication
+    nginx.ingress.kubernetes.io/auth-type: basic
+    nginx.ingress.kubernetes.io/auth-secret: basic-auth
+    nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - Flink'
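+    # nginx resolves "basic-auth" in this Ingress's namespace; the secret
+    # must exist before auth will work (see the readme for a sketch)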
+spec:
+  tls:
+  - hosts:
+    - flink.monkeybox.org
+    secretName: flink-tls
+  rules:
+  - host: flink.monkeybox.org
+    http:
+      paths:
+      - path: /
+        pathType: Prefix
+        backend:
+          service:
+            name: flink-jobmanager
+            port:
+              number: 8081

+ 54 - 0
Workloads/flink/3.jobmanager-session-deployment-non-ha.yaml

@@ -0,0 +1,54 @@
+# Non-HA: a single JobManager replica, which is enough to let k8s restart it automatically
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: flink-jobmanager
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: flink
+      component: jobmanager
+  template:
+    metadata:
+      labels:
+        app: flink
+        component: jobmanager
+    spec:
+      containers:
+      - name: jobmanager
+        image: apache/flink:1.16.2-scala_2.12
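+        # keep this Flink version in lockstep with the taskmanager image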
+        args: ["jobmanager"]
+        ports:
+        - containerPort: 6123
+          name: rpc
+        - containerPort: 6124
+          name: blob-server
+        - containerPort: 8081
+          name: webui
+        resources:
+          limits:
+            memory: "8192Mi"
+            cpu: "4000m"
+          requests:
+            memory: "1024Mi"
+            cpu: "1000m"
+        livenessProbe:
+          tcpSocket:
+            port: 6123
+          initialDelaySeconds: 30
+          periodSeconds: 60
+        volumeMounts:
+        - name: flink-config-volume
+          mountPath: /opt/flink/conf
+        securityContext:
+          runAsUser: 9999  # refers to user _flink_ from official flink image, change if necessary
+      volumes:
+      - name: flink-config-volume
+        configMap:
+          name: flink-config
+          items:
+          - key: flink-conf.yaml
+            path: flink-conf.yaml
+          - key: log4j-console.properties
+            path: log4j-console.properties

+ 71 - 0
Workloads/flink/4.taskmanager-session-deployment.beam.yaml

@@ -0,0 +1,71 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: flink-taskmanager
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: flink
+      component: taskmanager
+  template:
+    metadata:
+      labels:
+        app: flink
+        component: taskmanager
+    spec:
+      containers:
+      - name: taskmanager
+        image: apache/flink:1.16.2-scala_2.12
+        # image: apache/flink:1.10.3
+        # image: apache/flink:1.12.0-scala_2.11
+        args: ["taskmanager"]
+        ports:
+        - containerPort: 6122
+          name: rpc
+        - containerPort: 6125
+          name: query-state
+        livenessProbe:
+          tcpSocket:
+            port: 6122
+          initialDelaySeconds: 30
+          periodSeconds: 60
+        resources:
+          limits:
+            memory: "8192Mi"
+            cpu: "4000m"
+          requests:
+            memory: "1024Mi"
+            cpu: "1000m"
+        volumeMounts:
+        - name: flink-config-volume
+          mountPath: /opt/flink/conf/
+        securityContext:
+          runAsUser: 9999  # refers to user _flink_ from official flink image, change if necessary
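+      # Beam SDK worker-pool sidecar: pipelines submitted with
+      #   --environment_type=EXTERNAL --environment_config=<host>:50000
+      # run their Python user code in this container instead of in Flink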
+      - name: beam-worker-pool
+        image: apache/beam_python3.7_sdk
+        args: ["--worker_pool"]
+        ports:
+        - containerPort: 50000
+          name: pool
+        livenessProbe:
+          tcpSocket:
+            port: 50000
+          initialDelaySeconds: 30
+          periodSeconds: 60
+        resources:
+          limits:
+            memory: "8192Mi"
+            cpu: "4000m"
+          requests:
+            memory: "1024Mi"
+            cpu: "1000m"
+      volumes:
+      - name: flink-config-volume
+        configMap:
+          name: flink-config
+          items:
+          - key: flink-conf.yaml
+            path: flink-conf.yaml
+          - key: log4j-console.properties
+            path: log4j-console.properties

+ 44 - 0
Workloads/flink/4.taskmanager-session-deployment.yaml.dist

@@ -0,0 +1,44 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: flink-taskmanager
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: flink
+      component: taskmanager
+  template:
+    metadata:
+      labels:
+        app: flink
+        component: taskmanager
+    spec:
+      containers:
+      - name: taskmanager
+        image: apache/flink:1.17.1-scala_2.12
+        args: ["taskmanager"]
+        ports:
+        - containerPort: 6122
+          name: rpc
+        - containerPort: 6125
+          name: query-state
+        livenessProbe:
+          tcpSocket:
+            port: 6122
+          initialDelaySeconds: 30
+          periodSeconds: 60
+        volumeMounts:
+        - name: flink-config-volume
+          mountPath: /opt/flink/conf/
+        securityContext:
+          runAsUser: 9999  # refers to user _flink_ from official flink image, change if necessary
+      volumes:
+      - name: flink-config-volume
+        configMap:
+          name: flink-config
+          items:
+          - key: flink-conf.yaml
+            path: flink-conf.yaml
+          - key: log4j-console.properties
+            path: log4j-console.properties

+ 13 - 0
Workloads/flink/5.beam-service.yaml

@@ -0,0 +1,13 @@
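+# LoadBalancer exposing the Beam worker-pool sidecars (port 50000) outside the cluster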
+apiVersion: v1
+kind: Service
+metadata:
+  name: beam-worker-pool
+spec:
+  type: LoadBalancer
+  loadBalancerIP: 10.42.42.235
+  ports:
+  - name: beam
+    port: 50000
+  selector:
+    app: flink
+    component: taskmanager

+ 0 - 0
Workloads/flink/1.account.yaml → Workloads/flink/old/1.account.yaml


+ 0 - 0
Workloads/flink/2.flink-configuration-configmap.yaml → Workloads/flink/old/2.flink-configuration-configmap.yaml


+ 0 - 0
Workloads/flink/3.jobmanager.yaml → Workloads/flink/old/3.jobmanager.yaml


+ 0 - 0
Workloads/flink/4.jobmanager-session-deployment.yaml → Workloads/flink/old/4.jobmanager-session-deployment.yaml


+ 0 - 0
Workloads/flink/5.taskmanager-session-deployment.yaml → Workloads/flink/old/5.taskmanager-session-deployment.yaml


+ 0 - 0
Workloads/flink/README.nativek8s.md → Workloads/flink/old/README.nativek8s.md


+ 0 - 0
Workloads/flink/README.standalone.md → Workloads/flink/old/README.standalone.md


+ 0 - 0
Workloads/flink/beam_test.py → Workloads/flink/old/beam_test.py


+ 26 - 0
Workloads/flink/readme.md

@@ -0,0 +1,26 @@
+# Flink
+
+## Sources
+
+* [Official flink "how to install"](https://nightlies.apache.org/flink/flink-docs-release-1.17/docs/deployment/resource-providers/standalone/kubernetes)
+* [An old guide](https://python.plainenglish.io/apache-beam-flink-cluster-kubernetes-python-a1965f37b7cb)
+
+## Process
+
+Relative to the upstream examples, the files have been modified:
+* LoadBalancer service and basic-auth Ingress added (see the secret sketch below)
+* Beam worker pool added to 4.taskmanager-session-deployment.beam.yaml
+
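+The Ingress references a `basic-auth` secret, which must exist in the same
+namespace before auth will work. A minimal sketch of creating it (the
+username `fred` is just an example; requires `htpasswd` from apache2-utils):
+
+```
+htpasswd -c auth fred
+kubectl create secret generic basic-auth --from-file=auth
+rm auth
+```
+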
+Apply each file in turn. I didn't use a separate namespace, as doing so broke the HTTP basic auth.
+
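+In order, that is:
+
+```
+kubectl apply -f 1.flink-configuration-configmap.yaml
+kubectl apply -f 2.jobmanager-service.yaml
+kubectl apply -f 3.jobmanager-session-deployment-non-ha.yaml
+kubectl apply -f 4.taskmanager-session-deployment.beam.yaml
+kubectl apply -f 5.beam-service.yaml
+```
+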
+## Testing
+
+This didn't work yet; hopefully I'll update these notes.
+
+```
+python3.10 -m venv env
+source env/bin/activate
+pip3 install -r requirements.txt
+python3.10 test-beam.py
+```
+
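+For the record, a hypothetical submission against this cluster (unverified, and
+assuming test-beam.py passes argv through to Beam's PipelineOptions; the IPs are
+the LoadBalancer addresses from the manifests above):
+
+```
+python3.10 test-beam.py \
+  --runner=FlinkRunner \
+  --flink_master=10.42.42.236:8081 \
+  --environment_type=EXTERNAL \
+  --environment_config=10.42.42.235:50000
+```
+
+One suspect for the failure: the client venv is Python 3.10 while the worker
+pool image is the python3.7 SDK; Beam generally wants those versions to match.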

+ 31 - 0
Workloads/minio/minio-console-ingress.yaml

@@ -0,0 +1,31 @@
+# Hosting
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: minio-console
+  namespace: minio-operator
+  annotations:
+    cert-manager.io/cluster-issuer: "letsencrypt-prod"
+    ## basic auth for index
+    # type of authentication
+    #nginx.ingress.kubernetes.io/auth-type: basic
+    # name of the secret that contains the user/password definitions
+    #nginx.ingress.kubernetes.io/auth-secret: basic-auth
+    # message to display with an appropriate context why the authentication is required
+    #nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - Jupyterhub'
+spec:
+  tls:
+  - hosts:
+    - minio-console.monkeybox.org
+    secretName: minio-console-tls
+  rules:
+  - host: minio-console.monkeybox.org
+    http:
+      paths:
+      - path: /
+        pathType: Prefix
+        backend:
+          service:
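+            # service name and port as reported by `microk8s enable minio` (Console on 9090)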
+            name: microk8s-console
+            port:
+              number: 9090

+ 31 - 0
Workloads/minio/minio-ingress.yaml

@@ -0,0 +1,31 @@
+# Hosting
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: minio
+  namespace: minio-operator
+  annotations:
+    cert-manager.io/cluster-issuer: "letsencrypt-prod"
+    ## basic auth for index
+    # type of authentication
+    #nginx.ingress.kubernetes.io/auth-type: basic
+    # name of the secret that contains the user/password definitions
+    #nginx.ingress.kubernetes.io/auth-secret: basic-auth
+    # message to display with an appropriate context why the authentication is required
+    #nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - Jupyterhub'
+spec:
+  tls:
+  - hosts:
+    - minio.monkeybox.org
+    secretName: minio-tls
+  rules:
+  - host: minio.monkeybox.org
+    http:
+      paths:
+      - path: /
+        pathType: Prefix
+        backend:
+          service:
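+            # microk8s-hl: the tenant's headless service (the S3 API listens on 9000)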
+            name: microk8s-hl
+            port:
+              number: 9000

+ 37 - 0
Workloads/minio/readme.md

@@ -0,0 +1,37 @@
+# Installation
+
+```
+# For options:
+sudo microk8s enable minio:-h
+# Install with 100G capacity on the default storage class
+microk8s enable minio -c 100G -s default
+```
+
+This produces a lot of output; the notable parts:
+
+```
+To open Operator UI, start a port forward using this command:
+
+kubectl minio proxy -n minio-operator
+
+-----------------
+Create default tenant with:
+
+  Name: microk8s
+  Capacity: 100G
+  Servers: 1
+  Volumes: 1
+  Storage class: default
+  TLS: no
+  Prometheus: no
+
+APPLICATION	SERVICE NAME    	NAMESPACE     	SERVICE TYPE	SERVICE PORT
+MinIO      	minio           	minio-operator	ClusterIP   	80
+Console    	microk8s-console	minio-operator	ClusterIP   	9090
+
+You can manage minio tenants using the kubectl-minio plugin.
+
+For more details, use
+    microk8s kubectl-minio --help
+```
+
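+# Ingress
+
+The two manifests in this directory publish the tenant and console at
+minio.monkeybox.org and minio-console.monkeybox.org:
+
+```
+microk8s kubectl apply -f minio-ingress.yaml -f minio-console-ingress.yaml
+```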