Setting Up a Multi-Broker Kafka on K8s

GisangLee · April 24, 2024

1. ZooKeeper

Add the Bitnami Helm repository, then dump the ZooKeeper chart's default values so they can be customized:

helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
helm show values bitnami/zookeeper > zoo_values.yaml
In zoo_values.yaml, enable the pod and container security contexts so the pods run as the non-root Bitnami user (UID/GID 1001):

podSecurityContext:
  enabled: true
  fsGroupChangePolicy: Always
  sysctls: []
  supplementalGroups: []
  fsGroup: 1001
  
containerSecurityContext:
  enabled: true
  seLinuxOptions: {}
  runAsUser: 1001
  runAsGroup: 1001
  seccompProfile:
    type: "RuntimeDefault"
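To confirm these values actually land on the StatefulSet before installing, the chart can be rendered locally. A quick sketch, assuming Helm 3:

helm template <my name> bitnami/zookeeper -f zoo_values.yaml -n <ns> | grep -B 2 -A 6 securityContext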
Next, create one hostPath PersistentVolume per ZooKeeper replica (PersistentVolumes are cluster-scoped, so they take no namespace):

apiVersion: v1
kind: PersistentVolume
metadata:
  name: zoo-pv-0
spec:
  capacity:
    storage: 8Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Recycle
  hostPath:
    path: <host path>

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zoo-pv-1
spec:
  capacity:
    storage: 8Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Recycle
  hostPath:
    path: <host path>

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zoo-pv-2
spec:
  capacity:
    storage: 8Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Recycle
  hostPath:
    path: <host path>
Give the Bitnami user (UID 1001) ownership of each of the three host paths, then apply the PV manifests (saved here as pvc.yaml):

sudo chown -R 1001:1001 <pv host path>
k apply -f pvc.yaml
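For the chart's generated PVCs to bind to these pre-created volumes, the persistence request in zoo_values.yaml has to be compatible (same size, matching storage class). A minimal sketch, assuming no default StorageClass claims the PVCs first; the parameter names follow the Bitnami chart, so check zoo_values.yaml for the exact defaults of your chart version:

persistence:
  enabled: true
  storageClass: ""
  size: 8Gi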

helm install <my name> -f zoo_values.yaml -n <ns> bitnami/zookeeper --set replicaCount=3
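Once the release is up, the ensemble can be sanity-checked. A quick sketch, assuming the pods follow the usual <my name>-zookeeper-N naming:

k get pods -n <ns> -l app.kubernetes.io/name=zookeeper
k exec -n <ns> <my name>-zookeeper-0 -- zkServer.sh status

zkServer.sh status should report one leader and two followers across the three pods.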

2. Kafka

The bitnami repo is already registered from step 1, so refresh it and dump the Kafka chart's default values:

helm repo update
helm show values bitnami/kafka > kafka_values.yaml
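The keys used below (broker, controller, kraft, externalZookeeper) assume a recent bitnami/kafka chart; older chart versions use a flat layout, so it is worth checking which chart version the repo serves, for example:

helm search repo bitnami/kafka --versions | head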

In kafka_values.yaml, run three brokers in ZooKeeper mode (no KRaft controllers), point them at the external ZooKeeper ensemble, and let the provisioning job create the topics:

...
...
broker:
  replicaCount: 3
...
...
controller:
  replicaCount: 0
  controllerOnly: false
...
...
provisioning:
  numPartitions: 3
  replicationFactor: 3
  topics:
  - name: topic-1
    partitions: 3
    replicationFactor: 3
    config:
      flush.messages: 3
  - name: topic-2
    partitions: 3
    replicationFactor: 3
    config:
      flush.messages: 3
  - name: topic-3
    partitions: 3
    replicationFactor: 3
    config:
      flush.messages: 3
...
...
...
  podSecurityContext:
    enabled: true
    fsGroupChangePolicy: Always
    sysctls: []
    supplementalGroups: []
    fsGroup: 1001
    seccompProfile:
      type: "RuntimeDefault"
  containerSecurityContext:
    enabled: true
    seLinuxOptions: {}
    runAsUser: 1001
    runAsGroup: 1001
    runAsNonRoot: true
    allowPrivilegeEscalation: false
    readOnlyRootFilesystem: true
    capabilities:
      drop: ["ALL"]
...
...
kraft:
  enabled: false
...
...
zookeeper:
  enabled: false
  replicaCount: 1
...
...
externalZookeeper:
  servers: ["zoo-headless.<ns>.svc.cluster.local"]
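Two things are easy to miss here. The externalZookeeper.servers entry must match the headless service the ZooKeeper release actually created (check with k get svc -n <ns>; with the Bitnami chart it is typically <release>-zookeeper-headless.<ns>.svc.cluster.local). And the topic provisioning job only runs when it is explicitly enabled, which it is not by default; a minimal sketch of the extra flag, alongside the provisioning block above:

provisioning:
  enabled: true

With kafka_values.yaml in place, create one hostPath PersistentVolume per broker: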
apiVersion: v1
kind: PersistentVolume
metadata:
  name: kafka-pv-1
spec:
  capacity:
    storage: 8Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Recycle
  hostPath:
    path: /var/log/kafka-data-1

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: kafka-pv-2
spec:
  capacity:
    storage: 8Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Recycle
  hostPath:
    path: /var/log/kafka-data-2

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: kafka-pv-3
spec:
  capacity:
    storage: 8Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Recycle
  hostPath:
    path: /var/log/kafka-data-3
Give the Bitnami user ownership of each broker's host path, then apply the PV manifests above (saved to a file such as kafka_pv.yaml):

sudo chown -R 1001:1001 /var/log/kafka-data-1
sudo chown -R 1001:1001 /var/log/kafka-data-2
sudo chown -R 1001:1001 /var/log/kafka-data-3
k apply -f kafka_pv.yaml

Finally, install Kafka. The --set flags restate the key settings as a safeguard, and the client listener is forced to PLAINTEXT so no SASL/TLS client configuration is needed:

helm install -n <ns> <name> bitnami/kafka -f kafka_values.yaml \
  --set broker.replicaCount=3 \
  --set zookeeper.enabled=false \
  --set controller.replicaCount=0 \
  --set listeners.client.protocol=PLAINTEXT
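To verify the cluster end to end, a throwaway client pod can list the provisioned topics and push a test message. A minimal sketch, assuming the release is named <name> in <ns>, so the bootstrap service is <name>-kafka:9092:

k run kafka-client --rm -it -n <ns> --image bitnami/kafka -- bash

and inside the pod's shell:

kafka-topics.sh --bootstrap-server <name>-kafka:9092 --list
kafka-console-producer.sh --bootstrap-server <name>-kafka:9092 --topic topic-1
kafka-console-consumer.sh --bootstrap-server <name>-kafka:9092 --topic topic-1 --from-beginning

A message typed into the producer should show up in the consumer if replication across the three brokers is working.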