Installing Kafka with Helm

Install Kafka


[root@master ~]# helm repo add bitnami  https://charts.bitnami.com/bitnami
[root@master ~]# helm repo update bitnami
[root@master ~]# helm search repo bitnami/kafka -l |head -n 5
NAME            CHART VERSION   APP VERSION     DESCRIPTION                                       
bitnami/kafka   30.0.5          3.8.0           Apache Kafka is a distributed streaming platfor...
bitnami/kafka   30.0.4          3.8.0           Apache Kafka is a distributed streaming platfor...
bitnami/kafka   30.0.3          3.8.0           Apache Kafka is a distributed streaming platfor...
bitnami/kafka   30.0.2          3.8.0           Apache Kafka is a distributed streaming platfor...
  • Download the latest chart package: helm pull bitnami/kafka

  • Download a specific version: helm pull bitnami/kafka --version 30.0.2

  • Download and untar: helm pull bitnami/kafka --untar

Download the 30.0.5 chart package, unpack it, and go through values.yaml, then override the chart defaults with a few custom values.
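A quick way to do that (a sketch; the ./kafka directory is what helm pull --untar creates):

    # pull and unpack chart version 30.0.5
    helm pull bitnami/kafka --version 30.0.5 --untar
    # browse the chart's default values to decide what to override
    less kafka/values.yaml
    # or dump the defaults without unpacking
    helm show values bitnami/kafka --version 30.0.5 > default-values.yaml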

For reference:

To run without persistence, set persistence.enabled: false

[root@master ~]# cat kafka-values.yaml 
image:
  registry: docker.io
  repository: bitnami/kafka
  tag: 3.8.0-debian-12-r3
  pullPolicy: IfNotPresent
listeners:
  client:
    containerPort: 9092
    protocol: PLAINTEXT 
    name: CLIENT
  controller:
    name: CONTROLLER
    containerPort: 9093
    protocol: PLAINTEXT
    sslClientAuth: ""
  interbroker:
    containerPort: 9094
    protocol: PLAINTEXT
    name: INTERNAL
    sslClientAuth: ""
  external:
    containerPort: 9095
    protocol: PLAINTEXT
    name: EXTERNAL
    sslClientAuth: ""
controller:
  replicaCount: 3
  resources:
    limits:
      cpu: "1"
      memory: "1Gi"
    requests:
      cpu: "0.5"
      memory: "500Mi"
  autoscaling:
    hpa:
      enabled: true
      minReplicas: "3"
      maxReplicas: "5"
      targetCPU: "85"
      targetMemory: "85"
  persistence:
    enabled: true
    #storageClass: "standard"
    accessModes:
      - ReadWriteOnce
    size: 1Gi

broker:
  replicaCount: 0
service:
  type: ClusterIP
  ports:
    client: 9092
    controller: 9093
    interbroker: 9094
    external: 9095
metrics:
  jmx:
    enabled: false
    resources:
      requests:
        cpu: 0.1
        memory: 128Mi
      limits:
        cpu: 0.5
        memory: 512Mi
kraft:
  enabled: true

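Before installing, the rendered manifests can be sanity-checked against these overrides. This step is optional; both commands below are a sketch and make no changes to the cluster:

    # render the chart locally with the custom values
    helm template kafka bitnami/kafka --version 30.0.5 -f kafka-values.yaml --namespace kube-public | less
    # or perform a server-side dry run of the install
    helm install kafka bitnami/kafka -f kafka-values.yaml --namespace kube-public --dry-run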
[root@master kafka]# kubectl create namespace kube-public
[root@master kafka]# helm install kafka bitnami/kafka -f kafka-values.yaml --namespace kube-public
NAME: kafka
LAST DEPLOYED: Mon Sep  9 21:07:48 2024
NAMESPACE: kube-public
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
CHART NAME: kafka
CHART VERSION: 30.0.5
APP VERSION: 3.8.0

** Please be patient while the chart is being deployed **

Kafka can be accessed by consumers via port 9092 on the following DNS name from within your cluster:

    kafka.kube-public.svc.cluster.local

Each Kafka broker can be accessed by producers via port 9092 on the following DNS name(s) from within your cluster:

    kafka-controller-0.kafka-controller-headless.kube-public.svc.cluster.local:9092
    kafka-controller-1.kafka-controller-headless.kube-public.svc.cluster.local:9092
    kafka-controller-2.kafka-controller-headless.kube-public.svc.cluster.local:9092

To create a pod that you can use as a Kafka client run the following commands:

    ===>>> temporary client pod
    kubectl run kafka-client --restart='Never' --image docker.io/bitnami/kafka:3.8.0-debian-12-r3 --namespace kube-public --command -- sleep infinity
    kubectl exec --tty -i kafka-client --namespace kube-public -- bash

    PRODUCER:  ===>>> producer
        kafka-console-producer.sh \
            --broker-list kafka-controller-0.kafka-controller-headless.kube-public.svc.cluster.local:9092,kafka-controller-1.kafka-controller-headless.kube-public.svc.cluster.local:9092,kafka-controller-2.kafka-controller-headless.kube-public.svc.cluster.local:9092 \
            --topic test

    CONSUMER:  ===>>> consumer
        kafka-console-consumer.sh \
            --bootstrap-server kafka.kube-public.svc.cluster.local:9092 \
            --topic test \
            --from-beginning
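After the install, a quick health check (the label selector assumes the chart's standard app.kubernetes.io labels; three controller pods and, with persistence enabled, three 1Gi PVCs are expected):

    helm status kafka -n kube-public
    kubectl get pods -n kube-public -l app.kubernetes.io/name=kafka
    kubectl get pvc -n kube-public
    kubectl get svc -n kube-public | grep kafka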

Test Kafka
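A minimal smoke test, following the client-pod commands from the chart NOTES above (the topic name test is arbitrary):

    # start a throwaway client pod and open a shell in it
    kubectl run kafka-client --restart='Never' --image docker.io/bitnami/kafka:3.8.0-debian-12-r3 --namespace kube-public --command -- sleep infinity
    kubectl exec --tty -i kafka-client --namespace kube-public -- bash

    # inside the pod: create a topic, produce one message, then read it back
    kafka-topics.sh --create --topic test --partitions 3 --replication-factor 3 \
        --bootstrap-server kafka.kube-public.svc.cluster.local:9092
    echo "hello kafka" | kafka-console-producer.sh \
        --broker-list kafka.kube-public.svc.cluster.local:9092 --topic test
    kafka-console-consumer.sh \
        --bootstrap-server kafka.kube-public.svc.cluster.local:9092 \
        --topic test --from-beginning --max-messages 1

    # clean up afterwards
    kubectl delete pod kafka-client -n kube-public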

Upgrade

helm upgrade kafka bitnami/kafka -f kafka-values.yaml -n kube-public
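After the upgrade, the revision history and the rollout can be verified, and a bad revision rolled back (the StatefulSet name kafka-controller is assumed from the pod names shown in the NOTES above):

    helm history kafka -n kube-public
    kubectl rollout status statefulset/kafka-controller -n kube-public
    # roll back to revision 1 if the upgrade misbehaves
    helm rollback kafka 1 -n kube-public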

Web UI

[root@master kafka]# cat kafka-ui.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kafka-ui-deployment
  labels:
    app: kafka-ui
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kafka-ui
  template:
    metadata:
      labels:
        app: kafka-ui
    spec:
      containers:
      - name: kafka-ui
        image: provectuslabs/kafka-ui:latest
        env:
        - name: KAFKA_CLUSTERS_0_NAME
          value: "Kafka Cluster"
        - name: KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS
          value: kafka-controller-0.kafka-controller-headless.kube-public.svc.cluster.local:9092,kafka-controller-1.kafka-controller-headless.kube-public.svc.cluster.local:9092,kafka-controller-2.kafka-controller-headless.kube-public.svc.cluster.local:9092
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            memory: "256Mi"
            cpu: "100m"
          limits:
            memory: "1024Mi"
            cpu: "1000m"
        ports:
        - containerPort: 8080
---

apiVersion: v1
kind: Service
metadata:
  name: kafka-ui-service
spec:
  selector:
    app: kafka-ui
  type: NodePort
  ports:
    - protocol: TCP
      port: 8080 # with a domain, expose this via an Ingress instead of a NodePort
      targetPort: 8080
      nodePort: 30180 # the NodePort is reachable at <node IP>:30180

[root@master kafka]# kubectl apply -f kafka-ui.yaml -n kube-public
deployment.apps/kafka-ui-deployment created
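
The UI is then reachable on the NodePort from any node's IP, or via a local port-forward (kafka-ui-service is the Service defined above):

    kubectl get svc kafka-ui-service -n kube-public
    # open http://<node-ip>:30180 in a browser, or forward it locally:
    kubectl port-forward svc/kafka-ui-service -n kube-public 8080:8080
    # then browse to http://127.0.0.1:8080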
