cluster-seata

  • MySQL: 参考 external-mysql 文章

  • Pod 多节点反亲和性调度,避免单节点失效导致服务整体不可用

  • Node 调度, 只运行在指定的节点上

  • 服务注册到 nacos

  • 数据存储在 MySQL 上

[root@master test]# cat cluster-seata-statefulset.yaml 
apiVersion: v1
kind: ConfigMap
metadata:
  name: seata-sta-server-cm
data:
  application.yml: |
    server:
      # web管理的端口
      port: 7091
    spring:
      application:
        name: seata-server
    logging:
      config: classpath:logback-spring.xml
      file:
        path: ${user.home}/logs/seata
      extend:
        logstash-appender:
          destination: 127.0.0.1:4560
        kafka-appender:
          bootstrap-servers: 127.0.0.1:9092
          topic: logback_to_logstash
    console:
      # web端管理的用户名和密码
      user:
        username: seata
        password: seata
    seata:
      config:
        type: nacos
        nacos:
          # nacos的地址 k8s内部域名需要在后面加端口号
          server-addr: nacos-sta-svc.default.svc.cluster.local:8848
          # namespace:  默认使用 public
          group: SEATA_GROUP
          username: nacos
          password: nacos
          # 创建的配置文件名称
          data-id: seataServer.properties
      registry:
        type: nacos
        nacos:
          application: seata-server
          server-addr: nacos-sta-svc.default.svc.cluster.local:8848
          group: SEATA_GROUP
          #namespace: 
          cluster: default
          username: nacos
          password: nacos
      server:
        service-port: 8091
      security:
        secretKey: SeataSecretKey0c382ef121d778043159209298fd40bf3850a017
        tokenValidityInMilliseconds: 1800000
        ignore:
          urls: /,/**/*.css,/**/*.js,/**/*.html,/**/*.map,/**/*.svg,/**/*.png,/**/*.ico,/console-fe/public/**,/api/v1/auth/login

---
apiVersion: v1
kind: Service
metadata:
  name: seata-sta-server
  labels:
    app.kubernetes.io/name: seata-sta-server
spec:
  type: NodePort
  ports:
    - port: 8091
      protocol: TCP
      targetPort: 8091
      nodePort: 30891
      name: http
    - port: 7091
      protocol: TCP
      targetPort: 7091
      nodePort: 30791
      name: web
  selector:
    app.kubernetes.io/name: seata-sta-server
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: seata-sta-server
  labels:
    app.kubernetes.io/name: seata-sta-server
spec:
  serviceName: seata-sta-server-headless
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: seata-sta-server
  template:
    metadata:
      labels:
        app.kubernetes.io/name: seata-sta-server
    spec:
      # 节点打上中间件标签并污染:kubectl taint nodes <node-name> middleware=true:NoSchedule
      #tolerations:
      #  - key: "middleware"
      #    operator: "Equal"
      #    value: "true"
      #    effect: "NoSchedule"
      
      # 当多节点生效:不允许同一个pod调度到同一个节点
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app.kubernetes.io/name"
                    operator: In
                    values:
                      - seata-sta-server
              topologyKey: "kubernetes.io/hostname"
      containers:
        - name: seata-sta-server
          image: docker.io/seataio/seata-server:latest
          imagePullPolicy: IfNotPresent
          ports:
            - name: http
              containerPort: 8091
              protocol: TCP
            - name: web
              containerPort: 7091
              protocol: TCP
          resources:
            requests:
              memory: "200Mi"
              cpu: "0.1"
            limits:
              memory: "1Gi"
              cpu: "1"
          livenessProbe:
            tcpSocket:
              port: 8091
            failureThreshold: 10
            initialDelaySeconds: 10
            periodSeconds: 5
            timeoutSeconds: 1
          readinessProbe:
            tcpSocket:
              port: 8091
            failureThreshold: 10
            initialDelaySeconds: 10
            periodSeconds: 5
            timeoutSeconds: 1
          volumeMounts:
          - mountPath: /seata-server/resources/application.yml
            name: seata-cm
            subPath: application.yml
      volumes:
        - name: seata-cm
          configMap:
            name: seata-sta-server-cm

nacos 配置

seataServer.properties

store.mode=db
#-----db-----
store.db.datasource=druid
store.db.dbType=mysql
# 需要根据mysql的版本调整driverClassName
# mysql8及以上版本对应的driver:com.mysql.cj.jdbc.Driver
# mysql8以下版本的driver:com.mysql.jdbc.Driver
store.db.driverClassName=com.mysql.cj.jdbc.Driver
store.db.url=jdbc:mysql://singlenode-mysql.default.svc.cluster.local:3306/seata?useUnicode=true&characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useSSL=false
store.db.user= root
store.db.password= 123456
# 数据库初始连接数
store.db.minConn=1
# 数据库最大连接数
store.db.maxConn=200
# 获取连接时最大等待时间 默认5000,单位毫秒
store.db.maxWait=5000
# 全局事务表名 默认global_table
store.db.globalTable=global_table
# 分支事务表名 默认branch_table
store.db.branchTable=branch_table
# 全局锁表名 默认lock_table
store.db.lockTable=lock_table
store.db.distributedLockTable=distributed_lock
# 查询全局事务一次的最大条数 默认100
store.db.queryLimit=100


# undo保留天数 默认7天,log_status=1(附录3)和未正常清理的undo
server.undo.logSaveDays=7
# undo清理线程间隔时间 默认86400000,单位毫秒
server.undo.logDeletePeriod=86400000
# 二阶段提交重试超时时长 单位ms,s,m,h,d,对应毫秒,秒,分,小时,天,默认毫秒。默认值-1表示无限重试
# 公式: timeout>=now-globalTransactionBeginTime,true表示超时则不再重试
# 注: 达到超时时间后将不会做任何重试,有数据不一致风险,除非业务自行可校准数据,否则慎用
server.maxCommitRetryTimeout=-1
# 二阶段回滚重试超时时长
server.maxRollbackRetryTimeout=-1
# 二阶段提交未完成状态全局事务重试提交线程间隔时间 默认1000,单位毫秒
server.recovery.committingRetryPeriod=1000
# 二阶段异步提交状态重试提交线程间隔时间 默认1000,单位毫秒
server.recovery.asynCommittingRetryPeriod=1000
# 二阶段回滚状态重试回滚线程间隔时间  默认1000,单位毫秒
server.recovery.rollbackingRetryPeriod=1000
# 超时状态检测重试线程间隔时间 默认1000,单位毫秒,检测出超时将全局事务置入回滚会话管理器
server.recovery.timeoutRetryPeriod=1000

启动成功后,可在 nacos 控制台的服务列表中看到 seata-server 服务已注册。

Last updated