Production-Grade Highly Available Kubernetes Deployment with kubeadm


1. Installation Notes

Although Kubernetes 1.20 announced that dockershim will no longer be maintained after the 1.23 release, which means Kubernetes will stop supporting Docker directly, there is no need to panic: first, Docker can still be used up to and including 1.23; second, dockershim will certainly be maintained by others, so Docker remains usable; third, images built with Docker still run under other container runtimes.

This installation uses the kubeadm tool to deploy Kubernetes 1.20+ on CentOS 7.9, with three master nodes and one worker node; HAProxy + KeepAlived provide high availability.

2. Node Planning

IP              Hostname
10.10.181.243   k8s-master-1
10.10.181.244   k8s-master-2
10.10.181.245   k8s-master-3
10.10.181.241   k8s-node-1
10.10.181.200   VIP (k8s-master-lb)

3. Basic Configuration

3-1. Configure hosts

cat >> /etc/hosts << EOF
10.10.181.243 k8s-master-1
10.10.181.244 k8s-master-2
10.10.181.245 k8s-master-3
10.10.181.200 k8s-master-lb
10.10.181.241 k8s-node-1
EOF
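
A quick sanity check, run on any node, to confirm every entry resolves (a minimal sketch):

for h in k8s-master-1 k8s-master-2 k8s-master-3 k8s-master-lb k8s-node-1; do
    getent hosts $h || echo "$h does not resolve"
done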

3-2. Configure the yum repositories

curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' /etc/yum.repos.d/CentOS-Base.repo

3-3. Disable firewalld, swap, and SELinux

yum install wget jq psmisc vim net-tools telnet yum-utils device-mapper-persistent-data lvm2 git -y
systemctl disable --now firewalld 
systemctl disable --now dnsmasq
systemctl disable --now NetworkManager

setenforce 0
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config

swapoff -a && sysctl -w vm.swappiness=0
sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab


cat >> /etc/security/limits.conf << EOF
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF
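
To confirm the changes took effect, a quick check (note that the new limits only apply to sessions started after the edit):

free -h | grep -i swap      # swap usage should read 0
ulimit -n                   # run from a fresh login shell; should report 655360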

3-4. Time synchronization

rpm -ivh http://mirrors.wlnmp.com/centos/wlnmp-release-centos.noarch.rpm
yum install ntpdate -y

ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo 'Asia/Shanghai' >/etc/timezone
ntpdate time2.aliyun.com


echo "*/5 * * * * ntpdate time2.aliyun.com" >>  /var/spool/cron/root 

3-5. Passwordless SSH

# Passwordless SSH from one master node is enough (it simplifies distributing files); k8s-master-1 is used here
ssh-keygen -t rsa
for i in k8s-master-1 k8s-master-2 k8s-master-3 k8s-node-1;do ssh-copy-id -i .ssh/id_rsa.pub $i;done
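
A short loop to verify that passwordless login works before moving on (a sketch, run from k8s-master-1):

for i in k8s-master-1 k8s-master-2 k8s-master-3 k8s-node-1; do ssh -o BatchMode=yes $i hostname; done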

3-6. Kernel upgrade

rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm
yum --disablerepo="*" --enablerepo="elrepo-kernel" list available
yum -y --enablerepo=elrepo-kernel install kernel-ml.x86_64 kernel-ml-devel.x86_64
yum update -y && grub2-set-default 0 && reboot
# After the reboot, confirm the new kernel is running: uname -a
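
A slightly fuller check after the reboot (a sketch; grub2-editenv shows which entry boots by default):

uname -r               # running kernel version, should be the new kernel-ml release
grub2-editenv list     # default boot entry (saved_entry)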

# If yum reports a certificate error: [Errno 14] curl#60 - "Peer's Certificate has expired.", work around it with:
echo "sslverify=0" >> /etc/yum.conf
yum upgrade ca-certificates

3-7. Kernel modules

Note: kernels older than 4.19 use nf_conntrack_ipv4; 4.19 and newer use nf_conntrack.

yum install ipvsadm ipset sysstat conntrack libseccomp -y
cat >> /etc/modules-load.d/ipvs.conf  << EOF
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF

systemctl enable --now systemd-modules-load.service
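
Verify that the modules were actually loaded:

lsmod | grep -e ip_vs -e nf_conntrack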

3-8. Kernel parameter tuning

cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720

net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
sysctl --system
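
If sysctl --system complains that the net.bridge.* keys do not exist, the br_netfilter module is not loaded yet; a minimal fix is to load it now and persist it like the ipvs modules above:

modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf   # load on every boot
sysctl --system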

3-9. Install Docker

yum install docker-ce-19.03.* -y
systemctl daemon-reload && systemctl enable --now docker


# Reference: https://kubernetes.io/docs/setup/production-environment/container-runtimes/#docker
sudo mkdir /etc/docker
cat <<EOF | sudo tee /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2"
}
EOF

sudo systemctl enable docker
sudo systemctl daemon-reload
sudo systemctl restart docker
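
A quick check that Docker picked up the systemd cgroup driver (it must match the kubelet's driver):

docker info | grep -i "cgroup driver"    # expect: Cgroup Driver: systemd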

3-10. Install the Kubernetes components

# 1.20.x is installed here; gcr.io is not reachable from mainland China, so a domestic registry is configured below (machines abroad or behind a proxy can skip this)
yum list kubeadm.x86_64 --showduplicates | sort -r
yum install -y  kubelet-1.20.0 kubeadm-1.20.0 kubectl-1.20.0


cat >/etc/sysconfig/kubelet<<EOF
KUBELET_EXTRA_ARGS="--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.2"
EOF
systemctl daemon-reload
systemctl enable --now kubelet
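
Optionally, keep a routine yum update from silently upgrading the cluster components later; one simple approach (a sketch using yum's exclude option):

echo "exclude=kubelet kubeadm kubectl" >> /etc/yum.conf    # lift temporarily with --disableexcludes=main when upgrading deliberately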

4. High Availability (master nodes only)

yum install keepalived haproxy -y
mkdir /etc/haproxy

cat > /etc/haproxy/haproxy.cfg << EOF
global
  maxconn  2000
  ulimit-n  16384
  log  127.0.0.1 local0 err
  stats timeout 30s

defaults
  log global
  mode  http
  option  httplog
  timeout connect 5000
  timeout client  50000
  timeout server  50000
  timeout http-request 15s
  timeout http-keep-alive 15s

frontend monitor-in
  bind *:33305
  mode http
  option httplog
  monitor-uri /monitor

frontend k8s-master
  bind 0.0.0.0:16443
  bind 127.0.0.1:16443
  mode tcp
  option tcplog
  tcp-request inspect-delay 5s
  default_backend k8s-master

backend k8s-master
  mode tcp
  option tcplog
  option tcp-check
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
  server k8s-master-1 10.10.181.243:6443  check
  server k8s-master-2 10.10.181.244:6443  check
  server k8s-master-3 10.10.181.245:6443  check
EOF
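
Before starting the service, validate the configuration file syntax:

haproxy -c -f /etc/haproxy/haproxy.cfg    # should print "Configuration file is valid"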

4-1. Configure keepalived on the master nodes

The file below is for k8s-master-1. On k8s-master-2 and k8s-master-3, use the commented-out values instead: state BACKUP, the node's own mcast_src_ip, and priority 100 and 99 respectively.

mkdir /etc/keepalived

cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    # state BACKUP
    # mcast_src_ip 10.10.181.244
    # mcast_src_ip 10.10.181.245
    # priority 100
    # priority 99
    state MASTER
    mcast_src_ip 10.10.181.243
    interface ens192
    virtual_router_id 51
    priority 101
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        10.10.181.200
    }
    track_script {
       chk_apiserver
    }
}

EOF
# quote the delimiter ('EOF') so $(...) and $err are written literally instead of being expanded now
cat > /etc/keepalived/check_apiserver.sh << 'EOF'
#!/bin/bash

err=0
for k in $(seq 1 3)
do
    check_code=$(pgrep haproxy)
    if [[ $check_code == "" ]]; then
        err=$(expr $err + 1)
        sleep 1
        continue
    else
        err=0
        break
    fi
done

if [[ $err != "0" ]]; then
    echo "systemctl stop keepalived"
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi
EOF
chmod +x /etc/keepalived/check_apiserver.sh
systemctl daemon-reload
systemctl enable --now haproxy
systemctl enable --now keepalived
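
A quick check that the VIP came up and HAProxy answers on its monitor endpoint (a sketch; run on the node currently holding MASTER state):

ip addr show ens192 | grep 10.10.181.200     # the VIP should be bound here
curl http://10.10.181.200:33305/monitor      # HAProxy health page from the monitor-in frontend above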

5. Cluster Initialization

Bootstrap the first master node from a configuration file.

# Export a default initialization configuration as a starting point, edit it, then initialize from it
kubeadm config print init-defaults > kubeadm.yaml
kubeadm init --config kubeadm.yaml

If the token expires, or you forget the join command, regenerate it with: kubeadm token create --print-join-command

Then adjust the configuration to your needs, for example change the imageRepository value and set the kube-proxy mode to ipvs.

Create the following new.yaml configuration file on the k8s-master-1 node:

# Note: for a non-HA cluster, change 10.10.181.200:16443 below to master-1's address, and 16443 to the apiserver port (6443 by default)
# Note: change v1.20.0 to your own kubeadm version: kubeadm version
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: 7t2weq.bjbawausm0jaxury
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 10.10.181.243
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master-1
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  certSANs:
  - 10.10.181.200
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 10.10.181.200:16443
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.20.0
networking:
  dnsDomain: cluster.local
  podSubnet: 172.168.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}
#---
#apiVersion: kubeproxy.config.k8s.io/v1alpha1
#kind: KubeProxyConfiguration
#mode: ipvs

Copy the new.yaml file to the other master nodes and change advertiseAddress to each machine's own address, as in the loop below.
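
A small distribution loop (a sketch; it relies on the passwordless SSH configured earlier, and advertiseAddress still has to be edited on each node afterwards):

for i in k8s-master-2 k8s-master-3; do scp /root/new.yaml $i:/root/; done

Then pre-pull the images on all master nodes to save initialization time: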

kubeadm config images pull --config /root/new.yaml 

Enable kubelet to start on boot on all nodes:

systemctl enable --now kubelet    # if this fails now, ignore it; kubelet will start once initialization succeeds

Initialize the k8s-master-1 node. Initialization generates the certificates and configuration files under /etc/kubernetes; afterwards the other master nodes simply join k8s-master-1:

kubeadm init --config /root/new.yaml  --upload-certs

On success, the output includes the token values that other nodes use when joining, so record them:

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 10.10.181.200:16443 --token 7t2weq.bjbawausm0jaxury \
    --discovery-token-ca-cert-hash sha256:52085db55d1bf3bb09554956800b4e2597e010618d4b8db24de56a2fb5637ad6 \
    --control-plane --certificate-key 69ce5c4ab222fd428c65c65de18879280d7a1faa36b70d35e186e22d078f6a91

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 10.10.181.200:16443 --token 7t2weq.bjbawausm0jaxury \
    --discovery-token-ca-cert-hash sha256:52085db55d1bf3bb09554956800b4e2597e010618d4b8db24de56a2fb5637ad6

Configure the environment variable on k8s-master-1 for accessing the Kubernetes cluster:

cat <<EOF >> /root/.bashrc
export KUBECONFIG=/etc/kubernetes/admin.conf
EOF
source /root/.bashrc
kubectl get nodes    # nodes show NotReady
kubectl get pods -n kube-system -o wide       # coredns stays Pending because no network plugin is deployed yet

6. Add Nodes

# Join the other master nodes to the control plane (high availability)
  kubeadm join 10.10.181.200:16443 --token 7t2weq.bjbawausm0jaxury \
    --discovery-token-ca-cert-hash sha256:52085db55d1bf3bb09554956800b4e2597e010618d4b8db24de56a2fb5637ad6 \
    --control-plane --certificate-key 69ce5c4ab222fd428c65c65de18879280d7a1faa36b70d35e186e22d078f6a91

# Configure kubectl access on the other master nodes
cat <<EOF >> /root/.bashrc
export KUBECONFIG=/etc/kubernetes/admin.conf
EOF
source /root/.bashrc

# Join the worker node
kubeadm join 10.10.181.200:16443 --token 7t2weq.bjbawausm0jaxury \
    --discovery-token-ca-cert-hash sha256:52085db55d1bf3bb09554956800b4e2597e010618d4b8db24de56a2fb5637ad6


# Check node status
[root@k8s-master-1 ~]# kubectl  get node
NAME           STATUS     ROLES                  AGE   VERSION
k8s-master-1   NotReady   control-plane,master   23m   v1.20.0
k8s-master-2   NotReady   control-plane,master   19m   v1.20.0
k8s-master-3   NotReady   control-plane,master   19m   v1.20.0
k8s-node-1     NotReady   <none>                 18m   v1.20.0

7. Install Calico

# Clone all the source files needed for the installation:
cd /root/ ; git clone https://github.com/dotbalo/k8s-ha-install.git    

cd /root/k8s-ha-install && git checkout manual-installation-v1.20.x && cd calico/

Modify the following places in calico-etcd.yaml:

sed -i 's#etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"#etcd_endpoints: "https://10.10.181.243:2379,https://10.10.181.244:2379,https://10.10.181.245:2379"#g' calico-etcd.yaml


ETCD_CA=`cat /etc/kubernetes/pki/etcd/ca.crt | base64 | tr -d '\n'`
ETCD_CERT=`cat /etc/kubernetes/pki/etcd/server.crt | base64 | tr -d '\n'`
ETCD_KEY=`cat /etc/kubernetes/pki/etcd/server.key | base64 | tr -d '\n'`
sed -i "s@# etcd-key: null@etcd-key: ${ETCD_KEY}@g; s@# etcd-cert: null@etcd-cert: ${ETCD_CERT}@g; s@# etcd-ca: null@etcd-ca: ${ETCD_CA}@g" calico-etcd.yaml


sed -i 's#etcd_ca: ""#etcd_ca: "/calico-secrets/etcd-ca"#g; s#etcd_cert: ""#etcd_cert: "/calico-secrets/etcd-cert"#g; s#etcd_key: "" #etcd_key: "/calico-secrets/etcd-key" #g' calico-etcd.yaml

POD_SUBNET=`cat /etc/kubernetes/manifests/kube-controller-manager.yaml | grep cluster-cidr= | awk -F= '{print $NF}'`

sed -i 's@# - name: CALICO_IPV4POOL_CIDR@- name: CALICO_IPV4POOL_CIDR@g; s@#   value: "192.168.0.0/16"@  value: '"${POD_SUBNET}"'@g' calico-etcd.yaml
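
A quick spot-check that the sed substitutions landed where expected:

grep -E 'etcd_endpoints|CALICO_IPV4POOL_CIDR' -A 1 calico-etcd.yaml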

Create the Calico resources:

kubectl apply -f calico-etcd.yaml

Check:

[root@k8s-master-2 ~]# kubectl get pod -A
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-5f6d4b864b-w2ch5   1/1     Running   0          4m58s
kube-system   calico-node-pnxgh                          1/1     Running   0          4m58s
kube-system   calico-node-sjjxl                          1/1     Running   0          4m58s
kube-system   calico-node-sq4xj                          1/1     Running   0          4m58s
kube-system   calico-node-tc4nb                          1/1     Running   0          4m58s
kube-system   coredns-74ff55c5b-7g8ls                    1/1     Running   0          33m
kube-system   coredns-74ff55c5b-v5xwz                    1/1     Running   0          33m
kube-system   etcd-k8s-master-1                          1/1     Running   0          33m
kube-system   etcd-k8s-master-2                          1/1     Running   0          29m
kube-system   etcd-k8s-master-3                          1/1     Running   0          28m
kube-system   kube-apiserver-k8s-master-1                1/1     Running   0          33m
kube-system   kube-apiserver-k8s-master-2                1/1     Running   0          29m
kube-system   kube-apiserver-k8s-master-3                1/1     Running   0          28m
kube-system   kube-controller-manager-k8s-master-1       1/1     Running   1          33m
kube-system   kube-controller-manager-k8s-master-2       1/1     Running   0          29m
kube-system   kube-controller-manager-k8s-master-3       1/1     Running   0          28m
kube-system   kube-proxy-7s9wd                           1/1     Running   0          29m
kube-system   kube-proxy-cgk2n                           1/1     Running   0          33m
kube-system   kube-proxy-dnk6k                           1/1     Running   0          28m
kube-system   kube-proxy-h8g8t                           1/1     Running   0          28m
kube-system   kube-scheduler-k8s-master-1                1/1     Running   1          33m
kube-system   kube-scheduler-k8s-master-2                1/1     Running   0          29m
kube-system   kube-scheduler-k8s-master-3                1/1     Running   0          28m

8. Deploy Metrics Server

In recent Kubernetes versions, system resource metrics are collected by metrics-server, which gathers CPU, memory, disk, and network usage for nodes and Pods. Copy front-proxy-ca.crt from the k8s-master-1 node to all Node nodes:

Node=(k8s-node-1)
for i in ${Node[@]}
do
    scp /etc/kubernetes/pki/front-proxy-ca.crt $i:/etc/kubernetes/pki/front-proxy-ca.crt
done

Install metrics-server:

# Install metrics-server from the cloned repo
cd /root/k8s-ha-install/metrics-server-0.4.x-kubeadm/
kubectl  create -f comp.yaml 


# Once all Pods in the kube-system namespace are running, check node metrics
[root@k8s-master-1 ~]# kubectl  top node
NAME           CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   
k8s-master-1   186m         9%     1289Mi          33%       
k8s-master-2   217m         5%     1434Mi          18%       
k8s-master-3   176m         8%     1279Mi          33%       
k8s-node-1     154m         7%     833Mi           21% 

9. Deploy the Dashboard

[root@k8s-master-1 ~]# cd /root/k8s-ha-install/dashboard/ && kubectl  create -f .
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created

Add the following startup flags to the Chrome launch command to work around the certificate error when opening the Dashboard:

--test-type --ignore-certificate-errors

Change the Dashboard Service type to NodePort:

kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard

In the editor, change type: ClusterIP to type: NodePort (skip this step if it is already NodePort).

The Dashboard is now reachable on the assigned NodePort via any host running kube-proxy or via the VIP, e.g. https://10.10.181.243:18282 (replace 18282 with your own port). Choose Token as the login method.
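
To find the assigned NodePort:

kubectl get svc kubernetes-dashboard -n kubernetes-dashboard
# the PORT(S) column shows something like 443:18282/TCP; the second number is the NodePort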

Look up the token value:

[root@k8s-master-1]# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
Name:         admin-user-token-r4vcp
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: admin-user
              kubernetes.io/service-account.uid: 2112796c-1c9e-11e9-91ab-000c298bf023

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1025 bytes
namespace:  11 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLXI0dmNwIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiIyMTEyNzk2Yy0xYzllLTExZTktOTFhYi0wMDBjMjk4YmYwMjMiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.bWYmwgRb-90ydQmyjkbjJjFt8CdO8u6zxVZh-19rdlL_T-n35nKyQIN7hCtNAt46u6gfJ5XXefC9HsGNBHtvo_Ve6oF7EXhU772aLAbXWkU1xOwQTQynixaypbRIas_kiO2MHHxXfeeL_yYZRrgtatsDBxcBRg-nUQv4TahzaGSyK42E_4YGpLa3X3Jc4t1z0SQXge7lrwlj8ysmqgO4ndlFjwPfvg0eoYqu9Qsc5Q7tazzFf9mVKMmcS1ppPutdyqNYWL62P1prw_wclP0TezW1CsypjWSVT4AuJU8YmH8nTNR1EXn8mJURLSjINv6YbZpnhBIPgUGk1JYVLcn47w

10. Enable IPVS Mode

Switch kube-proxy to ipvs mode (the ipvs setting was left commented out during initialization):

kubectl edit cm kube-proxy -n kube-system
# in the editor, change the mode field to:
mode: ipvs
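
If you prefer a non-interactive change, an equivalent one-liner (a sketch; the kubeadm-generated ConfigMap ships with mode: ""):

kubectl -n kube-system get cm kube-proxy -o yaml | sed 's/mode: ""/mode: "ipvs"/' | kubectl apply -f -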

Trigger a rolling restart of the kube-proxy Pods:

kubectl patch daemonset kube-proxy -p "{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"date\":\"`date +'%s'`\"}}}}}" -n kube-system

Verify:

curl 127.0.0.1:10249/proxyMode
ipvs

# Inspect the IPVS virtual server table
ipvsadm -ln

11. Remove the Master Taint

After a kubeadm installation, the master nodes are tainted so that Pods are not scheduled on them by default; this can be changed as follows:

# Check the taints
[root@k8s-master-1 ~]# kubectl describe node -l node-role.kubernetes.io/master |grep -i taints
Taints:             node-role.kubernetes.io/master:NoSchedule
Taints:             node-role.kubernetes.io/master:NoSchedule
Taints:             node-role.kubernetes.io/master:NoSchedule

# Remove the taint
[root@k8s-master-1 ~]# kubectl taint node -l node-role.kubernetes.io/master node-role.kubernetes.io/master:NoSchedule-
node/k8s-master-1 untainted
node/k8s-master-2 untainted
node/k8s-master-3 untainted

[root@k8s-master-1 ~]# kubectl describe node -l node-role.kubernetes.io/master |grep -i taints
Taints:             <none>
Taints:             <none>
Taints:             <none>

12. Handling the One-Year Certificate Lifetime

Certificates in a kubeadm-installed cluster are valid for one year by default. On the master nodes, kube-apiserver, kube-scheduler, kube-controller-manager, and etcd all run as containers.

Method 1:

Renew the certificates manually within the year; the command below can also be wrapped in a cron job that runs monthly.

# Check when the current certificates expire
$ kubeadm alpha certs check-expiration
# Renew all certificates
$ kubeadm alpha certs renew all
# Schedule monthly renewal (the "L" last-day-of-month syntax is not supported by the
# stock cron on CentOS 7, so the 1st of each month is used instead)
0 0 1 * * /usr/bin/kubeadm alpha certs renew all
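
To install the schedule, and keeping in mind that the control-plane static Pods must be restarted to pick up renewed certificates, a minimal sketch (the manifests move is one common way to bounce all static Pods):

(crontab -l 2>/dev/null; echo "0 0 1 * * /usr/bin/kubeadm alpha certs renew all") | crontab -
# after a renewal, restart the control-plane static Pods, e.g.:
mv /etc/kubernetes/manifests /etc/kubernetes/manifests.bak && sleep 60 && mv /etc/kubernetes/manifests.bak /etc/kubernetes/manifests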
 
Check the certificates:
 
cd /etc/kubernetes/pki
 
openssl x509 -in apiserver.crt -noout -text |grep Not
            Not Before: Nov 13 03:43:30 2019 GMT
            Not After : Nov 17 01:41:50 2020 GMT
openssl x509 -in front-proxy-client.crt -noout -text |grep Not
            Not Before: Nov 13 03:43:23 2019 GMT
            Not After : Nov 17 01:41:56 2020 GMT

Method 2:

Patch the kubeadm source to extend the certificate validity to 100 years, then rebuild the binary.

$ git clone https://github.com/kubernetes/kubernetes.git
 
$ cd kubernetes
# Edit the source; check out the release branch matching your cluster version (this example patches release-1.15)
$ git checkout release-1.15
$ vim cmd/kubeadm/app/util/pkiutil/pki_helpers.go
$ git diff
--- a/cmd/kubeadm/app/util/pkiutil/pki_helpers.go
+++ b/cmd/kubeadm/app/util/pkiutil/pki_helpers.go
@@ -571,7 +571,7 @@ func NewSignedCert(cfg *certutil.Config, key crypto.Signer, caCert *x509.Certifi
  IPAddresses: cfg.AltNames.IPs,
  SerialNumber: serial,
  NotBefore: caCert.NotBefore,
- NotAfter: time.Now().Add(kubeadmconstants.CertificateValidity).UTC(),
+ NotAfter: time.Now().Add(kubeadmconstants.CertificateValidity * 100).UTC(),
  KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
  ExtKeyUsage: cfg.Usages,
 
# Build the patched kubeadm binary
$ go version
go version go1.12.7 linux/amd64
$ go build ./cmd/kubeadm
 
# Renew the certificates with the patched binary
$ ./kubeadm alpha certs renew all
certificate embedded in the kubeconfig file for the admin to use and for kubeadm itself renewed
certificate for serving the Kubernetes API renewed
certificate the apiserver uses to access etcd renewed
certificate for the API server to connect to kubelet renewed
certificate embedded in the kubeconfig file for the controller manager to use renewed
certificate for liveness probes to healtcheck etcd renewed
certificate for etcd nodes to communicate with each other renewed
certificate for serving etcd renewed
certificate for the front proxy client renewed
certificate embedded in the kubeconfig file for the scheduler manager to use renewed
 
Check the certificates:
 
cd /etc/kubernetes/pki
 
openssl x509 -in front-proxy-client.crt   -noout -text  |grep Not
            Not Before: Nov 28 09:07:02 2018 GMT
            Not After : Nov 25 09:07:03 2028 GMT
            
openssl x509 -in apiserver.crt   -noout -text  |grep Not
            Not Before: Nov 28 09:07:04 2018 GMT
            Not After : Nov 25 09:07:04 2028 GMT

13. Notes

Unlike a binary installation, a kubeadm cluster keeps the kubelet configuration in /etc/sysconfig/kubelet and /var/lib/kubelet/config.yaml, and the control-plane component manifests under /etc/kubernetes/manifests/ (kube-apiserver.yaml, for example). When one of these manifests is changed, kubelet picks up the new configuration automatically, i.e. it restarts the corresponding static Pod. Do not create these files by hand a second time.
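
For example, to change a kube-apiserver flag, edit its static Pod manifest in place and let kubelet re-create the Pod on its own (a sketch; the Pod name suffix is the node name):

vim /etc/kubernetes/manifests/kube-apiserver.yaml
kubectl -n kube-system get pod kube-apiserver-k8s-master-1 -w    # watch the Pod restart with the new flags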
