RKE 1.5.7 安装集群
初始化操作
# Disable firewalld, SELinux and swap: kubelet requires swap to be off,
# and firewalld/SELinux commonly interfere with the container runtime and CNI.
systemctl stop firewalld
systemctl disable firewalld
sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
# Set each node's hostname according to your cluster plan
hostnamectl set-hostname <hostname>
# Add host entries (at minimum on the master, ideally on every node).
# NOTE(review): cluster.yml below also defines a worker2 node — its
# address should be added here too; confirm the node list before running.
cat >> /etc/hosts << EOF
172.19.0.10 master1
172.19.0.13 worker1
EOF
# Pass bridged IPv4 traffic to iptables chains (required by most CNI plugins)
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system # apply the new sysctl settings
# Time synchronization
yum install -y ntpdate
ntpdate time.windows.com
所有节点安装 kubectl
# Ubuntu: install kubectl via snap
sudo apt install snapd
sudo snap info kubectl
sudo snap install --classic --channel=1.27 kubectl
# CentOS: configure the Aliyun Kubernetes yum repository
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# Install kubectl 1.27 so it matches both the snap channel above and the
# cluster's kubernetes_version (v1.27.11) in cluster.yml.
# (Was kubectl-1.28.* — inconsistent with the rest of this guide.)
yum install -y kubectl-1.27.*
为所有节点安装 docker
# Install a Docker version validated by Rancher for RKE
# (the releases.rancher.com script pins Docker 20.10.x)
curl https://releases.rancher.com/install-docker/20.10.sh | sh
systemctl start docker && systemctl enable docker
为所有节点创建用户,并设置 ssh 免密登录
useradd rke
passwd rke
# Add the rke user to the docker group so it can run docker without sudo
# (RKE connects over SSH as this user and needs docker access)
usermod -aG docker rke
# Log in as the new rke user and verify docker permissions
su rke
docker ps
# If the configuration succeeded you should see output like:
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
ssh-keygen -t rsa
# Copy the public key to every cluster node for passwordless SSH.
# NOTE(review): these IPs (172.27.31.x) do not match the 172.19.0.x
# addresses used in /etc/hosts earlier — confirm the real node IPs.
ssh-copy-id rke@172.27.31.149
ssh-copy-id rke@172.27.31.148
ssh-copy-id rke@172.27.31.147
K8S部署文件
# Download the RKE binary, make it executable
wget https://github.com/rancher/rke/releases/download/v1.5.7/rke_linux-amd64
mv rke_linux-amd64 rke
chmod +x rke
# Start the k8s cluster with the prepared config file.
# Invoke as ./rke — the binary is in the current directory, and a bare
# `rke` would only resolve if it had been moved into a $PATH directory.
./rke up --config cluster.yml
cluster.yml:RKE 集群的配置文件(我们手动生成的配置文件)。
kube_config_cluster.yml:该集群的Kubeconfig 文件包含了获取该集群所有权限的认证凭据。
cluster.rkestate:Kubernetes 集群状态文件,包含了获取该集群所有权限的认证凭据,使用 RKE v0.2.0 时才会创建这个文件。
安装 helm
# Download, unpack and install helm — one command per line
# (the original jammed all three commands onto a single line, which
# would be passed to wget as extra arguments and fail)
wget https://get.helm.sh/helm-v3.14.3-linux-amd64.tar.gz
tar -zxvf helm-v3.14.3-linux-amd64.tar.gz
sudo mv linux-amd64/helm /usr/bin
安装: cert-manager
版本选择参考: https://ranchermanager.docs.rancher.com/zh/getting-started/installation-and-upgrade/resources/upgrade-cert-manager
# Apply the cert-manager CRDs. The CRD manifest version MUST match the
# chart version installed below.
kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.14.4/cert-manager.crds.yaml
helm repo add jetstack https://charts.jetstack.io
helm repo update
# Install the chart at the same version as the CRDs applied above.
# (Was --version v1.7.1, which mismatched the v1.14.4 CRDs and would
# leave cert-manager running against incompatible CRD schemas.)
helm install cert-manager jetstack/cert-manager \
  --namespace cert-manager \
  --create-namespace \
  --version v1.14.4
安装 rancher web ui
参考: https://ranchermanager.docs.rancher.com/zh/getting-started/installation-and-upgrade/resources/choose-a-rancher-version
helm repo add rancher-stable https://releases.rancher.com/server-charts/stable
helm repo update
# hostname must resolve to this cluster's ingress;
# replicas=3 assumes at least three schedulable nodes.
helm install rancher rancher-stable/rancher \
--set hostname=rancher.my.org \
--set replicas=3 \
--version 2.8.2 \
--namespace cattle-system \
--create-namespace
cluster.yml 文件内容
# RKE cluster configuration (cluster.yml).
# Structure restored: the pasted original had lost all YAML indentation,
# which makes the file unparseable by RKE.
nodes:
  - address: 公网地址
    internal_address: 私网地址
    hostname_override: master1
    user: rke
    port: 22
    docker_socket: /var/run/docker.sock
    role:
      - controlplane
      - etcd
  - address: 公网地址
    internal_address: 私网地址
    hostname_override: worker1
    user: rke
    port: 22
    docker_socket: /var/run/docker.sock
    role:
      - worker
      - etcd
    labels:
      app: ingress
  - address: 公网地址
    internal_address: 私网地址
    hostname_override: worker2
    user: rke
    port: 22
    docker_socket: /var/run/docker.sock
    role:
      - worker
      - etcd
    labels:
      app: ingress

cluster_name: k8s-cluster
kubernetes_version: v1.27.11-rancher1-1
ignore_docker_version: false
enable_cri_dockerd: true

# private_registries:
#   - url: registry.com
#     is_default: true

services:
  etcd:
    # Local (on-node) backup policy
    snapshot: true   # enable recurring snapshots (true|false)
    creation: 6h     # snapshot interval
    retention: 24h   # snapshot retention period
    # S3 backup policy
    # backup_config:
    #   interval_hours: 12    # hours between snapshots
    #   retention: 6          # number of snapshots to keep
    #   s3backupconfig:
    #     access_key: S3_ACCESS_KEY
    #     secret_key: S3_SECRET_KEY
    #     bucket_name: s3-bucket-name
    #     region: ""    # optional
    #     folder: ""    # optional, available since RKE v0.3.0
    #     endpoint: s3.amazonaws.com   # default: s3.amazonaws.com
    #     custom_ca: |-
    #       -----BEGIN CERTIFICATE-----
    #       $CERTIFICATE
    #       -----END CERTIFICATE-----
  kube-api:
    service_cluster_ip_range: 10.43.0.0/16
    service_node_port_range: 30000-32767
    always_pull_images: false
    # Rate limiting: protects a multi-tenant cluster from one tenant
    # flooding the API server with event requests
    event_rate_limit:
      enabled: true
      configuration:
        apiVersion: eventratelimit.admission.k8s.io/v1alpha1
        kind: Configuration
        limits:
          - type: Server
            qps: 6000
            burst: 30000
  kube-controller:
    cluster_cidr: 10.42.0.0/16
    service_cluster_ip_range: 10.43.0.0/16
  kubelet:
    cluster_domain: cluster.local
    cluster_dns_server: 10.43.0.10
    fail_swap_on: false

authorization:
  mode: rbac

# Network plug-in (canal, calico, flannel, weave, or none)
network:
  plugin: canal
  options:
    # With plugin: canal the backend option key is
    # canal_flannel_backend_type; plain flannel_backend_type only
    # applies when plugin: flannel and would be ignored here.
    canal_flannel_backend_type: vxlan

# DNS provider (coredns or kube-dns)
dns:
  provider: coredns
  # Available as of RKE v1.1.0
  update_strategy:
    strategy: RollingUpdate
    rollingUpdate:
      maxUnavailable: 20%
      maxSurge: 15%
  linear_autoscaler_params:
    cores_per_replica: 0.34
    nodes_per_replica: 4
    prevent_single_point_failure: true
    min: 2
    max: 3

# Monitoring provider (metrics-server)
monitoring:
  provider: metrics-server
  # Available as of RKE v1.1.0
  update_strategy:
    strategy: RollingUpdate
    rollingUpdate:
      maxUnavailable: 8

# Only the nginx ingress provider is currently supported.
# Set `provider: none` to disable the ingress controller entirely;
# node_selector restricts ingress pods to matching nodes.
ingress:
  provider: nginx
  node_selector:
    app: ingress
  options:
    use-forwarded-headers: "true"  # set to true when the cluster sits behind a proxy
    use-http2: "true"              # enable HTTP/2 (ingress-nginx key is use-http2, not http2)
Last updated