Installing a Highly Available Kubernetes v1.30 Cluster with kubeadm and the Docker Container Runtime

Reference: https://github.com/cby-chen/Kubernetes/blob/main/doc/kubeadm-install.md

Environment

Name       IP Address       OS
master-1   192.168.10.229   CentOS 7.9
master-2   192.168.10.230   CentOS 7.9
master-3   192.168.10.232   CentOS 7.9
node-1     192.168.10.226   CentOS 7.9
node-2     192.168.10.227   CentOS 7.9
node-3     192.168.10.228   CentOS 7.9
vip        192.168.10.251   -

1. Linux Optimization

1.1 Configure the Aliyun yum mirror

Aliyun mirror site: https://developer.aliyun.com/mirror/

rm -rf /etc/yum.repos.d/*
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
yum update -y

1.2 Install common packages

yum install wget yum-utils device-mapper-persistent-data lvm2 nfs-utils ipvsadm psmisc vim net-tools telnet -y

1.3 System settings

Disable firewalld and SELinux

[root@k8s-master1 ~]# systemctl stop firewalld && systemctl disable firewalld
[root@k8s-master1 ~]# setenforce 0
[root@k8s-master1 ~]# sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
[root@k8s-master1 ~]# cat /etc/selinux/config
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
#     enforcing - SELinux security policy is enforced.
#     permissive - SELinux prints warnings instead of enforcing.
#     disabled - No SELinux policy is loaded.
SELINUX=disabled
# SELINUXTYPE= can take one of three values:
#     targeted - Targeted processes are protected,
#     minimum - Modification of targeted policy. Only selected processes are protected. 
#     mls - Multi Level Security protection.
SELINUXTYPE=targeted 

Disable the swap partition

swapoff -a

# Edit /etc/fstab with vi, comment out the swap line with a leading #, then save and exit
vi /etc/fstab
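
If you prefer a non-interactive edit, a sed one-liner can comment out the swap entries (a sketch; verify the result afterwards with grep swap /etc/fstab):

# Comment out every uncommented fstab line that mounts swap
sed -ri 's/^([^#].*\bswap\b.*)$/#\1/' /etc/fstab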

Configure ulimit

ulimit -SHn 65535
cat >> /etc/security/limits.conf <<EOF
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF

Configure IPVS

yum install ipvsadm ipset sysstat conntrack libseccomp -y
cat >> /etc/modules-load.d/ipvs.conf <<EOF 
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF

systemctl restart systemd-modules-load.service
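
Note that the net.bridge.* sysctls set in the next step require the br_netfilter module, which is not in the list above; the following addition (not part of the original steps) loads it and verifies that everything is in place:

# br_netfilter is needed for net.bridge.bridge-nf-call-iptables below
cat >> /etc/modules-load.d/ipvs.conf <<EOF
br_netfilter
EOF
modprobe br_netfilter
# Confirm the modules are loaded
lsmod | grep -e ip_vs -e nf_conntrack -e br_netfilter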

Tune kernel parameters

cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
net.ipv6.conf.all.forwarding = 1
EOF

sysctl --system

# These sysctl settings tune networking, file system, and virtual memory
# behaviour. A summary of each parameter:
#
# net.ipv4.ip_forward = 1
#   - Enables IPv4 forwarding so the node can route packets (required for pod networking).
#
# net.bridge.bridge-nf-call-iptables = 1
#   - Passes bridged traffic to iptables so kube-proxy rules see pod traffic
#     (requires the br_netfilter module to be loaded).
#
# fs.may_detach_mounts = 1
#   - Allows mounts to be detached while still in use by other processes
#     (avoids "device busy" issues with container mounts).
#
# vm.overcommit_memory=1
#   - Always allow memory overcommit; allocations succeed even when memory is fully committed.
#
# vm.panic_on_oom=0
#   - Do not panic the kernel on out-of-memory; let the OOM killer handle it.
#
# fs.inotify.max_user_watches=89100
#   - Upper limit on files a single user's inotify instances may watch.
#
# fs.file-max=52706963
#   - System-wide limit on open file handles.
#
# fs.nr_open=52706963
#   - Per-process limit on open file descriptors.
#
# net.netfilter.nf_conntrack_max=2310720
#   - Maximum number of connection-tracking table entries.
#
# net.ipv4.tcp_keepalive_time = 600
#   - Idle time (seconds) before TCP keepalive probes are sent.
#
# net.ipv4.tcp_keepalive_probes = 3
#   - Number of unanswered keepalive probes before a connection is dropped.
#
# net.ipv4.tcp_keepalive_intvl = 15
#   - Interval (seconds) between keepalive probes.
#
# net.ipv4.tcp_max_tw_buckets = 36000
#   - Maximum number of sockets held in TIME_WAIT.
#
# net.ipv4.tcp_tw_reuse = 1
#   - Allow TIME_WAIT sockets to be reused for new outbound connections.
#
# net.ipv4.tcp_max_orphans = 327680
#   - Maximum number of orphaned TCP sockets (not attached to any process).
#
# net.ipv4.tcp_orphan_retries = 3
#   - Retry count before giving up on orphaned sockets.
#
# net.ipv4.tcp_syncookies = 1
#   - Enable SYN cookies to mitigate SYN-flood attacks.
#
# net.ipv4.tcp_max_syn_backlog = 16384
#   - Maximum length of the half-open (SYN) connection queue.
#
# net.ipv4.tcp_timestamps = 0
#   - Disable TCP timestamps.
#
# net.core.somaxconn = 16384
#   - Maximum listen backlog for accepted connections.
#
# net.ipv6.conf.all.disable_ipv6 = 1
# net.ipv6.conf.default.disable_ipv6 = 1
# net.ipv6.conf.lo.disable_ipv6 = 1
#   - Disable IPv6 on all, default, and loopback interfaces (this cluster is IPv4-only).
#
# net.ipv6.conf.all.forwarding = 1
#   - Allow IPv6 packet forwarding (a no-op while IPv6 is disabled above).

1.4 Time synchronization

rm -rf /etc/localtime
ln -s /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
yum -y install ntpdate
ntpdate ntp1.aliyun.com
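
ntpdate performs a one-shot sync; one simple way to keep clocks aligned afterwards (assuming cron is acceptable here; chrony is the more modern alternative) is a cron entry:

# Re-sync against the Aliyun NTP server every 30 minutes
echo "*/30 * * * * /usr/sbin/ntpdate ntp1.aliyun.com >/dev/null 2>&1" >> /var/spool/cron/root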

2. Install Docker and cri-dockerd

2.1 Install Docker

yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

yum -y install docker-ce

cat > /etc/docker/daemon.json << EOF
{
  "registry-mirrors": ["https://docker.mrlch.online"],
  "insecure-registries": ["home.mrlch.cn:8888"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF

systemctl daemon-reload
systemctl enable --now docker
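
It is worth confirming that Docker picked up the systemd cgroup driver, since the kubelet configuration later in this guide assumes systemd:

docker info | grep -i "cgroup driver"
# Expected output: Cgroup Driver: systemd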

2.2 Install cri-dockerd

wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.15/cri-dockerd-0.3.15.amd64.tgz

tar xf cri-dockerd-0.3.15.amd64.tgz
cp cri-dockerd/cri-dockerd /usr/local/bin/

Create the service unit /usr/lib/systemd/system/cri-docker.service:

cat <<"EOF" > /usr/lib/systemd/system/cri-docker.service
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket

[Service]
Type=notify

ExecStart=/usr/local/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.7

ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always

StartLimitBurst=3

StartLimitInterval=60s

LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity

TasksMax=infinity
Delegate=yes
KillMode=process

[Install]
WantedBy=multi-user.target

EOF

Create the socket unit /usr/lib/systemd/system/cri-docker.socket:

cat <<"EOF" > /usr/lib/systemd/system/cri-docker.socket
[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-docker.service

[Socket]
ListenStream=%t/cri-dockerd.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker

[Install]
WantedBy=sockets.target

EOF
Enable the services on boot (note the unit created above is named cri-docker, not cri-dockerd):
systemctl daemon-reload
systemctl enable cri-docker.socket cri-docker.service
systemctl restart cri-docker.service
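
A quick sanity check that the CRI socket kubelet will talk to is actually present:

systemctl is-active cri-docker.service cri-docker.socket
ls -l /var/run/cri-dockerd.sock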

3. Configure load balancing

Install keepalived on all three master nodes

yum install keepalived -y

192.168.10.232

vi /etc/keepalived/keepalived.conf
global_defs {
    router_id master1
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 60
    priority 130
    advert_int 3                          
    track_interface {
        eth0
    }
    # Run keepalived in non-preempt mode
    nopreempt
    # Unicast has advantages: many networks do not allow VRRP multicast,
    # and listing explicit peers works around that restriction
    unicast_src_ip 192.168.10.232
    unicast_peer {
        192.168.10.229
        192.168.10.230
    }
    virtual_ipaddress {
        192.168.10.251
    }
}

192.168.10.229

vi /etc/keepalived/keepalived.conf
global_defs {
    router_id master2
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 60
    priority 131
    advert_int 3                          
    track_interface {
        eth0
    }
    # Run keepalived in non-preempt mode
    nopreempt
    # Unicast has advantages: many networks do not allow VRRP multicast,
    # and listing explicit peers works around that restriction
    unicast_src_ip 192.168.10.229
    unicast_peer {
        192.168.10.232
        192.168.10.230
    }
    virtual_ipaddress {
        192.168.10.251
    }
}

192.168.10.230

vi /etc/keepalived/keepalived.conf
global_defs {
    router_id master3
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 60
    priority 132
    advert_int 3                          
    track_interface {
        eth0
    }
    # Run keepalived in non-preempt mode (nopreempt requires state BACKUP;
    # this node still wins the initial election via its highest priority)
    nopreempt
    # Unicast has advantages: many networks do not allow VRRP multicast,
    # and listing explicit peers works around that restriction
    unicast_src_ip 192.168.10.230
    unicast_peer {
        192.168.10.229
        192.168.10.232
    }
    virtual_ipaddress {
        192.168.10.251
    }
}
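
The original configs referenced a track_script named chk_haproxy that was never defined (and no haproxy is installed in this setup), so those blocks are omitted above. If you want the VIP to follow API server health, a hypothetical check along these lines can be added to each config (the script path and contents are illustrative, not part of the original setup):

vrrp_script chk_apiserver {
    # /etc/keepalived/check_apiserver.sh could simply run:
    #   curl -sk --max-time 2 https://127.0.0.1:6443/healthz
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    fall 3
    rise 2
}
# ...and inside vrrp_instance VI_1:
#    track_script {
#        chk_apiserver
#    }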

Start keepalived

systemctl daemon-reload
systemctl start keepalived
systemctl enable keepalived

4. Install kubeadm, kubelet, and kubectl

cat <<"EOF" > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v1.30/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.30/rpm/repodata/repomd.xml.key

EOF

# Install the components from this repo (overriding the exclude) and enable kubelet on boot

yum install  -y kubelet kubeadm kubectl --disableexcludes=kubernetes
systemctl enable kubelet
systemctl start kubelet
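
Confirm the installed versions match on every node:

kubeadm version -o short
kubelet --version
kubectl version --client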

To build kubeadm, kubelet, and kubectl with 100-year certificates, see: https://www.cnblogs.com/niuben/p/18120379

5. Initialize the First Master

Note: stop keepalived on the other two masters for now (systemctl stop keepalived) and start it again only after all masters have been deployed. The kubeadm parameters below only need to be set on one master machine, 192.168.10.232. kubeadm.yaml:

apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 72h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.10.232
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
  imagePullPolicy: IfNotPresent
  kubeletExtraArgs:
    # Use the IP of the node running kubeadm init here
    node-ip: 192.168.10.232
  taints:
  - effect: PreferNoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  certSANs:
  - localhost
  - kubernetes
  - kubernetes.default
  - kubernetes.default.svc
  - kubernetes.default.svc.cluster
  - kubernetes.default.svc.cluster.local
  - 127.0.0.1
  - 0:0:0:0:0:0:0:1
  - 172.23.0.1
  - 192.168.10.229
  - 192.168.10.230
  - 192.168.10.232
  - 192.168.10.251
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
kind: ClusterConfiguration
# Kubernetes version to install
kubernetesVersion: 1.31.0
imageRepository: registry.aliyuncs.com/google_containers
networking:
  dnsDomain: cluster.local
  podSubnet: 172.16.0.0/12
  serviceSubnet: 10.96.0.0/16
scheduler: {}
# Use the load-balanced VIP endpoint here
controlPlaneEndpoint: "192.168.10.251:6443"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 0s
    cacheUnauthorizedTTL: 0s
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
cpuManagerReconcilePeriod: 0s
evictionPressureTransitionPeriod: 0s
fileCheckFrequency: 0s
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 0s
imageMinimumGCAge: 0s
kind: KubeletConfiguration
cgroupDriver: systemd
logging: {}
memorySwap: {}
nodeStatusReportFrequency: 0s
nodeStatusUpdateFrequency: 0s
rotateCertificates: true
runtimeRequestTimeout: 0s
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 0s
syncFrequency: 0s
volumeStatsAggPeriod: 0s

Run the initialization

kubeadm init --config=kubeadm.yaml
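
On success, kubeadm prints the join command, including the token and the CA certificate hash used in the join configs below. The hash can also be recomputed at any time from the cluster CA (standard kubeadm procedure):

openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt \
  | openssl rsa -pubin -outform der 2>/dev/null \
  | openssl dgst -sha256 -hex | sed 's/^.* //'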

6. Join the Other Master Nodes

6.1 Copy the certificates from the previous step to the other masters

# Use this script to copy the certificates to the other master nodes
USER=root
CONTROL_PLANE_IPS="192.168.10.229 192.168.10.230"
for host in ${CONTROL_PLANE_IPS}; do
    scp /etc/kubernetes/pki/ca.crt "${USER}"@$host:
    scp /etc/kubernetes/pki/ca.key "${USER}"@$host:
    scp /etc/kubernetes/pki/sa.key "${USER}"@$host:
    scp /etc/kubernetes/pki/sa.pub "${USER}"@$host:
    scp /etc/kubernetes/pki/front-proxy-ca.crt "${USER}"@$host:
    scp /etc/kubernetes/pki/front-proxy-ca.key "${USER}"@$host:
    scp /etc/kubernetes/pki/etcd/ca.crt "${USER}"@$host:etcd-ca.crt
    # If you are using external etcd, skip the next line
    scp /etc/kubernetes/pki/etcd/ca.key "${USER}"@$host:etcd-ca.key
done

# Run on the other masters to move the certificate files into the required directory
USER=root
mkdir -p /etc/kubernetes/pki/etcd
mv /${USER}/ca.crt /etc/kubernetes/pki/
mv /${USER}/ca.key /etc/kubernetes/pki/
mv /${USER}/sa.pub /etc/kubernetes/pki/
mv /${USER}/sa.key /etc/kubernetes/pki/
mv /${USER}/front-proxy-ca.crt /etc/kubernetes/pki/
mv /${USER}/front-proxy-ca.key /etc/kubernetes/pki/
mv /${USER}/etcd-ca.crt /etc/kubernetes/pki/etcd/ca.crt
# If you are using external etcd, skip the next line
mv /${USER}/etcd-ca.key /etc/kubernetes/pki/etcd/ca.key

6.2 192.168.10.229

kubeadm-join-master-2.yaml

apiVersion: kubeadm.k8s.io/v1beta3
kind: JoinConfiguration
controlPlane:
  localAPIEndpoint:
    advertiseAddress: "192.168.10.229"
    bindPort: 6443
discovery:
  bootstrapToken:
    apiServerEndpoint: 192.168.10.251:6443
    token: "abcdef.0123456789abcdef"
    caCertHashes:
    - "sha256:3e6859e3f45c2c9516e5e80639f4cdf0b108e67478059192d4c3089e59cb8932"
    # Change the credentials above to match the token and CA cert hash actually used by your cluster
nodeRegistration:
  kubeletExtraArgs:
    node-ip: 192.168.10.229
  criSocket: unix:///var/run/cri-dockerd.sock
  imagePullPolicy: IfNotPresent

kubeadm join --config=kubeadm-join-master-2.yaml

6.3 192.168.10.230

kubeadm-join-master-3.yaml

apiVersion: kubeadm.k8s.io/v1beta3
kind: JoinConfiguration
controlPlane:
  localAPIEndpoint:
    advertiseAddress: "192.168.10.230"
    bindPort: 6443
discovery:
  bootstrapToken:
    apiServerEndpoint: 192.168.10.251:6443
    token: "abcdef.0123456789abcdef"
    caCertHashes:
    - "sha256:3e6859e3f45c2c9516e5e80639f4cdf0b108e67478059192d4c3089e59cb8932"
    # Change the credentials above to match the token and CA cert hash actually used by your cluster
nodeRegistration:
  kubeletExtraArgs:
    node-ip: 192.168.10.230
  criSocket: unix:///var/run/cri-dockerd.sock
  imagePullPolicy: IfNotPresent

kubeadm join --config=kubeadm-join-master-3.yaml

6.4 Once all masters have joined, the nodes are visible

[root@k8s-master-prod net]# mkdir -p /root/.kube
[root@k8s-master-prod net]# cp /etc/kubernetes/admin.conf /root/.kube/config
[root@k8s-master-prod net]# kubectl get no
NAME                STATUS        ROLES           AGE    VERSION
k8s-master-prod     NotReady      control-plane   3h1m   v1.31.0-cert-99y
k8s-master-prod-2   NotReady      control-plane   138m   v1.31.0-cert-99y
k8s-master-prod-3   NotReady      control-plane   140m   v1.31.0-cert-99y

7. Install the Network Plugin (Calico BGP)

wget https://mirrors.chenby.cn/https://github.com/projectcalico/calico/blob/master/manifests/calico-typha.yaml

vim calico-typha.yaml
# In the calico-config ConfigMap, the CNI config should use calico-ipam:
    "ipam": {
        "type": "calico-ipam"
    },
# In the calico-node DaemonSet environment:
    - name: IP
      value: "autodetect"

    # Must match the podSubnet set in kubeadm.yaml
    - name: CALICO_IPV4POOL_CIDR
      value: "172.16.0.0/12"

# Optionally rewrite the image registry here if docker.io cannot be reached
sed -i "s#docker.io/calico/#docker.xx.xx/calico/#g" calico-typha.yaml
kubectl apply -f calico-typha.yaml

[root@k8s-master-prod net]# kubectl get po -n kube-system | grep calico
calico-kube-controllers-d64cb6586-f6vds     1/1     Running   0             40m
calico-node-8k2pq                           1/1     Running   2 (44m ago)   47m
calico-node-htmk2                           1/1     Running   2 (44m ago)   47m
calico-node-rcwv6                           1/1     Running   3 (44m ago)   51m
calico-typha-544c49d8c6-cm8xr               1/1     Running   0             47m

[root@k8s-master-prod net]# kubectl get no -w
NAME                STATUS   ROLES           AGE    VERSION
k8s-master-prod     Ready    control-plane   3h2m   v1.31.0-cert-99y
k8s-master-prod-2   Ready    control-plane   139m   v1.31.0-cert-99y
k8s-master-prod-3   Ready    control-plane   141m   v1.31.0-cert-99y

8. Join the Worker Nodes

cat > kubeadm-join-node-01.yaml << EOF
apiVersion: kubeadm.k8s.io/v1beta3
kind: JoinConfiguration
discovery:
  bootstrapToken:
    apiServerEndpoint: 192.168.10.251:6443
    token: "abcdef.0123456789abcdef"
    caCertHashes:
    - "sha256:3e6859e3f45c2c9516e5e80639f4cdf0b108e67478059192d4c3089e59cb8932"
    # Change the credentials above to match the token and CA cert hash actually used by your cluster
nodeRegistration:
  kubeletExtraArgs:
    node-ip: 192.168.10.226
  criSocket: unix:///var/run/cri-dockerd.sock
  imagePullPolicy: IfNotPresent
  name: 192.168.10.226
EOF

kubeadm join --config=kubeadm-join-node-01.yaml
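
The same join file can be reused for the remaining workers by swapping in their IPs from the environment table, for example:

# node-2 (192.168.10.227); repeat analogously for node-3 (192.168.10.228)
sed 's/192.168.10.226/192.168.10.227/g' kubeadm-join-node-01.yaml > kubeadm-join-node-02.yaml
kubeadm join --config=kubeadm-join-node-02.yaml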

Appendix: etcd (this section can be skipped; kubeadm deploys etcd automatically)

Download the etcd package

# Download the release tarball
wget https://mirrors.chenby.cn/https://github.com/etcd-io/etcd/releases/download/v3.5.12/etcd-v3.5.12-linux-amd64.tar.gz

tar -xf etcd-v3.5.12-linux-amd64.tar.gz && mv etcd-*/etcd /usr/local/bin/ && mv etcd-*/etcdctl /usr/local/bin/

# Copy etcd and etcdctl to the other master nodes as well

Issue the etcd certificates

Download the certificate tooling

Install the cfssl tools and certificate files on one of the machines

# Download the cert-generation tools on the master-1 node
wget "https://mirrors.chenby.cn/https://github.com/cloudflare/cfssl/releases/download/v1.6.4/cfssl_1.6.4_linux_amd64" -O /usr/local/bin/cfssl
wget "https://mirrors.chenby.cn/https://github.com/cloudflare/cfssl/releases/download/v1.6.4/cfssljson_1.6.4_linux_amd64" -O /usr/local/bin/cfssljson

# Or, if you already have the binaries in a local package, copy them instead
cp cfssl_*_linux_amd64 /usr/local/bin/cfssl
cp cfssljson_*_linux_amd64 /usr/local/bin/cfssljson

# Make them executable
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson

Generate the etcd certificates

# Write the signing configuration needed for certificate generation
cat > ca-config.json << EOF 
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ],
        "expiry": "876000h"
      }
    }
  }
}
EOF
# This configuration file defines the signing and authentication parameters.
#
# It has two parts: `signing` and `profiles`.
#
# `signing` holds the default signing configuration:
# the default expiry is `876000h`, i.e. certificates are valid for 100 years.
#
# `profiles` defines named certificate profiles. Here there is a single
# profile, `kubernetes`, with the following `usages` and `expiry`:
#
# 1. `signing`: may sign other certificates
# 2. `key encipherment`: may encrypt/decrypt transported data
# 3. `server auth`: server authentication
# 4. `client auth`: client authentication
#
# The `kubernetes` profile also uses an expiry of `876000h`, i.e. 100 years.

cat > etcd-ca-csr.json  << EOF 
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "etcd",
      "OU": "Etcd Security"
    }
  ],
  "ca": {
    "expiry": "876000h"
  }
}
EOF
# This is a JSON config file for generating a certificate signing request (CSR);
# it specifies the data needed to produce the request.
#
# - "CN": "etcd" sets the Common Name, i.e. the subject of the certificate,
#   usually the name of the entity the certificate identifies.
# - "key": {} configures the key: "algo": "rsa" selects RSA and "size": 2048 a 2048-bit key.
# - "names": [] carries the subject details; here a single entity:
#   - "C": "CN" is the country code (China).
#   - "ST": "Beijing" is the province/state.
#   - "L": "Beijing" is the city.
#   - "O": "etcd" is the organization.
#   - "OU": "Etcd Security" is the organizational unit.
# - "ca": {} configures the CA itself:
#   - "expiry": "876000h" sets the CA validity to 876000 hours.
#
# cfssl uses this file to produce a CSR, which is then signed by the CA to
# obtain a valid certificate.

# Generate the etcd CA certificate and key (if you expect to scale out later,
# reserve a few extra IPs in the -hostname list further below)
# IPv6 entries can be dropped or kept if you have no IPv6

mkdir -p /etc/etcd/ssl
cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare /etc/etcd/ssl/etcd-ca
# In detail:
#
# cfssl is a toolkit for generating TLS/SSL certificates; it supports PKI,
# JSON config files, and integration with many other tools.
#
# gencert generates certificates. -initca initializes a CA (the root
# certificate used to sign other certificates). etcd-ca-csr.json is the JSON
# config describing the CA (key, subject, validity); it supplies the
# information needed to create the CA certificate.
#
# | pipes the output of the previous command into the next.
#
# cfssljson is a cfssl companion command that formats cfssl's JSON output.
# -bare writes only the bare certificate files, and /etc/etcd/ssl/etcd-ca is
# the output path prefix.
#
# In short: generate a CA from etcd-ca-csr.json and save the certificate files
# under /etc/etcd/ssl/etcd-ca.

cat > etcd-csr.json << EOF 
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "etcd",
      "OU": "Etcd Security"
    }
  ]
}
EOF
# This JSON config file produces the certificate signing request (CSR) for the
# etcd server certificate.
#
# "CN" sets the Common Name to "etcd".
#
# "key" selects the algorithm ("algo") and length ("size"): a 2048-bit RSA key.
#
# "names" is an array holding the remaining subject fields:
# - "C": country code, here "CN".
# - "ST": province/state, here "Beijing".
# - "L": city, here "Beijing".
# - "O": organization, here "etcd".
# - "OU": organizational unit, here "Etcd Security".
#
# These fields become part of the certificate and identify its scope and issuer.

cfssl gencert \
  -ca=/etc/etcd/ssl/etcd-ca.pem \
  -ca-key=/etc/etcd/ssl/etcd-ca-key.pem \
  -config=ca-config.json \
  -hostname=127.0.0.1,k8s-master-prod-2,k8s-master-prod-3,k8s-master-prod,192.168.10.229,192.168.10.230,192.168.10.232,192.168.10.251 \
  -profile=kubernetes \
  etcd-csr.json | cfssljson -bare /etc/etcd/ssl/etcd
# This cfssl command issues the etcd certificate; parameter by parameter:
#
# -ca=/etc/etcd/ssl/etcd-ca.pem: the CA certificate used to sign the etcd cert.
# -ca-key=/etc/etcd/ssl/etcd-ca-key.pem: the CA private key.
# -config=ca-config.json: the CA config defining validity, algorithms, etc.
# -hostname=xxxx: the hostnames and IPs the etcd certificate must cover.
# -profile=kubernetes: which profile from the config to apply.
# etcd-csr.json: the JSON file describing the etcd certificate request.
# | cfssljson -bare /etc/etcd/ssl/etcd: pipes the output through cfssljson,
#   producing /etc/etcd/ssl/etcd.pem and /etc/etcd/ssl/etcd-key.pem.
#
# In short: sign the etcd server certificate with the CA created above, using
# the request file and signing configuration.

Copy the certificates to the other etcd nodes

On the other two nodes:
mkdir -p /etc/etcd/ssl
Then copy the files from /etc/etcd/ssl in the previous step to the same path on the other machines.
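
For example, from the node where the certificates were generated (assumed here to be master-1, 192.168.10.229, with root SSH access as in the earlier control-plane copy step):

for host in 192.168.10.230 192.168.10.232; do
    ssh root@$host "mkdir -p /etc/etcd/ssl"
    scp /etc/etcd/ssl/etcd-ca.pem /etc/etcd/ssl/etcd-ca-key.pem \
        /etc/etcd/ssl/etcd.pem /etc/etcd/ssl/etcd-key.pem \
        root@$host:/etc/etcd/ssl/
done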

Configure etcd

192.168.10.232

cat > /etc/etcd/etcd.config.yml << EOF 
name: 'etcd-192.168.10.232'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.10.232:2380'
listen-client-urls: 'https://192.168.10.232:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.10.232:2380'
advertise-client-urls: 'https://192.168.10.232:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'etcd-192.168.10.229=https://192.168.10.229:2380,etcd-192.168.10.230=https://192.168.10.230:2380,etcd-192.168.10.232=https://192.168.10.232:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF

192.168.10.230

cat > /etc/etcd/etcd.config.yml << EOF 
name: 'etcd-192.168.10.230'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.10.230:2380'
listen-client-urls: 'https://192.168.10.230:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.10.230:2380'
advertise-client-urls: 'https://192.168.10.230:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'etcd-192.168.10.229=https://192.168.10.229:2380,etcd-192.168.10.230=https://192.168.10.230:2380,etcd-192.168.10.232=https://192.168.10.232:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF

192.168.10.229

cat > /etc/etcd/etcd.config.yml << EOF 
name: 'etcd-192.168.10.229'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.10.229:2380'
listen-client-urls: 'https://192.168.10.229:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.10.229:2380'
advertise-client-urls: 'https://192.168.10.229:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'etcd-192.168.10.229=https://192.168.10.229:2380,etcd-192.168.10.230=https://192.168.10.230:2380,etcd-192.168.10.232=https://192.168.10.232:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF

etcd parameter reference

This configuration file drives the etcd cluster. The important parameters and options:

- `name`: the name of this node, used to distinguish members within the cluster.
- `data-dir`: directory where etcd stores its data.
- `wal-dir`: directory where etcd writes its write-ahead log.
- `snapshot-count`: number of transactions that triggers a snapshot.
- `heartbeat-interval`: heartbeat interval between cluster members.
- `election-timeout`: leader-election timeout.
- `quota-backend-bytes`: storage quota; 0 means unlimited.
- `listen-peer-urls`: URLs for peer-to-peer communication, over HTTPS.
- `listen-client-urls`: URLs clients use to reach etcd, including the local endpoint.
- `max-snapshots`: number of snapshots to retain.
- `max-wals`: number of WAL files to retain.
- `initial-advertise-peer-urls`: peer URLs advertised to the cluster at startup.
- `advertise-client-urls`: client URLs advertised at startup.
- `discovery`: cluster-discovery options.
- `initial-cluster`: the initial membership of the etcd cluster.
- `initial-cluster-token`: the cluster token.
- `initial-cluster-state`: the initial cluster state.
- `strict-reconfig-check`: whether to strictly validate reconfiguration requests.
- `enable-v2`: enables the v2 API.
- `enable-pprof`: enables profiling.
- `proxy`: proxy-mode setting.
- `client-transport-security`: TLS configuration for client connections.
- `peer-transport-security`: TLS configuration for peer connections.
- `debug`: whether debug mode is enabled.
- `log-package-levels`: per-package log levels.
- `log-outputs`: log output targets.
- `force-new-cluster`: whether to force creation of a new cluster.

Start etcd

cat > /usr/lib/systemd/system/etcd.service << EOF
[Unit]
Description=Etcd Service
Documentation=https://coreos.com/etcd/docs/latest/
After=network.target
[Service]
Type=notify
ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd.config.yml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
Alias=etcd3.service
EOF

mkdir -p /etc/kubernetes/pki/etcd
ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/

systemctl daemon-reload
# Reload systemd's unit files; required after adding or editing unit files (.service, .socket, etc.).

systemctl enable --now etcd.service
# Enable etcd.service on boot and start it immediately; this is the systemd unit for the etcd daemon.

systemctl restart etcd.service
# Restart the etcd.service unit, i.e. restart the etcd daemon.

systemctl status etcd.service
# Show the current state of etcd.service: whether it is running, enabled, and recent log lines.

Check etcd status

[root@k8s-master-prod ssl]# etcdctl --endpoints="192.168.10.229:2379,192.168.10.230:2379,192.168.10.232:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem  endpoint status --write-out=table
+---------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
|      ENDPOINT       |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+---------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| 192.168.10.229:2379 | 11d77d624073789a |  3.5.12 |   20 kB |     false |      false |         3 |         12 |                 12 |        |
| 192.168.10.230:2379 | 5bc6ac691ac00f90 |  3.5.12 |   20 kB |      true |      false |         3 |         12 |                 12 |        |
| 192.168.10.232:2379 | bb7b5b53bd8563ce |  3.5.12 |   20 kB |     false |      false |         3 |         12 |                 12 |        |
+---------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
[root@k8s-master-prod ssl]#
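
A health check with the same TLS flags is also useful:

etcdctl --endpoints="192.168.10.229:2379,192.168.10.230:2379,192.168.10.232:2379" \
  --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem \
  --cert=/etc/kubernetes/pki/etcd/etcd.pem \
  --key=/etc/kubernetes/pki/etcd/etcd-key.pem \
  endpoint health --write-out=table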
