一键快速部署Kubernetes高可用集群 |
发表者:admin分类:云计算容器2022-12-12 14:46:02 阅读[681] |
一键快速部署Kubernetes高可用集群
三个主节点、三个Node节点、两个用于负载平衡的节点以及一个虚拟IP地址。此示例中的虚拟IP地址也称为浮动IP地址。
也就是说,即使节点发生故障,也可以在节点之间交换IP地址,实现故障切换,实现高可用性。
由于资源限制服务器进行了复用,如下:
所有机器操作如下:
修改主机名
hostnamectl set-hostname k8s-master01
hostnamectl set-hostname k8s-master02
hostnamectl set-hostname k8s-master03
hostnamectl set-hostname k8s-node01
hostnamectl set-hostname k8s-node02
[root@k8s-master01 ~]# MasterNodes='k8s-master01 k8s-master02 k8s-master03'
[root@k8s-master01 ~]# WorkNodes='k8s-node01 k8s-node02'
[root@k8s-master01 ~]# for NODE in $MasterNodes; do ssh-copy-id $NODE; done
[root@k8s-master01 ~]# for NODE in $WorkNodes; do ssh-copy-id $NODE; done
执行初始化脚本
[root@k8s-master01 ~]# vim init.sh
#!/bin/sh
# init.sh — one-shot node initialisation for the Kubernetes HA cluster:
# hosts entries, firewall/swap/SELinux disabled, Aliyun repos, time sync,
# and Docker CE pinned to a kubelet-compatible version.

# Static name resolution for every cluster node.
echo "192.168.102.71 k8s-master01" >> /etc/hosts
echo "192.168.102.72 k8s-master02" >> /etc/hosts
echo "192.168.102.73 k8s-master03" >> /etc/hosts
echo "192.168.102.74 k8s-node01" >> /etc/hosts
echo "192.168.102.75 k8s-node02" >> /etc/hosts

# kube-proxy/CNI manage packet filtering themselves; kubelet refuses to run
# with swap enabled; SELinux is disabled for the container runtime.
systemctl stop firewalld
systemctl disable firewalld
swapoff -a
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
setenforce 0
sed -ri 's/.*swap.*/#&/' /etc/fstab   # keep swap disabled across reboots

# Aliyun base repo + tooling (ipvsadm/ipset/conntrack for kube-proxy IPVS mode).
# Fixed: the original line had a stray 'install' token in the package list and
# listed conntrack/ipset twice.
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
yum install -y yum-utils device-mapper-persistent-data lvm2 epel-release \
  ipvsadm ipset sysstat conntrack libseccomp socat git ebtables

# Time synchronisation — etcd and apiserver certificates are clock sensitive.
yum install -y ntp
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo 'Asia/Shanghai' > /etc/timezone
ntpdate time2.aliyun.com
echo "*/1 * * * * ntpdate time2.aliyun.com" >> /etc/crontab

# Docker CE 20.10.7 with the systemd cgroup driver (must match kubelet).
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install -y docker-ce-20.10.7 docker-ce-cli-20.10.7 containerd.io-1.4.6
# -p on both mkdirs so re-running the script is harmless.
mkdir -p /opt/docker && mkdir -p /etc/docker
cat > /etc/docker/daemon.json <<-EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"storage-driver": "overlay2",
"storage-opts": [
"overlay2.override_kernel_check=true"
],
"registry-mirrors": ["https://7uuu3esz.mirror.aliyuncs.com","https://moefhjht.mirror.aliyuncs.com"],
"data-root": "/opt/docker"
}
EOF
systemctl daemon-reload && systemctl enable --now docker
[root@k8s-master01 ~]# Nodes='k8s-master02 k8s-master03 k8s-node01 k8s-node02'
[root@k8s-master01 ~]# for node in $Nodes; do scp init.sh $node:/root/; done
[root@k8s-master01 ~]# Nodes='k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02'
[root@k8s-master01 ~]# for node in $Nodes; do ssh $node 'sh /root/init.sh'; done
所有节点内核升级
[root@k8s-master01 ~]# vim kernel.sh
#!/bin/sh
# kernel.sh — upgrade the node to the latest mainline (kernel-ml) kernel from ELRepo.
# NOTE(review): a reboot is presumably required afterwards to boot the new kernel — confirm.
# Import the ELRepo package-signing key so the release RPM verifies.
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
# Install the ELRepo release package for CentOS/RHEL 7 (provides the elrepo-kernel repo).
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm
# kernel-ml = mainline stable kernel; -devel is needed for building out-of-tree modules.
yum --enablerepo=elrepo-kernel install kernel-ml kernel-ml-devel -y
[root'k8s-master02 k8s-master03 k8s-node01 k8s-node02' -master01 ~]# Nodes=
[rootfor node in $Nodes;do scp kernel.sh $node:/root/ ;done -master01 ~]#
[root'k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02' -master01 ~]# Nodes=
[rootfor node in $Nodes;do ssh $node 'sh /root/kernel.sh' ;done -master01 ~]#
K8s-node01 192.168.102.74 操作下
~]# yum install keepalived haproxy psmisc -y
~]# cat /etc/haproxy/haproxy.cfg
global
    log /dev/log local0 warning
    chroot /var/lib/haproxy
    pidfile /var/run/haproxy.pid
    maxconn 4000
    user haproxy
    group haproxy
    daemon
    stats socket /var/lib/haproxy/stats

defaults
    log global
    option httplog
    option dontlognull
    timeout connect 5000
    timeout client 50000
    timeout server 50000

# TCP passthrough for the kube-apiserver: haproxy does not terminate TLS,
# it only balances raw connections across the three masters.
frontend kube-apiserver
    bind *:6443
    mode tcp
    option tcplog
    default_backend kube-apiserver

backend kube-apiserver
    mode tcp
    option tcplog
    option tcp-check
    balance roundrobin
    # Fixed: the 'default-server' keyword was missing — a bare 'inter 10s ...'
    # line is not valid haproxy configuration.
    default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
    server kube-apiserver-1 192.168.102.71:6443 check # Replace the IP address
    server kube-apiserver-2 192.168.102.72:6443 check # Replace the IP address
    server kube-apiserver-3 192.168.102.73:6443 check # Replace the IP address
~]# systemctl restart haproxy
~]# systemctl enable haproxy
#配置keepalived
~]# vim /etc/keepalived/keepalived.conf
global_defs {
    notification_email {
    }
    router_id LVS_DEVEL
    vrrp_skip_check_adv_addr
    vrrp_garp_interval 0
    vrrp_gna_interval 0
}

# Health check: 'killall -0 haproxy' succeeds only while haproxy is running.
# On failure this node's priority drops by 30, letting the BACKUP take the VIP.
vrrp_script chk_haproxy {
    script "killall -0 haproxy"
    interval 2
    weight -30
}

vrrp_instance haproxy-vip {
    state MASTER
    priority 100
    interface eth0            # NIC device name (网卡设备名)
    virtual_router_id 60
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    unicast_src_ip 192.168.102.74   # node01 IP
    unicast_peer {
        192.168.102.75              # node02 IP (value was stripped in the original paste)
    }
    virtual_ipaddress {
        192.168.102.77/24           # The VIP address (restored from the 'ip addr' output)
    }
    track_script {
        chk_haproxy
    }
}
~]# systemctl restart keepalived
~]# systemctl enable keepalived
K8s-node02 192.168.102.75 操作下
~]# yum install keepalived haproxy psmisc -y
~]# cat /etc/haproxy/haproxy.cfg
global
    log /dev/log local0 warning
    chroot /var/lib/haproxy
    pidfile /var/run/haproxy.pid
    maxconn 4000
    user haproxy
    group haproxy
    daemon
    stats socket /var/lib/haproxy/stats

defaults
    log global
    option httplog
    option dontlognull
    timeout connect 5000
    timeout client 50000
    timeout server 50000

# TCP passthrough for the kube-apiserver: haproxy does not terminate TLS,
# it only balances raw connections across the three masters.
frontend kube-apiserver
    bind *:6443
    mode tcp
    option tcplog
    default_backend kube-apiserver

backend kube-apiserver
    mode tcp
    option tcplog
    option tcp-check
    balance roundrobin
    # Fixed: the 'default-server' keyword was missing — a bare 'inter 10s ...'
    # line is not valid haproxy configuration.
    default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
    server kube-apiserver-1 192.168.102.71:6443 check # Replace the IP address
    server kube-apiserver-2 192.168.102.72:6443 check # Replace the IP address
    server kube-apiserver-3 192.168.102.73:6443 check # Replace the IP address
~]# systemctl restart haproxy
~]# systemctl enable haproxy
~]# cat /etc/keepalived/keepalived.conf
global_defs {
    notification_email {
    }
    router_id LVS_DEVEL
    vrrp_skip_check_adv_addr
    vrrp_garp_interval 0
    vrrp_gna_interval 0
}

# Health check: 'killall -0 haproxy' succeeds only while haproxy is running.
# On failure this node's priority drops by 30, so the MASTER keeps the VIP
# unless it is itself unhealthy.
vrrp_script chk_haproxy {
    script "killall -0 haproxy"
    interval 2
    weight -30
}

vrrp_instance haproxy-vip {
    state BACKUP              # node02 is the standby; node01 is MASTER
    priority 90               # lower than the MASTER's 100
    interface eth0            # NIC device name (网卡设备名)
    virtual_router_id 60
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    unicast_src_ip 192.168.102.75   # node02 IP
    unicast_peer {
        192.168.102.74              # node01 IP (value was stripped in the original paste)
    }
    virtual_ipaddress {
        192.168.102.77/24           # The VIP address (restored from the 'ip addr' output)
    }
    track_script {
        chk_haproxy
    }
}
~]# systemctl restart keepalived
~]# systemctl enable keepalived
检查VIP是否生成
[root@k8s-node01 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether 00:50:56:bb:85:de brd ff:ff:ff:ff:ff:ff
inet 192.168.102.74/24 brd 192.168.102.255 scope global noprefixroute eth0
valid_lft forever preferred_lft forever
inet 192.168.102.77/24 scope global secondary eth0
valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:f4:99:5a:28 brd ff:ff:ff:ff:ff:ff
inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
valid_lft forever preferred_lft forever
[root@k8s-node01 ~]# ping 192.168.102.77
PING 192.168.102.77 (192.168.102.77) 56(84) bytes of data.
64 bytes from 192.168.102.77: icmp_seq=1 ttl=64 time=0.051 ms
64 bytes from 192.168.102.77: icmp_seq=2 ttl=64 time=0.043 ms
KubeKey(由 Go 语言开发)是一种全新的安装工具,替代了以前使用的基于 ansible 的安装程序。KubeKey 为您提供灵活的安装选择,您可以一条命令安装 Kubernetes集群。(底层基于kubeadm方式)
下载一键安装工具并配置
~]# export KKZONE=cn
~]# curl -sfL https://get-kk.kubesphere.io | sh -
#生成安装集群的配置清单
[root@k8s-master01 KubeKey]# ./kk create config --with-kubernetes v1.21.5 -f k8s-cluster.yaml
#修改如下
# KubeKey cluster manifest. The original paste lost the YAML list markers ('-')
# and the 'master' role key; restored here so the file parses.
apiVersion: kubekey.kubesphere.io/v1alpha2
kind: Cluster
metadata:
  name: sample
spec:
  hosts:
  - {name: k8s-master01, address: 192.168.102.71, internalAddress: 192.168.102.71, user: root, password: "123.com"}
  - {name: k8s-master02, address: 192.168.102.72, internalAddress: 192.168.102.72, user: root, password: "123.com"}
  - {name: k8s-master03, address: 192.168.102.73, internalAddress: 192.168.102.73, user: root, password: "123.com"}
  - {name: k8s-node01, address: 192.168.102.74, internalAddress: 192.168.102.74, user: root, password: "123.com"}
  - {name: k8s-node02, address: 192.168.102.75, internalAddress: 192.168.102.75, user: root, password: "123.com"}
  roleGroups:
    etcd:
    - k8s-master01
    - k8s-master02
    - k8s-master03
    master:     # key name was stripped in the paste; 'master' is the v1alpha2 role key
    - k8s-master01
    - k8s-master02
    - k8s-master03
    worker:
    - k8s-node01
    - k8s-node02
  controlPlaneEndpoint:
    ## Internal loadbalancer for apiservers
    # internalLoadbalancer: haproxy
    domain: lb.kubesphere.local
    address: "192.168.102.77"   # 添加你的VIP — the keepalived VIP
    port: 6443
  kubernetes:
    version: v1.21.5
    clusterName: cluster.local
    autoRenewCerts: true
    containerManager: docker
  etcd:
    type: kubekey
  network:
    plugin: calico
    kubePodsCIDR: 10.233.64.0/18
    kubeServiceCIDR: 10.233.0.0/18
    ## multus support. https://github.com/k8snetworkplumbingwg/multus-cni
    multusCNI:
      enabled: false
  registry:
    privateRegistry: ""
    namespaceOverride: ""
    registryMirrors: []
    insecureRegistries: []
  addons: []
~]# ./kk create cluster -f k8s-cluster.yaml
[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready control-plane,master 22m v1.21.5
k8s-master02 Ready control-plane,master 21m v1.21.5
k8s-master03 Ready control-plane,master 21m v1.21.5
k8s-node01 Ready worker 21m v1.21.5
k8s-node02 Ready worker 21m v1.21.5
[root@k8s-master01 ~]# cat /root/.kube/config
apiVersion: v1
clusters:
- cluster:
............
server: https://lb.kubesphere.local:6443 #此地址在hosts中做了映射,就是我们的VIP
name: cluster.local
[root@k8s-master01 ~]# cat /etc/hosts
.......
192.168.102.77 lb.kubesphere.local
转载请标明出处【一键快速部署Kubernetes高可用集群】。
《www.micoder.cc》
虚拟化云计算,系统运维,安全技术服务.
Tags: | [阅读全文...] |
最新评论