
K8S Practice Ⅷ (HA Cluster Deployment)

Published: 2020-07-30 16:21:51 · Author: 一语成谶灬 · Category: System Operations

I. Environment Preparation

1. Cluster plan
Hostname       IP            Role
VIP            20.0.20.200   master-VIP
k8s-master01   20.0.20.201   master
k8s-master02   20.0.20.202   master
k8s-master03   20.0.20.203   master
k8s-node01     20.0.20.204   node
k8s-node02     20.0.20.205   node
k8s-node03     20.0.20.206   node
2. Basic environment configuration
  • Disable the firewall
  • Disable SELinux
  • Configure /etc/hosts
  • Configure passwordless SSH between the masters
    (a command sketch for these four steps is given at the end of this list)
  • Disable the swap partition
swapoff -a
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
  • Configure time synchronization
systemctl start chronyd
systemctl enable chronyd
timedatectl set-timezone "Asia/Shanghai"
  • Configure kernel parameters
    cat > /etc/sysctl.d/k8s.conf <<EOF
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    net.ipv4.ip_nonlocal_bind = 1
    net.ipv4.ip_forward = 1
    vm.swappiness=0
    EOF
    sysctl -p /etc/sysctl.d/k8s.conf
  • Enable IPVS
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack_ipv4"
for kernel_module in \${ipvs_modules}; do
    # load the module only if it exists for the running kernel
    if /sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1; then
        /sbin/modprobe \${kernel_module}
    fi
done
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
  • Install Docker (a sketch for this and the earlier command-less steps follows)
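The steps above that have no commands shown can be scripted roughly as follows. This is a minimal sketch for CentOS 7: the Docker version and the exact host entries are assumptions, and note that the net.bridge.* sysctl keys only exist once the br_netfilter module is loaded.

# Disable the firewall and SELinux
systemctl stop firewalld && systemctl disable firewalld
setenforce 0
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
# Host entries for all six machines (adjust to your environment)
cat >> /etc/hosts <<EOF
20.0.20.201 k8s-master01
20.0.20.202 k8s-master02
20.0.20.203 k8s-master03
20.0.20.204 k8s-node01
20.0.20.205 k8s-node02
20.0.20.206 k8s-node03
EOF
# Passwordless SSH from master01 to the other masters
ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
ssh-copy-id root@k8s-master02
ssh-copy-id root@k8s-master03
# Load br_netfilter before applying the sysctl settings above
modprobe br_netfilter
# Install Docker (18.09 was commonly paired with k8s 1.15; the version is an assumption)
yum install -y yum-utils
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install -y docker-ce-18.09.9 docker-ce-cli-18.09.9 containerd.io
systemctl enable docker && systemctl start docker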

II. Install HAProxy and keepalived on the Masters

1. Install the required packages
yum install -y keepalived haproxy ipvsadm socat
2. Configure keepalived
# cat /etc/keepalived/keepalived.conf
global_defs {
   router_id master01
}

vrrp_instance VI_1 {
    state MASTER
    interface ens192
    virtual_router_id 88
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 2323
    }
    virtual_ipaddress {
        20.0.20.200/24
    }
}

The configuration on the three nodes differs only in router_id, state, and priority; a sketch of master02's file follows.
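A minimal sketch of the corresponding file on k8s-master02 (k8s-master03 is analogous; the priority values 90 here and, say, 80 on master03 are example choices, anything below the MASTER's 100 works):

# /etc/keepalived/keepalived.conf on k8s-master02
global_defs {
   router_id master02
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens192
    virtual_router_id 88
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 2323
    }
    virtual_ipaddress {
        20.0.20.200/24
    }
}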

3. Configure HAProxy
# cat /etc/haproxy/haproxy.cfg
global
    log         127.0.0.1 local3
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     32768   # maximum number of concurrent connections per haproxy process
    user        haproxy
    group       haproxy
    daemon      # run in the background as a daemon
    nbproc      1   # number of haproxy processes to start
    stats socket /var/lib/haproxy/stats

defaults
    mode                    tcp
    log                     global
    option                  tcplog
    option                  dontlognull
    option                  redispatch
    retries                 3
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout check           10s

listen stats
    mode   http
    bind :8888      # stats page port
    stats   enable  # enable the stats page
    stats   uri     /admin?stats    # stats page URI
    stats   auth    admin:admin     # stats page credentials
    stats   admin   if TRUE

frontend  k8s_https
    mode    tcp
    bind    *:8443
    maxconn 2000
    default_backend     https-api

backend https-api
    balance      roundrobin
    server master01 20.0.20.201:6443  check inter 5000 fall 5 rise 3 weight 1
    server master02 20.0.20.202:6443  check inter 5000 fall 5 rise 3 weight 1
    server master03 20.0.20.203:6443  check inter 5000 fall 5 rise 3 weight 1
Note: this frontend listens on 8443, while the kubeadm configuration below sets controlPlaneEndpoint to 20.0.20.200:6443. With that combination, API traffic to the VIP reaches the apiserver on whichever master currently holds the VIP, without passing through HAProxy; to actually load-balance through HAProxy, set controlPlaneEndpoint to 20.0.20.200:8443 and join against that address instead.

4. Start the services
systemctl enable keepalived && systemctl start keepalived
systemctl enable haproxy && systemctl start haproxy
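Before moving on, it is worth confirming that the VIP and the listeners are actually up; a quick check (ens192 is the interface from the keepalived config):

ip addr show ens192 | grep 20.0.20.200   # the VIP should be present on exactly one master
ss -lntp | grep haproxy                  # haproxy should be listening on 8443 and 8888
systemctl status keepalived haproxy --no-pager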

III. Deploy the Masters

1. Configure the yum repository
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
2. Install and start
yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
systemctl enable kubelet && systemctl start kubelet
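Because the repository tracks the latest release while this article targets v1.15.3, it is safer to pin the package versions explicitly; a minimal sketch:

yum install -y kubelet-1.15.3 kubeadm-1.15.3 kubectl-1.15.3 --disableexcludes=kubernetes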
3. Generate the initialization config
# kubeadm config print init-defaults > kubeadm.conf
# cat kubeadm.conf
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 20.0.20.201
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master01
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: gcr.azk8s.cn/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.15.3
controlPlaneEndpoint: 20.0.20.200:6443
networking:
  dnsDomain: cluster.local
  podSubnet: 192.168.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"
4. Pull the required images
# kubeadm config images list --config kubeadm.conf
# kubeadm config images pull --config kubeadm.conf
[config/images] Pulled gcr.azk8s.cn/google_containers/kube-apiserver:v1.15.3
[config/images] Pulled gcr.azk8s.cn/google_containers/kube-controller-manager:v1.15.3
[config/images] Pulled gcr.azk8s.cn/google_containers/kube-scheduler:v1.15.3
[config/images] Pulled gcr.azk8s.cn/google_containers/kube-proxy:v1.15.3
[config/images] Pulled gcr.azk8s.cn/google_containers/pause:3.1
[config/images] Pulled gcr.azk8s.cn/google_containers/etcd:3.3.10
[config/images] Pulled gcr.azk8s.cn/google_containers/coredns:1.3.1
5. Initialize master01
[root@k8s-master01 ~]# kubeadm init --config kubeadm.conf
[init] Using Kubernetes version: v1.15.3
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master01 localhost] and IPs [20.0.20.201 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master01 localhost] and IPs [20.0.20.201 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 20.0.20.201 20.0.20.200]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 37.001511 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.15" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node k8s-master01 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node k8s-master01 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities 
and service account keys on each node and then running the following as root:

  kubeadm join 20.0.20.200:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:52dab3f1cfe0b0c202d676175f1216cf3c5919558d50805d595b74f480bcf75b \
    --control-plane       

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 20.0.20.200:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:52dab3f1cfe0b0c202d676175f1216cf3c5919558d50805d595b74f480bcf75b 
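The bootstrap token above is valid for 24 hours (ttl: 24h0m0s in kubeadm.conf); if it expires before all nodes have joined, a fresh worker join command can be printed on master01 with:

kubeadm token create --print-join-command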
6. Configure kubeconfig
[root@k8s-master01 ~]# mkdir -p $HOME/.kube
[root@k8s-master01 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master01 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
7. Copy the certificates to the other master nodes
# cat ./scp_pki.sh
#!/bin/bash
USER=root
IP="20.0.20.202 20.0.20.203"
for host in ${IP}; do
    ssh "${USER}"@$host "mkdir -p /etc/kubernetes/pki/etcd"
    scp /etc/kubernetes/pki/ca.* "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/sa.* "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/front-proxy-ca.* "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/etcd/ca.* "${USER}"@$host:/etc/kubernetes/pki/etcd/
    scp /etc/kubernetes/admin.conf "${USER}"@$host:/etc/kubernetes/
done
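With SSH trust in place, this runs from master01 as: bash ./scp_pki.sh. As an alternative to copying the certificates by hand, kubeadm v1.15 can distribute them through the cluster itself; a hedged sketch, where the certificate key is a placeholder printed by init:

kubeadm init --config kubeadm.conf --upload-certs
# then on master02/03:
kubeadm join 20.0.20.200:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:<hash> \
    --control-plane --certificate-key <key-printed-by-init>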
8. Join master02 and master03 to the cluster
[root@k8s-master02 ~]# kubeadm join 20.0.20.200:6443 --token abcdef.0123456789abcdef \
>     --discovery-token-ca-cert-hash sha256:52dab3f1cfe0b0c202d676175f1216cf3c5919558d50805d595b74f480bcf75b \
>     --control-plane
[root@k8s-master02 ~]# mkdir -p $HOME/.kube
[root@k8s-master02 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master02 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
9. Check node status
[root@k8s-master01 ~]# kubectl get node
NAME           STATUS     ROLES    AGE     VERSION
k8s-master01   NotReady   master   2m27s   v1.15.3
k8s-master02   NotReady   master   45s     v1.15.3
k8s-master03   NotReady   master   46s     v1.15.3
10. Configure the network
The nodes report NotReady above because no CNI network plugin has been deployed yet; installing Weave Net brings them Ready. Note that Weave Net allocates pod addresses from its own default range (10.32.0.0/12) rather than from the podSubnet in kubeadm.conf unless it is started with an explicit IPALLOC_RANGE, which is why the pod IPs shown later fall under 10.32.0.0/12.
# kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"
serviceaccount/weave-net created
clusterrole.rbac.authorization.k8s.io/weave-net created
clusterrolebinding.rbac.authorization.k8s.io/weave-net created
role.rbac.authorization.k8s.io/weave-net created
rolebinding.rbac.authorization.k8s.io/weave-net created
daemonset.extensions/weave-net created
# kubectl get pod -A
NAMESPACE     NAME                                   READY   STATUS    RESTARTS   AGE
kube-system   coredns-cf8fb6d7f-64hsx                1/1     Running   0          45m
kube-system   coredns-cf8fb6d7f-lqws8                1/1     Running   0          45m
kube-system   etcd-k8s-master01                      1/1     Running   0          44m
kube-system   etcd-k8s-master02                      1/1     Running   0          43m
kube-system   etcd-k8s-master03                      1/1     Running   0          44m
kube-system   kube-apiserver-k8s-master01            1/1     Running   0          44m
kube-system   kube-apiserver-k8s-master02            1/1     Running   0          42m
kube-system   kube-apiserver-k8s-master03            1/1     Running   0          44m
kube-system   kube-controller-manager-k8s-master01   1/1     Running   1          44m
kube-system   kube-controller-manager-k8s-master02   1/1     Running   0          42m
kube-system   kube-controller-manager-k8s-master03   1/1     Running   0          44m
kube-system   kube-proxy-6gwzs                       1/1     Running   0          44m
kube-system   kube-proxy-dppmv                       1/1     Running   0          37m
kube-system   kube-proxy-msz97                       1/1     Running   0          43m
kube-system   kube-proxy-tgkr9                       1/1     Running   0          37m
kube-system   kube-proxy-tw4lh                       1/1     Running   0          37m
kube-system   kube-proxy-zbf5f                       1/1     Running   0          45m
kube-system   kube-scheduler-k8s-master01            1/1     Running   1          44m
kube-system   kube-scheduler-k8s-master02            1/1     Running   0          42m
kube-system   kube-scheduler-k8s-master03            1/1     Running   0          44m
kube-system   weave-net-6b7px                        2/2     Running   0          6m12s
kube-system   weave-net-6b8wn                        2/2     Running   0          6m12s
kube-system   weave-net-dq7sz                        2/2     Running   0          6m12s
kube-system   weave-net-mfv8t                        2/2     Running   0          6m12s
kube-system   weave-net-t76p9                        2/2     Running   0          6m12s
kube-system   weave-net-wctz4                        2/2     Running   0          6m12s
11. Verify that IPVS is in use
[root@k8s-master01 ~]# kubectl get pod -n kube-system | grep kube-proxy
kube-proxy-6gwzs                           1/1     Running   0          2m47s
kube-proxy-msz97                           1/1     Running   0          2m37s
kube-proxy-zbf5f                           1/1     Running   0          4m8s
[root@k8s-master01 ~]# kubectl logs kube-proxy-6gwzs -n kube-system
I0909 06:48:02.103768       1 server_others.go:170] Using ipvs Proxier.
W0909 06:48:02.104548       1 proxier.go:401] IPVS scheduler not specified, use rr by default
I0909 06:48:02.104949       1 server.go:534] Version: v1.15.3
I0909 06:48:02.110944       1 conntrack.go:52] Setting nf_conntrack_max to 131072
I0909 06:48:02.111114       1 config.go:187] Starting service config controller
I0909 06:48:02.111143       1 controller_utils.go:1029] Waiting for caches to sync for service config controller
I0909 06:48:02.111174       1 config.go:96] Starting endpoints config controller
I0909 06:48:02.111184       1 controller_utils.go:1029] Waiting for caches to sync for endpoints config controller
I0909 06:48:02.211286       1 controller_utils.go:1036] Caches are synced for endpoints config controller
I0909 06:48:02.211338       1 controller_utils.go:1036] Caches are synced for service config controller
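The IPVS rule table itself can also be inspected with ipvsadm, installed back in section II:

ipvsadm -Ln   # e.g. the kubernetes service 10.96.0.1:443 should list the apiservers with rr scheduling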
12. Configure command auto-completion
yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc

IV. Join the Worker Nodes

1. Join the nodes to the cluster
[root@k8s-node01 ~]# kubeadm join 20.0.20.200:6443 --token abcdef.0123456789abcdef \
>     --discovery-token-ca-cert-hash sha256:52dab3f1cfe0b0c202d676175f1216cf3c5919558d50805d595b74f480bcf75b 
2. Check the nodes
[root@k8s-master01 ~]# kubectl get node
NAME           STATUS   ROLES    AGE     VERSION
k8s-master01   Ready    master   9m30s   v1.15.3
k8s-master02   Ready    master   7m48s   v1.15.3
k8s-master03   Ready    master   7m49s   v1.15.3
k8s-node01     Ready    <none>   94s     v1.15.3
k8s-node02     Ready    <none>   91s     v1.15.3
k8s-node03     Ready    <none>   90s     v1.15.3

V. Test the Cluster

1. Test a pod
# kubectl run nginx --image=nginx:1.14 --replicas=2
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/nginx created
# kubectl get pod -o wide
NAME                     READY   STATUS    RESTARTS   AGE   IP          NODE         NOMINATED NODE   READINESS GATES
nginx-7b4d6c6559-82jd7   1/1     Running   0          81s   10.46.0.1   k8s-node03   <none>           <none>
nginx-7b4d6c6559-hzlx6   1/1     Running   0          81s   10.45.0.0   k8s-node01   <none>           <none>
# curl 10.46.0.1
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
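Pod IPs only prove pod-to-pod networking; service routing through IPVS can be exercised by exposing the deployment. A sketch (the ClusterIP assigned in your cluster will differ):

kubectl expose deployment nginx --port=80
kubectl get svc nginx        # note the CLUSTER-IP
curl <cluster-ip>            # should return the same nginx welcome page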
2. Test DNS
# kubectl run curl --image=radial/busyboxplus:curl -it
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
If you don't see a command prompt, try pressing enter.
[ root@curl-6bf6db5c4f-472s2:/ ]$ nslookup kubernetes.default
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes.default
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
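
3. Test failover
Since high availability is the point of this deployment, failover itself deserves a test; a minimal sketch: stop keepalived on the VIP holder, watch the VIP move, and confirm the API stays reachable.

# on the current VIP holder (k8s-master01)
systemctl stop keepalived
# on k8s-master02: the VIP should arrive within a few advert intervals
ip addr show ens192 | grep 20.0.20.200
# from any master: the control plane should still respond
kubectl get node
# restore
systemctl start keepalived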