Deploying a Highly Available Kubernetes Cluster from Binaries


Part I. Single-node deployment

1. Cluster node plan (all addresses use a /24 mask)

Load balancer nodes       Master nodes            Node (worker) nodes     Harbor private registry
nginx1 = 10.4.7.23        master1 = 10.4.7.11     node1 = 10.4.7.21       10.4.7.200
nginx2 = 10.4.7.24        master2 = 10.4.7.12     node2 = 10.4.7.22

2. Basic environment preparation (run on all nodes)


# Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
# Disable SELinux
find_key="SELINUX="
sed -ri "/^$find_key/c${find_key}disabled" /etc/selinux/config
# Change the hostname (different on each host)
hostnamectl set-hostname master1
su

3. Deploying etcd

3.1 Prepare the files

mkdir k8s 
cd k8s/
ls		 ## copy the required files from the host machine into this directory: the two scripts plus the cert directory
etcd-cert  etcd-cert.sh  etcd.sh
cd etcd-cert/
chmod +x cfssl cfssl-certinfo cfssljson
mv cfssl cfssl-certinfo cfssljson /usr/local/bin/
ls /usr/local/bin/	## confirm the following three tools are present
cfssl  cfssl-certinfo  cfssljson
# cfssl           the certificate-generation tool
# cfssl-certinfo  displays certificate information
# cfssljson       writes certificate files from cfssl's JSON output

3.2 Create the CA certificate

1. Create the CA configuration file
vim ca-config.json
{
  "signing": {
    "default": {   ## default settings
      "expiry": "87600h"  ## certificate validity: 10 years
    },
    "profiles": {   ## signing profiles
      "www": {   ## the profile name is www
        "expiry": "87600h",  ## validity: 87600h
        "usages": [   ## allowed key usages
          "signing",   ## digital signing
          "key encipherment",  ## key encipherment
          "server auth",  ## server-side authentication
          "client auth"   ## client-side authentication
        ]
      }
    }
  }
}
2. Create the CA certificate signing request (the "csr" in a file name marks a signing request)
vim ca-csr.json
{
    "CN":"etcd CA",   ## Common Name
    "key": {    ## key settings
        "algo":"rsa",   ## RSA asymmetric key
        "size":2048   ## key length
    },
    "names": [   ## subject name fields
        {
            "C":"CN",   ## country
            "L":"Beijing",  ## locality
            "ST":"Beijing"  ## state/province
        }
    ]
}

3. Generate the CA certificate from the signing request; this produces ca-key.pem and ca.pem
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
ls	## you should now see the following six files
ca-config.json  ca.csr  ca-csr.json  ca-key.pem  ca.pem  etcd-cert.sh
Notes on the command in step 3:
gencert -initca        reads ca-csr.json and initializes the CA
cfssljson -bare        writes the bare certificate files from cfssl's JSON output
ca-key.pem / ca.pem    the CA key and certificate produced
ca-config.json         will be used later when signing the server certificate
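As a quick sanity check (not part of the original steps), cfssl-certinfo, installed above, can print the details of the certificate that was just generated:
cfssl-certinfo -cert ca.pem    ## shows the subject, issuer and the 10-year validity period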

3.3 Create the etcd server certificate (for node-to-node and client communication)

vim server-csr.json    ## certificate used for etcd peer/client communication
{
    "CN": "etcd",
    "hosts": [   ## list the addresses of all etcd nodes
    "10.4.7.11",
    "10.4.7.21",
    "10.4.7.22"
    ],
    "key": {
        "algo": "rsa",   ## key settings matching the CA
        "size": 2048
    },
    "names": [
        {
          "C": "CN",
          "L": "Beijing",
          "ST": "Beijing"
        }
    ]
}

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server

ls	## you should now see the files below; server-key.pem and server.pem are the ones etcd will actually use
ca-config.json  ca-csr.json  ca.pem        server.csr       server-key.pem
ca.csr          ca-key.pem   etcd-cert.sh  server-csr.json  server.pem


3.4 Build the etcd cluster with the certificates

1. Upload etcd.sh (a script that generates the etcd configuration file and the systemd unit) to /root/k8s, then unpack the etcd release:
cd ..
tar zxf etcd-v3.3.10-linux-amd64.tar.gz
2. After unpacking, the etcd-v3.3.10-linux-amd64 directory contains the two binaries that matter: etcd and etcdctl.
mkdir /opt/etcd/{cfg,bin,ssl} -p ## create the etcd working directories under /opt
ls /opt/etcd/
#bin  cfg  ssl   		## confirm they were created
cd /root/k8s/etcd-v3.3.10-linux-amd64/
mv etcd etcdctl /opt/etcd/bin/ ## move the unpacked binaries into /opt/etcd/bin
ls /opt/etcd/bin/   		## you should now see etcd and etcdctl
cd /root/k8s/etcd-cert/
cp *.pem /opt/etcd/ssl/  	## copy all the certificates into /opt/etcd/ssl
ls /opt/etcd/ssl/
#ca-key.pem  ca.pem  server-key.pem  server.pem       ## double-check they are there
cd /root/k8s/  ## copy the .tar.gz packages from the host machine into this directory
#cfssl.sh   etcd-v3.3.10-linux-amd64            kubernetes-server-linux-amd64.tar.gz
#etcd-cert  etcd-v3.3.10-linux-amd64.tar.gz
#etcd.sh    flannel-v0.10.0-linux-amd64.tar.gz
3. Run etcd.sh to generate the etcd configuration file and the systemd startup unit. The command will appear to hang while it waits for the other members to join.
# Be sure to use your own IP addresses (open /root/k8s/etcd.sh to review the cluster settings it configures).
[root@localhost k8s]# bash etcd.sh etcd01 10.4.7.11 etcd02=https://10.4.7.21:2380,etcd03=https://10.4.7.22:2380
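While the first member is waiting, you can open a second terminal and look at what the script generated (these are the same files distributed in step 4 below; the exact contents depend on your etcd.sh):
cat /opt/etcd/cfg/etcd                        ## the [Member]/[Clustering] variables explained in step 5
cat /usr/lib/systemd/system/etcd.service      ## the systemd unit that starts etcd with the certificates in /opt/etcd/ssl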


4. Distribute the files to the node machines
scp -r /opt/etcd/ root@10.4.7.21:/opt/
scp -r /opt/etcd/ root@10.4.7.22:/opt/
# Copy the whole directory to /opt on each node; it must land in exactly this path, otherwise the configuration files would have to be adjusted.
scp /usr/lib/systemd/system/etcd.service root@10.4.7.21:/usr/lib/systemd/system/
scp /usr/lib/systemd/system/etcd.service root@10.4.7.22:/usr/lib/systemd/system/
# Copy the systemd unit to the same location on each node.


5. Make the corresponding changes on the node machines (node1 and node2 are nearly identical, so only one is shown). Note: the sample values below use 20.0.0.x addresses from a different environment; substitute this node's own address (10.4.7.21 or 10.4.7.22) and the matching member name (etcd02 or etcd03).
cd /opt/etcd/cfg
[root@node2 cfg]# vim etcd   ## adjust the member name and IP addresses

#[Member]
ETCD_NAME="etcd02"    ## name of this etcd member
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"  ## directory where etcd stores its data
ETCD_LISTEN_PEER_URLS="https://20.0.0.11:2380" ## peer listen address, e.g. https://ip:2380; separate multiple URLs with commas. The other members must be able to reach it, so do not use localhost.
ETCD_LISTEN_CLIENT_URLS="https://20.0.0.11:2379" ## client listen address (where etcd serves client requests)

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://20.0.0.11:2380" ## peer URL advertised to the rest of the cluster
ETCD_ADVERTISE_CLIENT_URLS="https://20.0.0.11:2379"  ## client URL advertised to the rest of the cluster
ETCD_INITIAL_CLUSTER="etcd01=https://20.0.0.10:2380,etcd02=https://20.0.0.11:2380,etcd03=https://20.0.0.12:2380"    ## every member of the cluster, in name=peer-url format
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"  ## cluster token; must be unique when several clusters exist
ETCD_INITIAL_CLUSTER_STATE="new"   ## "new" when creating a cluster; "existing" when joining one that already exists

A summary of exactly which lines need to change:
vim etcd
#[Member]
ETCD_NAME="etcd02" ## change
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://20.0.0.11:2380" ## change
ETCD_LISTEN_CLIENT_URLS="https://20.0.0.11:2379" ## change
##
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://20.0.0.11:2380" ## change
ETCD_ADVERTISE_CLIENT_URLS="https://20.0.0.11:2379"  ## change
ETCD_INITIAL_CLUSTER="etcd01=https://20.0.0.10:2380,etcd02=https://20.0.0.11:2380,etcd03=https://20.0.0.12:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"


6. Once both node machines have been updated, go back to the master and start the cluster again (the earlier attempt will have timed out and exited, so the command has to be re-run):
[root@localhost k8s]# bash etcd.sh etcd01 10.4.7.11 etcd02=https://10.4.7.21:2380,etcd03=https://10.4.7.22:2380

7. Then, back on the node machines, start etcd:
[root@localhost ~]# systemctl start etcd
[root@localhost ~]# systemctl status etcd
If the status shows running, etcd started correctly.

8. Finally, back on the master, run an etcd cluster health check:
cd /root/k8s/etcd-cert
/opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://10.4.7.11:2379,https://10.4.7.21:2379,https://10.4.7.22:2379" cluster-health

member 988139385f78284 is healthy: got healthy result from https://10.4.7.22:2379
member 1ba3960d0c371211 is healthy: got healthy result from https://10.4.7.11:2379
member 5a0ef2a004fc4349 is healthy: got healthy result from https://10.4.7.21:2379
cluster is healthy

Seeing "cluster is healthy" means the etcd cluster is working correctly.
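The same certificate flags also work for listing the members (this etcd release defaults to the v2 etcdctl API, which provides member list and cluster-health):
/opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://10.4.7.11:2379,https://10.4.7.21:2379,https://10.4.7.22:2379" member list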

4. Deploying Docker

4.1 Install dependency packages

 yum -y install yum-utils device-mapper-persistent-data lvm2

yum-utils provides yum-config-manager.
The devicemapper storage driver requires device-mapper-persistent-data and lvm2.
Device Mapper is the generic device-mapping mechanism introduced in the Linux 2.6 kernel to support logical volume management; it provides a highly modular kernel framework for the block-device drivers used in storage management.

4.2 Configure the Aliyun Docker repository

yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

4.3 Install Docker CE

yum -y install docker-ce

systemctl start docker

systemctl enable docker

4.4 Configure a registry mirror

tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors":["https://05vz3np5.mirror.aliyuncs.com"]
}
EOF


systemctl daemon-reload
systemctl restart docker

4.5 Network tuning

vim /etc/sysctl.conf
   net.ipv4.ip_forward=1
   
sysctl -p
service network restart
systemctl restart docker

4.6 Verification commands

Check the Docker version
docker version
Search the public registry for nginx images
docker search nginx

5. Configuring the flannel network

5.1 Write the allocated subnet range into etcd for flannel to use

Run on master1:

cd /root/k8s/etcd-cert
/opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://10.4.7.11:2379,https://10.4.7.21:2379,https://10.4.7.22:2379" set /coreos.com/network/config '{"Network": "172.17.0.0/16","Backend": {"Type":"vxlan"}}'
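To verify the key was written, read it back with the same flags:
/opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://10.4.7.11:2379,https://10.4.7.21:2379,https://10.4.7.22:2379" get /coreos.com/network/config
{"Network": "172.17.0.0/16","Backend": {"Type":"vxlan"}}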

5.2 Copy the flannel package to the node machines and unpack it

tar zxvf flannel-v0.10.0-linux-amd64.tar.gz
mkdir /opt/kubernetes/{cfg,bin,ssl} -p
mv mk-docker-opts.sh flanneld /opt/kubernetes/bin

5.3 Run the flannel installation script on the nodes

bash flannel.sh https://10.4.7.11:2379,https://10.4.7.21:2379,https://10.4.7.22:2379

5.4 Connect Docker to the flannel network

vim /usr/lib/systemd/system/docker.service
# for containers run by docker
EnvironmentFile=/run/flannel/subnet.env  ## add this line
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS -H fd:// --containerd=/run/containerd/containerd.sock  ## add $DOCKER_NETWORK_OPTIONS to the existing ExecStart line

## Check the address range flannel was assigned
cat /run/flannel/subnet.env 
DOCKER_OPT_BIP="--bip=172.17.54.1/24"
DOCKER_OPT_IPMASQ="--ip-masq=false"
DOCKER_OPT_MTU="--mtu=1450"
DOCKER_NETWORK_OPTIONS=" --bip=172.17.54.1/24 --ip-masq=false --mtu=1450"

[root@node1 ~]# systemctl daemon-reload
[root@node1 ~]# systemctl restart docker

5.5 Pull an image and run a test container (CentOS)
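The post does not show the exact docker commands for this test; a minimal example, assuming the stock centos image from Docker Hub:
docker pull centos:7
docker run -it centos:7 /bin/bash    ## drops you into a shell inside the new container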

Once the container is running, check from inside it whether an IP address was assigned:
yum -y install net-tools
ifconfig                ## the container should show an address from the flannel subnet, for example:
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 172.17.54.2  netmask 255.255.255.0  broadcast 172.17.54.255
        ether 02:42:ac:11:36:02  txqueuelen 0  (Ethernet)
        RX packets 13840  bytes 11482584 (10.9 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 7536  bytes 411117 (401.4 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        loop  txqueuelen 1  (Local Loopback)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

6. Deploying the master components

6.1 Generate the apiserver certificates on master1

Upload master.zip to the master1 node
unzip master.zip
mkdir /opt/kubernetes/{cfg,bin,ssl} -p
mkdir k8s-cert
cd k8s-cert/

Create the certificate request files:
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

cat > ca-csr.json <<EOF
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
      	    "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

cat > server-csr.json <<EOF
{
    "CN": "kubernetes",
    "hosts": [
      "10.0.0.1",
      "127.0.0.1",
      "10.4.7.11",
      "10.4.7.12",
      "10.4.7.100",
      "10.4.7.23",
      "10.4.7.24",
      "kubernetes",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server

cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

Check the generated Kubernetes certificates:

[root@master k8s-cert]# ls *pem
admin-key.pem  ca-key.pem  kube-proxy-key.pem  server-key.pem
admin.pem      ca.pem      kube-proxy.pem      server.pem

cp ca*pem server*pem /opt/kubernetes/ssl/
cd /root/k8s
tar zxvf kubernetes-server-linux-amd64.tar.gz
cd /root/k8s/kubernetes/server/bin
cp kube-apiserver kubectl kube-controller-manager kube-scheduler /opt/kubernetes/bin/
cd /root/k8s

Generate the bootstrap token:
cat > /opt/kubernetes/cfg/token.csv << EOF
`head -c 16 /dev/urandom | od -An -t x | tr -d ' '`,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

Start the apiserver:
bash apiserver.sh 10.4.7.11 https://10.4.7.11:2379,https://10.4.7.21:2379,https://10.4.7.22:2379

Start the scheduler service:
./scheduler.sh 127.0.0.1

Start the controller-manager:
chmod +x controller-manager.sh
./controller-manager.sh 127.0.0.1

Check the status of the master components:
cp /opt/kubernetes/bin/kubectl /usr/local/bin
[root@master1 k8s-cert]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-0               Healthy   {"health":"true"}
etcd-1               Healthy   {"health":"true"}
etcd-2               Healthy   {"health":"true"}

Copy kubelet and kube-proxy over to the node machines:
cd /root/k8s/kubernetes/server/bin
scp kubelet kube-proxy 10.4.7.21:/opt/kubernetes/bin/
scp kubelet kube-proxy 10.4.7.22:/opt/kubernetes/bin/

cd /root/k8s
mkdir kubeconfig
cd kubeconfig/

Copy the kubeconfig.sh file here and rename it:
mv kubeconfig.sh kubeconfig

Check what the token value is:
cat /opt/kubernetes/cfg/token.csv
Update the corresponding parameter in kubeconfig:
    # Create the TLS Bootstrapping Token
    #BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
    BOOTSTRAP_TOKEN=0fb61c46f8991b718eb38d27b605b008    ## replace this value with the token you just looked up

Set the PATH environment variable (this can also be written into /etc/profile):
export PATH=$PATH:/opt/kubernetes/bin/

Generate the kubeconfig files:
bash kubeconfig 10.4.7.11 /root/k8s/k8s-cert/
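The body of kubeconfig.sh is not shown in this post. As a rough sketch (an assumption about what such a script typically contains, not the exact script used here), it builds bootstrap.kubeconfig and kube-proxy.kubeconfig with kubectl config, using the two positional arguments and the BOOTSTRAP_TOKEN edited above:
APISERVER=$1                                    # 10.4.7.11
SSL_DIR=$2                                      # /root/k8s/k8s-cert/
export KUBE_APISERVER="https://$APISERVER:6443"

# bootstrap.kubeconfig, used by kubelet for TLS bootstrapping
kubectl config set-cluster kubernetes \
  --certificate-authority=$SSL_DIR/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=bootstrap.kubeconfig
kubectl config set-credentials kubelet-bootstrap \
  --token=${BOOTSTRAP_TOKEN} \
  --kubeconfig=bootstrap.kubeconfig
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=bootstrap.kubeconfig
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

# kube-proxy.kubeconfig is built the same way, except the credentials step uses
# --client-certificate=$SSL_DIR/kube-proxy.pem and --client-key=$SSL_DIR/kube-proxy-key.pem
# instead of the bootstrap token.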

Copy the generated files to the node machines:
scp bootstrap.kubeconfig kube-proxy.kubeconfig 10.4.7.21:/opt/kubernetes/cfg/
scp bootstrap.kubeconfig kube-proxy.kubeconfig 10.4.7.22:/opt/kubernetes/cfg/

Create the bootstrap role binding, which grants permission to connect to the apiserver and request certificate signing:
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap

6.2 Steps on the node machines

On node01: copy node.zip to /root, then unpack it
unzip node.zip 
bash kubelet.sh 10.4.7.21

Check that the kubelet service started:
ps aux | grep kube

On node02: copy node.zip to /root, then unpack it
unzip node.zip 
bash kubelet.sh 10.4.7.22

Check that the kubelet service started:
ps aux | grep kube

6.3 Approve the nodes on master1 (node1 and node2 are handled the same way)

[root@localhost kubeconfig]# kubectl get csr
NAME                                                   AGE     REQUESTOR           CONDITION
node-csr-NOI-9vufTLIqJgMWq4fHPNPHKbjCXlDGHptj7FqTa8A   4m27s   kubelet-bootstrap   Pending    (waiting for the cluster to issue this node's certificate)

[root@localhost kubeconfig]# kubectl certificate approve node-csr-NOI-9vufTLIqJgMWq4fHPNPHKbjCXlDGHptj7FqTa8A
certificatesigningrequest.certificates.k8s.io/node-csr-NOI-9vufTLIqJgMWq4fHPNPHKbjCXlDGHptj7FqTa8A approved
// Check the certificate status again
[root@localhost kubeconfig]# kubectl get csr
NAME                                                   AGE     REQUESTOR           CONDITION
node-csr-NOI-9vufTLIqJgMWq4fHPNPHKbjCXlDGHptj7FqTa8A   8m56s   kubelet-bootstrap   Approved,Issued    (the node has been allowed to join the cluster)
// Check the cluster nodes; node01 has joined successfully
[root@localhost kubeconfig]# kubectl get node
NAME              STATUS   ROLES    AGE    VERSION
10.4.7.21   Ready    <none>   118s   v1.12.3

2. On node01, start the kube-proxy service:
[root@localhost ~]# bash proxy.sh 10.4.7.21
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-proxy.service to /usr/lib/systemd/system/kube-proxy.service.
[root@localhost ~]# systemctl status kube-proxy.service 

6.4 On the master, check that the node deployment looks right

[root@master1 ~]# kubectl get node
NAME        STATUS   ROLES    AGE   VERSION
10.4.7.21   Ready    <none>   11h   v1.12.3
10.4.7.22   Ready    <none>   11h   v1.12.3
[root@master1 ~]#


Part II. Multi-master deployment: adding master2

1. Initialize the master2 environment

# Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
# Disable SELinux
find_key="SELINUX="
sed -ri "/^$find_key/c${find_key}disabled" /etc/selinux/config

# Change the hostname
hostnamectl set-hostname master2
su

2. Copy the required files from master1 to master2

scp -r /opt/kubernetes 10.4.7.12:/opt
scp /usr/lib/systemd/system/{kube-apiserver,kube-controller-manager,kube-scheduler}.service 10.4.7.12:/usr/lib/systemd/system
scp -r /opt/etcd 10.4.7.12:/opt

3. On master2, adjust the relevant parameters

cd /opt/kubernetes/cfg
[root@master2 cfg]# vim kube-apiserver
[root@master2 cfg]# cat  kube-apiserver

KUBE_APISERVER_OPTS="--logtostderr=true \
--v=4 \
--etcd-servers=https://10.4.7.11:2379,https://10.4.7.21:2379,https://10.4.7.22:2379 \
--bind-address=10.4.7.12 \      ## change
--secure-port=6443 \
--advertise-address=10.4.7.12 \   ## change

4. On master2, start the apiserver, controller-manager, and scheduler services

systemctl start kube-apiserver.service && systemctl start kube-scheduler.service && systemctl start kube-controller-manager.service
systemctl status kube-apiserver.service && systemctl status kube-scheduler.service && systemctl status kube-controller-manager.service

cp /opt/kubernetes/bin/kubectl /usr/local/bin
[root@master2 cfg]# kubectl get node
NAME        STATUS   ROLES    AGE   VERSION
10.4.7.21   Ready    <none>   13h   v1.12.3
10.4.7.22   Ready    <none>   13h   v1.12.3

At this point master2 is still far from a drop-in replacement: if master1 goes down, master2 cannot simply take over, because even though it can list the nodes, the nodes themselves are all configured with master1's address.

You can confirm this by looking at the kubelet configuration (kubelet.kubeconfig) on any node.
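For example, a quick check on a node (assuming the config lives in /opt/kubernetes/cfg as set up earlier; the output shown is illustrative):
grep server: /opt/kubernetes/cfg/kubelet.kubeconfig
    server: https://10.4.7.11:6443    ## still pointing at master1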

Part III. Load-balancing cluster

1. Bring up two more machines for nginx load balancing (10.4.7.23 and 10.4.7.24) and initialize them

The load balancers run nginx as a reverse proxy, and the VIP is the address exposed to the outside. The VIP must never disappear; if it does, the nodes lose their connection to the apiservers.
# Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
# Disable SELinux
find_key="SELINUX="
sed -ri "/^$find_key/c${find_key}disabled" /etc/selinux/config

2. Install nginx (same steps on both machines)

cat > /etc/yum.repos.d/nginx.repo << 'EOF'    ## note the quoted EOF: it keeps $releasever and $basearch from being expanded by the shell
[nginx-stable]
name=nginx stable repo
baseurl=http://nginx.org/packages/centos/$releasever/$basearch/
gpgcheck=1
enabled=1
gpgkey=https://nginx.org/keys/nginx_signing.key
module_hotfixes=true
EOF

yum -y install nginx

3. Basic nginx configuration (same on both machines)

The layer-4 (stream) forwarding block below needs to be added to the configuration file:
vim /etc/nginx/nginx.conf

user  nginx;
worker_processes  auto;

error_log  /var/log/nginx/error.log notice;
pid        /var/run/nginx.pid;


events {
    worker_connections  1024;
}

stream {

  log_format main '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
  access_log /var/log/nginx/k8s-access.log main;

  upstream k8s-apiserver {
    server 10.4.7.11:6443;
    server 10.4.7.12:6443;
  }
  server {
            listen 6443;
            proxy_pass k8s-apiserver;
  }
}


http {
    include       /etc/nginx/mime.types;
    default_type  application/octet-stream;

    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';

    access_log  /var/log/nginx/access.log  main;

    sendfile        on;
    #tcp_nopush     on;

    keepalive_timeout  65;

    #gzip  on;

    include /etc/nginx/conf.d/*.conf;
}
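Before starting nginx it is worth validating the file (an extra check, not in the original steps):
nginx -t    ## should report that the syntax is ok and the test is successful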

4. Start nginx (same on both machines)

systemctl start nginx

systemctl enable nginx

5. Install keepalived and create its configuration file

yum -y install keepalived


cat > /root/keepalived.conf <<EOF
! Configuration File for keepalived

global_defs {
   # notification recipients
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   # sender address for notifications
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id NGINX_MASTER
}

vrrp_script check_nginx {
    script "/etc/nginx/check_nginx.sh"
}

vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 51 # VRRP router ID; must be unique per instance
    priority 100    # priority; set this to 90 on the backup server
    advert_int 1    # VRRP advertisement (heartbeat) interval, default 1 second
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.4.7.100/24
    }
    track_script {
        check_nginx
    }
}
EOF
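The heredoc above writes the file to /root/keepalived.conf, while keepalived reads /etc/keepalived/keepalived.conf, so it presumably still needs to be copied into place; on the second load balancer you would also set state BACKUP and priority 90, as the priority comment suggests (treat both points as assumptions, since the original does not spell them out):
cp /root/keepalived.conf /etc/keepalived/keepalived.conf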

cat > /etc/nginx/check_nginx.sh << 'EOF'       ## quote EOF so the $ signs are written literally instead of being expanded now
#!/bin/bash
# Start keepalived again if it is not running with its usual three processes
check_nginx_start (){
  count=$(ps -ef |grep keepalived |egrep -cv "grep |$$")
  if [ "$count" -ne 3 ];then
    systemctl start keepalived
  fi

}

# If nginx has died, stop keepalived so the VIP moves to the other load balancer
check_nginx (){
  count=$(ps -ef |grep nginx |egrep -cv "grep |$$")
  if [ "$count" -eq 0 ];then
      systemctl stop keepalived
  else
      check_nginx_start
  fi

}
check_nginx
EOF

chmod +x /etc/nginx/check_nginx.sh

It is best to also add a cron job that runs the check script every minute:
crontab -e
* * * * * sh /etc/nginx/check_nginx.sh



6. Start keepalived

systemctl start keepalived   
ip a            # after starting, check whether the floating VIP address has appeared

A note on behaviour: with both nginx instances up, keepalived does not start automatically when nginx starts, but (through the check script) it is stopped when nginx stops. You can test this by stopping one nginx and checking whether the floating address moves to the other load-balancer node.

7. Now switch the nodes over to the VIP, so node1 and node2 reach master1 and master2 through the load balancers

7.1 Three files need to be changed (on both node machines); a sed one-liner that makes the same change follows the list

cd /opt/kubernetes/cfg/
bootstrap.kubeconfig    change 10.4.7.11 to 10.4.7.100
kubelet.kubeconfig      change 10.4.7.11 to 10.4.7.100
kube-proxy.kubeconfig   change 10.4.7.11 to 10.4.7.100
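A one-liner that makes the same change in all three files (assuming the apiserver address appears in them as https://10.4.7.11:6443, as generated earlier):
cd /opt/kubernetes/cfg/
sed -i 's#https://10.4.7.11:6443#https://10.4.7.100:6443#' bootstrap.kubeconfig kubelet.kubeconfig kube-proxy.kubeconfig
grep 10.4.7.100 *.kubeconfig    ## confirm all three files now point at the VIP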

7.2 Restart the kubelet and kube-proxy services

systemctl restart kubelet.service
systemctl restart kube-proxy.service

8. Create a deployment and check that everything works

kubectl run nginx --image=nginx
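To confirm the request went through the VIP, check the pod on a master and watch the stream access log on the load balancers (log path as configured above; exact output will vary):
kubectl get pods -o wide                   ## on a master; the nginx pod should reach Running
tail /var/log/nginx/k8s-access.log         ## on nginx1/nginx2; entries should show both 10.4.7.11:6443 and 10.4.7.12:6443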
Tags: k8s