新网创想网站建设,新征程启航
为企业提供网站建设、域名注册、服务器等服务
如图所示Flannel的工作原理可以解释为:
[root@node01 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2 //安装依赖包
已加载插件:fastestmirror
base | 3.6 kB 00:00:00
extras | 2.9 kB 00:00:00
...
[root@node01 ~]# yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo //设置阿里云镜像源
已加载插件:fastestmirror
adding repo from: https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
grabbing file https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo to /etc/yum.repos.d/docker-ce.repo
repo saved to /etc/yum.repos.d/docker-ce.repo
[root@node01 ~]# yum install -y docker-ce //安装Docker-CE
已加载插件:fastestmirror
docker-ce-stable | 3.5 kB 00:00:00
(1/2): docker-ce-stable/x86_64/updateinfo | 55 B 00:00:01
(2/2): docker-ce-stable/x86_64/primary_db | 37 kB 00:00:01
Loading mirror speeds from cached hostfile
...
[root@node01 ~]# systemctl start docker.service //启动docker服务
[root@node01 ~]# systemctl enable docker.service //配置开机自启
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@node01 ~]# tee /etc/docker/daemon.json <<-'EOF' //配置镜像加速
> {
> "registry-mirrors": ["https://**********.aliyuncs.com"]
> }
> EOF
{
"registry-mirrors": ["https://**********.aliyuncs.com"]
}
[root@node01 ~]# systemctl daemon-reload //重新加载进程
[root@node01 ~]# systemctl restart docker //重启docker
[root@node01 ~]# vim /etc/sysctl.conf //编辑开启路由转发功能
...
# For more information, see sysctl.conf(5) and sysctl.d(5).
net.ipv4.ip_forward=1
:wq
[root@node01 ~]# sysctl -p //重新加载
net.ipv4.ip_forward = 1
[root@node01 ~]# service network restart //重启网络
Restarting network (via systemctl): [ 确定 ]
[root@node01 ~]# systemctl restart docker //重启docker服务
[root@node01 ~]# docker version
Client: Docker Engine - Community //查看docker版本
Version: 19.03.5
API version: 1.40
Go version: go1.12.12
... //docker服务部署完成
[root@master01 etcd-cert]# /opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://192.168.80.12:2379,https://192.168.80.13:2379,https://192.168.80.14:2379" set /coreos.com/network/config '{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}' //写入分配的子网段到ETCD中,供flannel使用
{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}
[root@master01 etcd-cert]# /opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://192.168.80.12:2379,https://192.168.80.13:2379,https://192.168.80.14:2379" get /coreos.com/network/config //查看是否成功写入
{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}
[root@master01 etcd-cert]# cd .. //回到k8s目录
[root@master01 k8s]# ls //查看flannel软件包是否存在
cfssl.sh etcd-v3.3.10-linux-amd64 kubernetes-server-linux-amd64.tar.gz
etcd-cert etcd-v3.3.10-linux-amd64.tar.gz
etcd.sh flannel-v0.10.0-linux-amd64.tar.gz
[root@master01 k8s]# scp flannel-v0.10.0-linux-amd64.tar.gz flannel.sh root@192.168.80.13:/root //将软件包拷贝到node01节点
root@192.168.80.13's password:
flannel-v0.10.0-linux-amd64.tar.gz 100% 9479KB 61.1MB/s 00:00
flannel.sh: No such file or directory
[root@master01 k8s]# scp flannel-v0.10.0-linux-amd64.tar.gz flannel.sh root@192.168.80.14:/root //将软件包拷贝到node02节点
root@192.168.80.14's password:
flannel-v0.10.0-linux-amd64.tar.gz 100% 9479KB 119.3MB/s 00:00
flannel.sh: No such file or directory
node01、node02节点同步操作
[root@node01 ~]# ls //查看软件包是否成功拷贝
anaconda-ks.cfg flannel-v0.10.0-linux-amd64.tar.gz
[root@node01 ~]# tar zxvf flannel-v0.10.0-linux-amd64.tar.gz //解压软件包
flanneld
mk-docker-opts.sh
README.md
[root@node01 ~]# mkdir /opt/kubernetes/{cfg,bin,ssl} -p //递归创建k8s工作目录
[root@node01 ~]# mv mk-docker-opts.sh flanneld /opt/kubernetes/bin/ //移动脚本文件到工作目录下的bin目录
[root@node01 ~]# vim flannel.sh //编辑flannel执行脚本 并生成配置文件
#!/bin/bash
# flannel.sh — one-shot installer: writes the flanneld options file and the
# systemd unit, then enables and starts the flanneld service.
# Usage: bash flannel.sh [ETCD_ENDPOINTS]
#   $1 - comma-separated etcd endpoint list (default: http://127.0.0.1:2379)

ETCD_ENDPOINTS=${1:-"http://127.0.0.1:2379"}

# Write the flanneld options file. The heredoc delimiter is unquoted, so
# ${ETCD_ENDPOINTS} expands now; the trailing backslashes are heredoc line
# continuations, so the written file contains one single-line assignment.
cat <<EOF >/opt/kubernetes/cfg/flanneld
FLANNEL_OPTIONS="--etcd-endpoints=${ETCD_ENDPOINTS} \
-etcd-cafile=/opt/etcd/ssl/ca.pem \
-etcd-certfile=/opt/etcd/ssl/server.pem \
-etcd-keyfile=/opt/etcd/ssl/server-key.pem"
EOF

# Write the systemd unit. \$FLANNEL_OPTIONS is escaped so the literal text
# "$FLANNEL_OPTIONS" lands in the unit file and is resolved by systemd from
# EnvironmentFile at service start, not by this script.
cat <<EOF >/usr/lib/systemd/system/flanneld.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service

[Service]
Type=notify
EnvironmentFile=/opt/kubernetes/cfg/flanneld
ExecStart=/opt/kubernetes/bin/flanneld --ip-masq \$FLANNEL_OPTIONS
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

# Activate: pick up the new unit, start on boot, and (re)start now.
systemctl daemon-reload
systemctl enable flanneld
systemctl restart flanneld
:wq
[root@node01 ~]# bash flannel.sh https://192.168.80.12:2379,https://192.168.80.13:2379,https://192.168.80.14:2379 //执行flannel脚本文件开启flannel网络功能
Created symlink from /etc/systemd/system/multi-user.target.wants/flanneld.service to /usr/lib/systemd/system/flanneld.service.
[root@node01 ~]# vim /usr/lib/systemd/system/docker.service //配置docker启动脚本连接flannel
...
[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
EnvironmentFile=/run/flannel/subnet.env //添加连接运行语句
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS -H fd:// --containerd=/run/containerd/containerd.sock //添加变量
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
...
:wq
[root@node01 ~]# cat /run/flannel/subnet.env //查看docker运行时连接flannel文件
DOCKER_OPT_BIP="--bip=172.17.49.1/24"
DOCKER_OPT_IPMASQ="--ip-masq=false"
DOCKER_OPT_MTU="--mtu=1450"
DOCKER_NETWORK_OPTIONS=" --bip=172.17.49.1/24 --ip-masq=false --mtu=1450" //bip指定启动时的子网 注意:此处node01与node02指定启动时的子网IP地址都属于172.17.0.0/24网段
查看网络
[root@node01 ~]# systemctl daemon-reload //重新加载进程
[root@node01 ~]# systemctl restart docker //重新启动docker
[root@node01 ~]# ifconfig //查看网络信息
docker0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
inet 172.17.49.1 netmask 255.255.255.0 broadcast 172.17.49.255 //docker0网卡IP地址
...
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
inet 192.168.80.13 netmask 255.255.255.0 broadcast 192.168.80.255
...
flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
inet 172.17.49.0 netmask 255.255.255.255 broadcast 0.0.0.0 //flannel网卡地址
...
lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
...
node02服务器操作
[root@node02 ~]# ifconfig
docker0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
inet 172.17.63.1 netmask 255.255.255.0 broadcast 172.17.63.255 //docker网卡信息
...
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
inet 192.168.80.14 netmask 255.255.255.0 broadcast 192.168.80.255
...
flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
inet 172.17.63.0 netmask 255.255.255.255 broadcast 0.0.0.0 //flannel网卡信息
...
lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
...
[root@node02 ~]# ping 172.17.49.1 //使用ping命令测试网络是否互通
PING 172.17.49.1 (172.17.49.1) 56(84) bytes of data.
64 bytes from 172.17.49.1: icmp_seq=1 ttl=64 time=0.344 ms
64 bytes from 172.17.49.1: icmp_seq=2 ttl=64 time=0.333 ms
64 bytes from 172.17.49.1: icmp_seq=3 ttl=64 time=0.346 ms
^C
--- 172.17.49.1 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2000ms
rtt min/avg/max/mdev = 0.333/0.341/0.346/0.005 ms
[root@node01 ~]# docker run -it centos:7 /bin/bash //运行docker镜像
Unable to find image 'centos:7' locally
7: Pulling from library/centos
ab5ef0e58194: Pull complete
Digest: sha256:4a701376d03f6b39b8c2a8f4a8e499441b0d567f9ab9d58e4991de4472fb813c
Status: Downloaded newer image for centos:7
[root@e8ee45a4fd28 /]# yum install net-tools -y //容器中安装网络工具
Loaded plugins: fastestmirror, ovl
Determining fastest mirrors
* base: mirrors.163.com
* extras: mirrors.163.com
...
node01服务器操作
[root@e8ee45a4fd28 /]# ifconfig //查看网卡信息
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
inet 172.17.49.2 netmask 255.255.255.0 broadcast 172.17.49.255
...
lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
...
[root@47aa8b55a61a /]# ifconfig //查看网卡信息
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
inet 172.17.63.2 netmask 255.255.255.0 broadcast 172.17.63.255
...
lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
...
[root@47aa8b55a61a /]# ping 172.17.49.2 //node02服务器中docker容器使用ping命令测试与node01服务器中docker是否可以通信
PING 172.17.49.2 (172.17.49.2) 56(84) bytes of data.
64 bytes from 172.17.49.2: icmp_seq=1 ttl=62 time=0.406 ms
64 bytes from 172.17.49.2: icmp_seq=2 ttl=62 time=0.377 ms
64 bytes from 172.17.49.2: icmp_seq=3 ttl=62 time=0.389 ms
64 bytes from 172.17.49.2: icmp_seq=4 ttl=62 time=0.356 ms
^C
--- 172.17.49.2 ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 3001ms
rtt min/avg/max/mdev = 0.356/0.382/0.406/0.018 ms //成功通信