Docker 网络
理解Docker0
清空所有环境
# centos7系统环境
# 清空所有容器
[root@localhost ~]#docker rm -f $(docker ps -aq)
5b2b527d0cde
7f3c9bbc1a19
0153e509926d
d0184a7eb0ed
cb638941890b
1ce46e3625c8
31a3aaa44ff1
9ed360d184c0
1ac5ac579488
# 清除所有镜像
[root@localhost ~]# docker rmi -f $(docker images -aq)
Deleted: sha256:164e693bcb758292a0fc8b32b9c40d053d33eb3f5418b5a84a42f911a7121bb8
Untagged: mycentos:1.0
Deleted: sha256:60d5f5ace1c7cb9e3f882b494671a973f5a5dc6eb28479c30149fc3c796116b5
Untagged: nginx:latest
Untagged: nginx@sha256:0d17b565c37bcbd895e9d92315a05c1c3c9a29f762b011a10c54a66cd53c9b31
Deleted: sha256:605c77e624ddb75e6110f997c58876baa13f8754486b461117934b24a9dc3a85
Deleted: sha256:b625d8e29573fa369e799ca7c5df8b7a902126d2b7cbeb390af59e4b9e1210c5
Deleted: sha256:7850d382fb05e393e211067c5ca0aada2111fcbe550a90fed04d1c634bd31a14
Deleted: sha256:02b80ac2055edd757a996c3d554e6a8906fd3521e14d1227440afd5163a5f1c4
Deleted: sha256:b92aa5824592ecb46e6d169f8e694a99150ccef01a2aabea7b9c02356cdabe7c
Deleted: sha256:780238f18c540007376dd5e904f583896a69fe620876cabc06977a3af4ba4fb5
Deleted: sha256:2edcec3590a4ec7f40cf0743c15d78fb39d8326bc029073b41ef9727da6c851f
Untagged: tomcat:9.0
Untagged: tomcat@sha256:cd96d4f7d3f5fc4d3bc1622ec678207087b8215d55021a607ecaefba80b403ea
Deleted: sha256:b8e65a4d736dca28cd65b1b3b18100aad2984fc9a484d423db7a8fcee1ed5d48
Deleted: sha256:7cea9b28ec076e69d2921f8fec7a6a0d40a815d8fc91a5ecfb7e817a0ae6d5cf
测试
# centos7系统环境
# 查看IP地址
1.lo:本机回环地址
2.ens33:宿主机内网地址(本例为本地虚拟机的网卡;若在阿里云等云主机上则为云内网地址)
5.docker0:docker地址(可以想象为路由器),也是其他容器的gw
[root@localhost ~]# ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:90:b4:23 brd ff:ff:ff:ff:ff:ff
inet 172.16.12.154/24 brd 172.16.12.255 scope global noprefixroute dynamic ens33
valid_lft 1499sec preferred_lft 1499sec
inet6 fe80::20c:29ff:fe90:b423/64 scope link
valid_lft forever preferred_lft forever
3: virbr0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default qlen 1000
link/ether 52:54:00:46:90:be brd ff:ff:ff:ff:ff:ff
inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
valid_lft forever preferred_lft forever
4: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc pfifo_fast master virbr0 state DOWN group default qlen 1000
link/ether 52:54:00:46:90:be brd ff:ff:ff:ff:ff:ff
5: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:d4:3f:64:05 brd ff:ff:ff:ff:ff:ff
inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
valid_lft forever preferred_lft forever
inet6 fe80::42:d4ff:fe3f:6405/64 scope link
valid_lft forever preferred_lft forever
6: br-983a1a9b2f0f: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:dd:dd:3e:ce brd ff:ff:ff:ff:ff:ff
inet 172.18.0.1/16 brd 172.18.255.255 scope global br-983a1a9b2f0f
valid_lft forever preferred_lft forever
inet6 fe80::42:ddff:fedd:3ece/64 scope link
valid_lft forever preferred_lft forever
三个网络
# 问题:docker 是如何处理容器网络访问的?
tomcat 容器 ----------> mysql 容器
# centos7系统环境
# 执行一个tomcat容器运行命令
[root@localhost ~]# docker run -d -P --name tomcat01 tomcat
# 查看容器的内部网络地址 ip addr
[root@localhost ~]# docker exec -it tomcat01 ip addr
****如果出现报错提示****
[root@localhost ~]# docker exec -it tomcat01 ip addr
OCI runtime exec failed: exec failed: container_linux.go:380: starting container process caused: exec: "ip": executable file not found in $PATH: unknown
# 按照如下方式解决
# 执行如下命令交互模式进入容器内
[root@localhost ~]# docker exec -it 6fde5463673b /bin/bash
root@6fde5463673b:/usr/local/tomcat# pwd
/usr/local/tomcat
# 在容器内安装iproute2
root@6fde5463673b:/usr/local/tomcat# apt update && apt install -y iproute2
Get:1 http://security.debian.org/debian-security bullseye-security InRelease [44.1 kB]
Get:2 http://deb.debian.org/debian bullseye InRelease [116 kB]
Get:3 http://deb.debian.org/debian bullseye-updates InRelease [39.4 kB]
Get:4 http://security.debian.org/debian-security bullseye-security/main amd64 Packages [102 kB]
Get:5 http://deb.debian.org/debian bullseye/main amd64 Packages [8183 kB]
Get:6 http://deb.debian.org/debian bullseye-updates/main amd64 Packages [2592 B]
Fetched 8487 kB in 3min 44s (37.9 kB/s)
Reading package lists... Done
Building dependency tree... Done
Reading state information... Done
All packages are up to date.
Reading package lists... Done
Building dependency tree... Done
Reading state information... Done
The following additional packages will be installed:
libatm1 libbpf0 libcap2 libcap2-bin libelf1 libmnl0 libpam-cap libxtables12
# 容器内安装ping命令
root@6fde5463673b:/usr/local/tomcat# apt-get install iputils-ping
# 容器内安装vim命令
root@6fde5463673b:/usr/local/tomcat# apt-get install vim
# 容器内安装ifconfig命令
root@6fde5463673b:/usr/local/tomcat# apt-get install net-tools
# 在容器内执行 ip addr,发现容器启动的时候会得到一个eth0@if18 ip地址,docker分配
[root@6fde5463673b:/usr/local/tomcat]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
17: eth0@if18: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:ac:11:00:02 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 172.17.0.2/16 brd 172.17.255.255 scope global eth0
valid_lft forever preferred_lft forever
# 思考:linux宿主机能不能ping通容器内部
# 在linux宿主机ping docker容器ip地址,结果如下,可以ping通
[root@localhost ~]# ping 172.17.0.2
PING 172.17.0.2 (172.17.0.2) 56(84) bytes of data.
64 bytes from 172.17.0.2: icmp_seq=1 ttl=64 time=5.87 ms
64 bytes from 172.17.0.2: icmp_seq=2 ttl=64 time=0.054 ms
64 bytes from 172.17.0.2: icmp_seq=3 ttl=64 time=0.061 ms
--- 172.17.0.2 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2001ms
rtt min/avg/max/mdev = 0.054/1.995/5.871/2.740 ms
原理
1、我们每启动一个docker容器,docker就会给docker容器分配一个ip。我们只要安装了docker,宿主机就会有一个默认网卡docker0,它采用桥接模式,使用的技术是veth-pair技术,如图所示:
在centos7宿主机再次测试 ip addr
# 在宿主机linux环境执行,查看ip配置信息
[root@localhost ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:90:b4:23 brd ff:ff:ff:ff:ff:ff
inet 172.16.12.154/24 brd 172.16.12.255 scope global noprefixroute dynamic ens33
valid_lft 1654sec preferred_lft 1654sec
inet6 fe80::20c:29ff:fe90:b423/64 scope link
valid_lft forever preferred_lft forever
3: virbr0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default qlen 1000
link/ether 52:54:00:46:90:be brd ff:ff:ff:ff:ff:ff
inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
valid_lft forever preferred_lft forever
4: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc pfifo_fast master virbr0 state DOWN group default qlen 1000
link/ether 52:54:00:46:90:be brd ff:ff:ff:ff:ff:ff
5: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:d4:3f:64:05 brd ff:ff:ff:ff:ff:ff
inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
valid_lft forever preferred_lft forever
inet6 fe80::42:d4ff:fe3f:6405/64 scope link
valid_lft forever preferred_lft forever
6: br-983a1a9b2f0f: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:dd:dd:3e:ce brd ff:ff:ff:ff:ff:ff
inet 172.18.0.1/16 brd 172.18.255.255 scope global br-983a1a9b2f0f
valid_lft forever preferred_lft forever
inet6 fe80::42:ddff:fedd:3ece/64 scope link
valid_lft forever preferred_lft forever
# 启动一个容器,宿主机就会出现一对网卡
18: veth4f4cbe7@if17: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP group default
link/ether ca:17:c3:c0:51:6c brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet6 fe80::c817:c3ff:fec0:516c/64 scope link
valid_lft forever preferred_lft forever
2、再启动一个容器tomcat02测试,发现又多了一对网卡
# 我们发现容器带来的网卡,都是成对出现
# veth-pair就是一对虚拟设备接口,所有容器都指向一个共同网关,即宿主机的docker0
# OpenStack、Docker容器之间的连接、OVS的连接,都是使用veth-pair技术
3、测试一下tomcat01和tomcat02是否ping通,完全可以,都在一个网段
结论:tomcat01和tomcat02共用一个路由器docker0,所有的容器在不指定网络的情况下,都是由docker0转发。
Docker中的所有网络接口都是虚拟的,虚拟的转发效率高。(内网传递文件)
如果容器被删除,对应的那一对veth-pair虚拟网卡也会随之删除。
--link
1、假设一个场景:我们编写了一个微服务,database url=ip:port。项目不重启,但数据库ip换掉了,我们希望可以处理这个问题——能否直接用容器名来访问容器?
# 直接这样执行是ping不通的
[root@localhost ~]# docker exec -it tomcat02 ping tomcat01
ping:tomcat01: No address associated with hostname
# 如何解决呢?
# 通过 --link即可以解决网络连通问题
[root@localhost ~]# docker run -d -P --name tomcat03 --link tomcat02 tomcat
3e5adcd4d5b5e4b488f6f68747c21886d157e3518180d8109d607b2de8281d21
[root@localhost ~]# docker exec -it tomcat03 ping tomcat02
PING tomcat02 (172.17.0.3) 56(84) bytes of data.
64 bytes from tomcat02 (172.17.0.3): icmp_seq=1 ttl=64 time=24.5 ms
64 bytes from tomcat02 (172.17.0.3): icmp_seq=2 ttl=64 time=0.074 ms
64 bytes from tomcat02 (172.17.0.3): icmp_seq=3 ttl=64 time=0.098 ms
64 bytes from tomcat02 (172.17.0.3): icmp_seq=4 ttl=64 time=0.078 ms
64 bytes from tomcat02 (172.17.0.3): icmp_seq=5 ttl=64 time=0.075 ms
# 反向可以ping通吗?
[root@localhost ~]# docker exec -it tomcat02 ping tomcat03
ping:tomcat03: No address associated with hostname
2、探究: 利用inspect
# 在centos系统环境执行如下命令
# 第一行是docker0
[root@localhost ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
f0252b7d323e bridge bridge local
4d03c52372e1 host host local
898465300bee none null local
983a1a9b2f0f x34-arbitrary-file-deletion_default bridge local
# 然后执行docker network inspect [NETWORK ID]
[root@localhost ~]# docker network inspect f0252b7d323e
[
{
"Name": "bridge",
"Id": "f0252b7d323ed587414479e7c9500efe84a011bee1fc745674a6193fc2147874",
"Created": "2022-01-09T15:16:02.834596925-05:00",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default", # 默认docker0
"Options": null,
"Config": [
{
"Subnet": "172.17.0.0/16",
"Gateway": "172.17.0.1" # 网关docker0
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {
"2923494636b68c0208e750a5617387899a50ac8ff5f59a139d2e1b3d49f9725e": {
"Name": "tomcat01",
"EndpointID": "8b947e54e960868ec731fb26371916b20d5f1fe9e583b5b4fd05359d58b504bb",
"MacAddress": "02:42:ac:11:00:02",
"IPv4Address": "172.17.0.2/16", # 每启动一个容器,不指定ip,容器就会随机分配一个IP
"IPv6Address": ""
},
"3e5adcd4d5b5e4b488f6f68747c21886d157e3518180d8109d607b2de8281d21": {
"Name": "tomcat03",
"EndpointID": "186c9a43a4ca47d8603e5487c6f909fc5490b2bdb9456d719d230cc592e6df4c",
"MacAddress": "02:42:ac:11:00:04",
"IPv4Address": "172.17.0.4/16", # 每启动一个容器,不指定ip,容器就会随机分配一个IP
"IPv6Address": ""
},
"fc4aa0be8b52ff3967ad44a5e6678e194767f26e1fd3840d9e2b1340fa24e1df": {
"Name": "tomcat02",
"EndpointID": "872b06c64cbca78d15d3a0fb5f4b559e5fe02cb649a86020d40a1f20e2838948",
"MacAddress": "02:42:ac:11:00:03",
"IPv4Address": "172.17.0.3/16", # 每启动一个容器,不指定ip,容器就会随机分配一个IP
"IPv6Address": ""
}
},
"Options": {
"com.docker.network.bridge.default_bridge": "true",
"com.docker.network.bridge.enable_icc": "true",
"com.docker.network.bridge.enable_ip_masquerade": "true",
"com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
"com.docker.network.bridge.name": "docker0",
"com.docker.network.driver.mtu": "1500"
},
"Labels": {
}
}
]
本质:--link就是在容器的hosts配置里增加了一条 172.17.0.3 tomcat02 fc4aa0be8b52 映射
真实操作场景,已经不建议使用--link了!!
自定义网络,不使用docker0!
docker0问题:不支持容器名连接访问!