docker

LXC: Linux Containers

chroot: switch the root directory;

namespaces: kernel namespaces

CGroups: control groups

yum install lxc lxc-templates

cd /usr/share/lxc/templates    (this directory contains templates for many different distributions; the CentOS template is used as the example here)

vim lxc-centos    (change the yum repo inside so that the CentOS LXC container created later can use it; point it at the mount directory of the ISO image)
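For reference, a repo entry pointing at a locally mounted ISO might look like the following sketch (the mount path and repo id are illustrative assumptions, not taken from the template):

[local-iso]
name=CentOS ISO
baseurl=file:///media/cdrom
enabled=1
gpgcheck=0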

First, turn the physical host's NIC into a bridge named virbr0, for example as sketched below.
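A minimal sketch using CentOS network-scripts, assuming the physical NIC is eth0; the NIC name and the address are assumptions, adjust them to the real host:

# /etc/sysconfig/network-scripts/ifcfg-virbr0
DEVICE=virbr0
TYPE=Bridge
BOOTPROTO=static
# illustrative host address
IPADDR=192.168.1.10
NETMASK=255.255.255.0
ONBOOT=yes

# /etc/sysconfig/network-scripts/ifcfg-eth0
DEVICE=eth0
BRIDGE=virbr0
ONBOOT=yes

systemctl restart network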

Basic usage:

lxc-checkconfig:

check whether the system environment meets the requirements for running containers

lxc-create: create an LXC container

lxc-create -n NAME -t TEMPLATE_NAME

lxc-start: start a container;

lxc-start -n NAME -d

lxc-info: view information about a container

lxc-info -n NAME

lxc-console: attach to a container's console

lxc-console -n NAME -t NUMBER

lxc-stop: stop a container

lxc-destroy: delete a container that is in the stopped state

lxc-snapshot: create and restore snapshots

lxc-create -n centos7 -t centos    (create a CentOS LXC container)

lxc-start -n centos7    (start the CentOS LXC container)

lxc-info -n centos7

lxc-top -n centos7

lxc-monitor -n centos7

lxc-freeze -n centos7

lxc-stop -n centos7

lxc-start -n centos7 -d

lxc-console -n centos7 -t 0    (log in to the LXC system)

lxc-destroy -n centos7

lxc-snapshot -n centos7 -L    (list all snapshots)

lxc-snapshot -n centos7

lxc-snapshot -n centos7 -d SNAPSHOT_NAME

lxc-clone -s centos7 centos7-clone

yum install docker

systemctl start docker

iptables -t nat -nL

docker search centos

docker pull busybox:latest

docker images

docker pull centos:7

docker pull centos:6

docker run --name c1 -it centos:latest /bin/bash
[root@a39f07c4ff2f]#

Detach from the container: Ctrl+P+Q

docker ps

docker ps -a

docker stop c1

docker rm c1

Building an image

docker run -it --name c1 centos

[root@a39f07c4ff2f]#yum install net-tools

Ctrl+P+Q

docker commit -p -a "mageedu <[email protected]>" c1

docker run -it --name c2-net 5bfed5aba96e
[root@a39f07c4ff2f]# ifconfig

docker kill c2-net

docker rm c2-net

docker images --no-trunc

docker tag 5bfed5aba96e mageedu/centos:net-tools

docker tag mageedu/centos:net-tools mageedu/centos:iproute

docker rmi mageedu/centos:net-tools

docker login -u mageedu

docker push mageedu/centos:net-tools

Export an image:

docker save mageedu/centos:net-tools -o centos-net-tools.tar

Import an image:

docker load -i centos-net-tools.tar

Log in to the registry

docker login -u mageedu

docker push mageedu/httpd

Log out of the registry

docker logout

Networking

For a container on node 1 to access a container on node 2, the container's traffic is first SNATed to node 1's physical IP; that traffic reaches node 2's physical IP, where a DNAT rule translates it to the target container on node 2. That completes the whole communication path. The rules involved look roughly like the sketch below.
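A rough sketch of the rules Docker sets up; the subnet, interface, addresses and port here are illustrative, not taken from these notes:

# on node 1: SNAT (masquerade) container traffic leaving via the physical NIC
iptables -t nat -A POSTROUTING -s 172.17.0.0/16 ! -o docker0 -j MASQUERADE

# on node 2: DNAT traffic arriving at the published port to the target container
iptables -t nat -A DOCKER ! -i docker0 -p tcp --dport 80 -j DNAT --to-destination 172.17.0.2:80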

docker network ls

brctl show

docker network inspect bridge

Create network namespaces

ip netns list

ip netns add r1

ip netns add r2

ip netns exec r2 ifconfig -a

Add a pair of linked virtual NICs (a veth pair)

ip link add name veth1.1 type veth peer name veth1.2

ip link show

Place the first virtual NIC into the first namespace and assign an IP

ip link set dev veth1.2 netns r1

ip netns exec r1 ifconfig -a

ip netns exec r1 ip link set dev veth1.2 name eth0

ifconfig veth1.1 10.1.0.1/24 up

ip netns exec r1 ifconfig eth0 10.1.0.2/24 up

ping 10.1.0.2

Place the other virtual NIC into the second namespace and assign an IP

ip link set dev veth1.1 netns r2

ip netns exec r2 ifconfig veth1.1 10.1.0.3/24 up

ip netns exec r2 ping 10.1.0.2

docker run --name t1 -it --rm busybox

/# ifconfig

/# exit

docker run --name t1 -it --network bridge --rm busybox

/# hostname

docker run --name t1 -it --network none --rm busybox

docker run --name t1 -it --network bridge -h t1.magedu.com --rm busybox

/# cat /etc/hosts

/# cat /etc/resolv.conf

docker run --name t1 -it --network bridge -h t1.magedu.com --dns 114.114.114.114 --rm busybox

docker run --name t1 -it --network bridge -h t1.magedu.com --dns 114.114.114.114 --add-host www.magedu.com:1.1.1.1 --rm busybox

Shared container network model

docker run --name b1 -it --rm busybox

/# httpd

docker run --name b2 --network container:b1 -it --rm busybox

/# wget -O - -q http://127.0.0.1

docker run --name t2 -it --network host --rm busybox

docker network create -d bridge --subnet "172.26.0.0/16" --gateway "172.26.0.1" mybr0

ifconfig

docker run --name t1 -it --net mybr0 busybox

docker run --name t2 -it --net bridge busybox

docker run --name b2 -it -v /data busybox

docker inspect b2

docker run --name b2 -it --rm -v /data/volumes/b2:/data busybox

The output is an array in JSON format

Filter to view individual values; the top level is the root, written as a dot

docker inspect -f {{.Mounts}} b2

docker inspect -f {{.NetworkSettings.IPAddress}} b2
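Since .Mounts is a JSON array, a single element can also be picked out with the Go template index function; a hedged example, assuming the element exposes a Source field:

docker inspect -f '{{ (index .Mounts 0).Source }}' b2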

Sharing data between containers

docker run --name b3 -it --rm -v /data/volumes/b2:/data busybox

docker run --name infracon -it -v /data/infracon/volumes/:/data/web/html busybox

docker run --name nginx --network container:infracon --volumes-from infracon -it busybox

docker inspect nginx

If a container uses NFS as the backend storage for a MySQL database, be sure to add nfsvers=3,nolock to the mount options, otherwise it fails with error: 37

docker volume create --driver local --opt type=nfs --opt o=addr=192.168.1.200,rw,nfsvers=3,nolock --opt device=:/data volume-nfs
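A hedged usage sketch: the named volume created above can then be mounted into a database container by name; the mysql:5.7 image and the password value here are illustrative assumptions:

docker run --name db1 -d -v volume-nfs:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=secret mysql:5.7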

dockerfile

vim Dockerfile

# Description: test image

FROM busybox:latest

MAINTAINER "MageEdu <[email protected]>"

#LABEL maintainer="MageEdu <[email protected]>"

COPY index.html /data/web/html/    # the destination path must end with a slash

docker build -t tinyhttpd:v0.1-1 ./

docker run --name tinyweb1 --rm tinyhttpd:v0.1-1 cat /data/web/html/index.html

cp -r /etc/yum.repos.d/ ./

COPY index.html /data/web/html/

COPY yum.repos.d /etc/yum.repos.d/

docker build -t tinyhttpd:v0.1-2 ./

docker run --name tinyweb1 --rm tinyhttpd:v0.1-2 ls /etc/yum.repos.d/

ADD http://nginx.org/download/nginx-1.15.2.tar.gz /usr/local/src/

docker build -t tinyhttpd:v0.1-3 ./

docker run --name tinyweb1 --rm tinyhttpd:v0.1-3 ls /usr/local/src/

#ADD http://nginx.org/download/nginx-1.15.2.tar.gz /usr/local/src/

ADD nginx-1.15.2.tar.gz /usr/local/src/

docker build -t tinyhttpd:v0.1-4 ./

docker run --name tinyweb1 --rm tinyhttpd:v0.1-4 ls /usr/local/src/

WORKDIR /usr/local/src/

ADD nginx-1.15.2.tar.gz ./

VOLUME /data/mysql/

docker build -t tinyhttpd:v0.1-5 ./

docker run --name tinyweb1 --rm tinyhttpd:v0.1-5 mount

docker inspect tinyweb1

EXPOSE 80/tcp

docker build -t tinyhttpd:v0.1-6 ./

docker run --name tinyweb1 --rm tinyhttpd:v0.1-6 /bin/httpd -f -h /data/html

curl 172.17.0.6

docker port tinyweb1

docker run --name tinyweb1 --rm -P tinyhttpd:v0.1-6 /bin/httpd -f -h /data/html

curl 192.168.1.200:32768

ENV DOC_ROOT=/data/web/html/ \

WEB_SERVER_PACKAGE="nginx-1.15.2"

COPY index.html ${DOC_ROOT:-/data/web/html/}

ADD ${WEB_SERVER_PACKAGE}.tar.gz ./

docker build -t tinyhttpd:v0.1-7 ./

docker run --name tinyweb1 --rm -P tinyhttpd:v0.1-7 ls /usr/local/src/

docker run --name tinyweb1 --rm -P tinyhttpd:v0.1-7 printenv

docker run --name tinyweb1 --rm -P -e WEB_SERVER_PACKAGE="nginx-1.15.1" tinyhttpd:v0.1-7

WEB_SERVER_PACKAGE="nginx-1.15.2.tar.gz"

ADD http://nginx.org/download/${WEB_SERVER_PACKAGE} /usr/local/src/

RUN cd /usr/local/src/ && \

tar -xf ${WEB_SERVER_PACKAGE}

docker build -t tinyhttpd:v0.1-8 ./

docker run --name tinyweb1 --rm -P tinyhttpd:v0.1-8 ls /usr/local/src/

FROM busybox

LABEL maintainer="MageEdu <[email protected]>" app="http"

ENV WEB_DOC_ROOT="/data/web/html/"

RUN mkdir -p $WEB_DOC_ROOT && \

echo "Busybox http server." > ${WEB_DOC_ROOT}/index.html

CMD /bin/httpd -f -h ${WEB_DOC_ROOT}    # this command ends up replacing the PID-1 shell process

docker build -t tinyhttpd:v0.2-1 ./

docker image inspect tinyhttpd:v0.2-1    # look at the Cmd line

docker run --name tinyweb1 -it --rm -P tinyhttpd:v0.2-1    (no interactive shell appears, because PID 1 has been replaced by the CMD command and httpd is not interactive)

docker exec -it tinyweb1 /bin/sh

/# ps

CMD ["/bin/httpd","-f","-h ${WEB_DOC_ROOT}"] #不会以shell的子进程启动,所以此处的变量不会被替换启动会报错,我们所有的变量替换,执行的命令均为shell下面的命令

docker build -t tinyhttpd:v0.2-2 ./

docker image inspect tinyhttpd:v0.2-2    # look at the Cmd line and note the difference from the image above

docker run --name tinyweb1 -it --rm -P tinyhttpd:v0.2-2    # this fails with an error

CMD ["/bin/httpd","-f","-h ${WEB_DOC_ROOT}"] can be changed to CMD ["/bin/sh","-c","/bin/httpd","-f","-h ${WEB_DOC_ROOT}"]

docker build -t tinyhttpd:v0.2-3 ./

docker run --name tinyweb1 -it -P tinyhttpd:v0.2-3    # the container runs and then exits immediately
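A form that keeps httpd in the foreground (a sketch, not from the original notes) is to hand the whole command line to sh -c as a single string, so that -f and -h are not dropped as positional parameters:

CMD ["/bin/sh","-c","/bin/httpd -f -h ${WEB_DOC_ROOT}"]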

ENTRYPOINT /bin/httpd -f -h ${WEB_DOC_ROOT}

docker build -t tinyhttpd:v0.2-5 ./

docker run --name tinyweb1 -it --rm -P tinyhttpd:v0.2-5

docker run --name tinyweb1 -it --rm -P tinyhttpd:v0.2-5 ls /data/web/html/    # here ls /data/web/html/ is treated as extra arguments appended after the ENTRYPOINT, i.e. /bin/httpd -f -h ${WEB_DOC_ROOT} ls /data/web/html/

Change it to the following invocation instead:

docker run --name tinyweb1 -it --rm -P --entrypoint "ls /data/web/html/" tinyhttpd:v0.2-5

If both CMD and ENTRYPOINT are defined, whatever CMD specifies is appended to ENTRYPOINT as its arguments.

CMD ["/bin/httpd","-f","-h ${WEB_DOC_ROOT}"]

ENTRYPOINT /bin/sh -c

docker build -t tinyhttpd:v0.2-6 ./

docker inspect tinyhttpd:v0.2-6    # look at the Entrypoint and Cmd lines

ENTRYPOINT ["/bin/sh","-c"]

docker build -t tinyhttpd:v0.2-7 ./

docker inspect tinyhttpd:v0.2-7    # look at the Entrypoint and Cmd lines and compare with the above

docker run --name tinyweb1 -it --rm -P tinyhttpd:v0.2-7

docker run --name tinyweb1 -it --rm -P tinyhttpd:v0.2-7 "ls /data"    # the argument here replaces CMD, so the command becomes ENTRYPOINT ["/bin/sh","-c","ls /data"]

cat entrypoint.sh

#!/bin/sh

cat > /etc/nginx/conf.d/www.conf << EOF

server {

server_name ${HOSTNAME};

listen ${IP:-0.0.0.0}:${PORT:-80};

root ${NGX_DOC_ROOT:-/usr/share/nginx/html};

}

EOF

exec "$@"

FROM nginx:1.14-alpine

LABEL maintainer="MageEdu <[email protected]>"

ENV NGX_DOC_ROOT="/data/web/html/"

ADD index.html ${NGX_DOC_ROOT}

ADD entrypoint.sh /bin/

CMD ["/usr/sbin/nginx","-g","daemon off;"]

ENTRYPOINT ["/bin/entrypoint.sh"]

docker build -t myweb:v0.3-1 ./

docker run --name myweb1 --rm -P myweb:v0.3-1

docker exec -it myweb1 /bin/sh

wget -O - -q localhost

docker run --name myweb1 --rm -P -e "PORT=8080" myweb:v0.3-1

HEALTHCHECK --start-period=3s CMD wget -O - -q http://${IP:-0.0.0.0}:${PORT:-80}/    # check the health of the service inside the container
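HEALTHCHECK also accepts --interval, --timeout and --retries; a fuller sketch with illustrative values:

HEALTHCHECK --start-period=3s --interval=30s --timeout=3s --retries=3 \
    CMD wget -O - -q http://${IP:-0.0.0.0}:${PORT:-80}/ || exit 1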

docker build -t myweb:v0.3-7 ./

docker run --name myweb1 --rm -P -e "PORT=8080" myweb:v0.3-7

ARG author="MageEdu <[email protected]>"

LABEL maintainer="${author}"

docker build -t myweb:v0.3-8 ./

docker image inspect myweb:v0.3-8

docker build --build-arg author="pony <[email protected]>" -t myweb:v0.3-9 ./

ONBUILD ADD http://nginx.org/download/nginx-1.14.tar.gz /usr/local/src/    # executed only when another Dockerfile builds FROM this image

docker build --build-arg author="pony <[email protected]>" -t myweb:v0.3-11 ./

FROM myweb:v0.3-11

RUN mkdir /tmp/test

docker build -t test:v0.1 ./

registry

yum install docker-registry

cd /etc/docker-distribution/registry/

cat config.yml

systemctl start docker-distribution

ls /var/lib/registry/v2/repositories/myweb

Test push or pull from another node

docker tag myweb:v0.3-11 192.168.1.200:5000/myweb:v0.3-11    (the IP can also be a hostname)

docker push 192.168.1.200:5000/myweb:v0.3-11    # this reports an https error, because the registry side uses http while push and pull default to https; make the following change

vim /etc/docker/daemon.json

{

"insecure-registrites": ["192.168.1.200:5000"]

docker pull 192.168.1.200:5000/myweb:v0.3-11

harbor

yum install docker-compose

tar -zxvf harbor-offline-install-v1.4.0.tar.gz

cd harbor

vim harbor.cfg

hostname = 192.168.1.200

./install.sh

Visit 192.168.1.200/harbor

Create the repository name and other settings in the web UI

"insecure-registries": ["192.168.1.200"]

docker tag myweb:v0.3-1 192.168.1.200/devel/myweb:v0.3-1

docker login 192.168.1.200

Start containers in batch

docker-compose start

Stop containers in batch

docker-compose stop

docker resources

docker pull lorel/docker-stress-ng    # a stress-testing image

Set a 256m memory limit and run 2 vm workers

docker run --name stress -it --rm -m 256m lorel/docker-stress-ng stress --vm 2    # after it starts, observe the effect with the commands below

Limit the container to 2 CPUs and run 8 cpu workers

docker run --name stress -it --rm --cpus 2 lorel/docker-stress-ng stress --cpu 8

Pin the 8 cpu workers to specific CPU cores

docker run --name stress -it --rm --cpuset-cpus 0,1 lorel/docker-stress-ng stress --cpu 8

Set the CPU shares each container is limited to

docker run --name stress -it --rm --cpu-shares 512 lorel/docker-stress-ng stress --cpu 8

docker run --name stress -it --rm --cpu-shares 1024 lorel/docker-stress-ng stress --cpu 8

docker top stress

docker stats

/etc/init.d/docker start

docker pull centos

docker run centos /bin/echo 'Hello world'

docker ps -a

docker run --name mydocker -it centos /bin/bash

docker start mydocker

docker run -d --name mydocker1 centos

docker stop mydocker

docker attach mydocker

docker inspect --format "{{.State.Pid}}" mydocker    # look up the container's PID

nsenter --target PID --mount --uts --ipc --net --pid    # enter the container; exiting this way does not stop the container
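The two steps can be combined into one small sketch:

PID=$(docker inspect --format "{{.State.Pid}}" mydocker)
nsenter --target $PID --mount --uts --ipc --net --pid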

docker run -d -P --name mydocker centos

docker run -d -p 91:80 --name mydocker centos

docker run -it --name volume-test1 -h nginx -v /data nginx

docker inspect -f {{.Volumes}} volume-test1

docker run -it --name volume-test2 -h nginx -v /opt:/opt centos

docker run -it --name volume-test4 --volumes-from volume-test1 centos

docker commit -m "my nginx" volume-test4 my-nginx:v1

mkdir /opt/docker-file

cd /opt/docker-file

mkdir nginx

cd nginx

# this is my first dockerfile

# version 1.0

# Author: chenjiao

#base images

FROM centos

#MAINTAINER

MAINTAINER chenjiao

#ADD

ADD pcre-8.37.tar.gz /usr/local/src

ADD nginx-1.9.3.tar.gz /usr/local/src

#RUN

RUN yum install -y wget gcc gcc-c++ make openssl-devel

RUN useradd -s /sbin/nologin -M www

#WORKDIR
WORKDIR /usr/local/src/nginx-1.9.3

RUN ./configure --with-pcre=/usr/local/src/pcre-8.37 && make && make install

RUN echo "daemon off;" >> /usr/local/nginx/conf/nginx.conf

ENV PATH /usr/local/nginx/sbin:$PATH

EXPOSE 80

CMD ["nginx"]

docker build -t nginx-file:v1 .

vim Dockerfile

RUN yum install -y stress

ENTRYPOINT ["stress"]

docker run -it --rm stress --cpu 1

docker run -it --rm -c 512 stress --cpu 1

docker run -it --rm -c 1024 stress --cpu 1

docker run -it --rm --cpuset=0 stress --cpu 1

docker run -it --rm -m 128m stress --vm 1 --vm-bytes 120m --vm-hang 0

docker run -d -p 5001:5000 registry

docker tag elasticsearch 192.168.199.220:5001/test/es:v1

docker push 192.168.199.220:5001/test/es:v1

docker pull 192.168.199.220:5001/test/es:v1

Deploying a web GUI

docker run -itd --name shipyard-rethinkdb-data --entrypoint /bin/bash shipyard/rethinkdb -l

ENTRYPOINT and CMD differ in how arguments passed to docker run are handled: the command specified by CMD can be overridden by the command passed to docker run. For example, if CMD specifies:

...

CMD ["echo"]

and you then run

docker run CONTAINER_NAME echo foo

the echo specified in CMD is overridden by the newly supplied echo, so this is equivalent to running echo foo, and the final output is:

foo

ENTRYPOINT, on the other hand, passes everything after the container name as arguments to its command (the command itself is not overridden). For example, with:

ENTRYPOINT ["echo"]

the echo foo after CONTAINER_NAME is passed entirely as arguments to the echo specified in ENTRYPOINT, so it is equivalent to executing

echo "echo foo"

and the final output is:

echo foo

In addition, in a Dockerfile the arguments specified by ENTRYPOINT come before those given at docker run time. For example, with:

ENTRYPOINT ["echo", "foo"]

running

docker run CONTAINER_NAME bar

is equivalent to executing:

echo foo bar

and the output is:

foo bar
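The two are often combined, with ENTRYPOINT fixing the program and CMD supplying default arguments that docker run can override; a hedged sketch:

ENTRYPOINT ["echo"]
CMD ["foo"]

docker run CONTAINER_NAME        # prints: foo
docker run CONTAINER_NAME bar    # prints: bar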

Publishing ports with -P and EXPOSE

Because EXPOSE usually serves only as a record of which ports the service listens on, Docker can easily turn the EXPOSE instructions in a Dockerfile into concrete port-binding rules. Just add -P at run time and Docker will automatically create the port mappings for you and help avoid mapping conflicts.

Add the following lines to the web application Dockerfile used above:

EXPOSE 1000

EXPOSE 2000

EXPOSE 3000

Build the image and name it exposed-ports.

docker build -t exposed-ports .

Run it again with -P, but without passing any specific -p rules. You can see that Docker maps each port listed in an EXPOSE instruction to a port on the host:

$ docker run -d -P --name exposed-ports-in-dockerfile exposed-ports

63264dae9db85c5d667a37dac77e0da7c8d2d699f49b69ba992485242160ad3a

$ docker port exposed-ports-in-dockerfile

1000/tcp -> 0.0.0.0:49156

2000/tcp -> 0.0.0.0:49157

3000/tcp -> 0.0.0.0:49158

sudo -i switches to the root account on Ubuntu

Setting up a private registry

docker run -d -p 5000:5000 --name registry registry:0.9.1

Tag the image

docker tag csphere/csphere:0.11.2 192.168.1.200:5000/csphere/csphere:0.11.2

Push the image

docker push 192.168.1.200:5000/csphere/csphere:0.11.2

mysql:
  image: csphere/mysql:5.5
  ports:
    - "3306:3306"
  volumes:
    - /var/lib/docker/vfs/dir/dataxc:/var/lib/mysql
  hostname: mydb.server.com

tomcat:
  image: csphere/tomcat:7.0.55
  ports:
    - "8080:8080"
  links:
    - mysql:db
  environment:
    - TOMCAT_USER=admin
    - TOMCAT_PASS=admin
  hostname: tomcat.server.com

Deploy multiple containers with one compose command (run it from the directory that contains the YAML config file)

docker-compose up -d

docker-compose ps

docker-compose rm

Continuous integration with Jenkins and Docker

docker run -d -p 8080:8080 --name jenkins -v /usr/bin/docker:/usr/bin/docker -v /var/run/docker.sock:/var/run/docker.sock -v /root/maven-tar:/root csphere/jenkins:1.609

docker exec -it jenkins /bin/bash

docker create --name maven csphere/maven:3.3.3    (create builds the container but does not start it)

Copy a file from inside the container to the local host

docker cp maven:/hello/target/hello.war .

Docker monitoring

curl -SsL -o /tmp/csphere-install.sh https://csphere.cn/static/csphere-install-v2.sh

sudo env ROLE=controller CSPHERE_VERSION=1.0.1 /bin/sh /tmp/csphere-install.sh

Install the Agent on the cluster nodes

Visit port 1016 on the controller A host and click the Hosts menu on the left to open the host list page,

then click Add Host and copy the script; run it on the Agent host to install the Agent program.

After that you can view and manage Docker containers; see the documentation for details, which are not repeated here.

Docker log management

docker run --name elk -d -p 9200:9200 -p 5601:5601 -p 5000:5000 -e ES_MIN_MEM=64m -e ES_MAX_MEM=512m csphere/elk:1.6.0

docker exec -it elk /bin/bash

/opt/logstash/bin/logstash -e 'input { stdin { } } output { elasticsearch { host => localhost} }'

docker run -d --name fwd --link serene_meitner:log.scphere.cn -v /data/logs:/data/logs csphere/logstash-forwarder:0.4.0

Docker networking

NAT network

docker run -it --name csphere-nat busybox sh

Shared host network

docker run -it --name csphere-host --net=host busybox sh

Container-to-container shared network

docker run -it --name csphere busybox sh

docker run -it --name csphere-con --net=container:csphere busybox sh

None network

docker run -it --name csphere-none --net=none busybox sh

Overlay networking for cross-host container communication requires a Docker cluster backed by a key-value store, for example a Consul cluster

docker rm -f $(docker ps -a -q)

nodeserver1:

http://www.cnblogs.com/hutao722/p/9668202.html

mkdir /data

consul agent -server -bootstrap -data-dir /data/consul -bind=0.0.0.0 > /var/log/consul.log

echo "DOCKER_OPTS='--kv-store=consul:localhost:8500" --label=com.docker.network.drive.overlay.bind_interface=eth0 --default-network=overlay:multihost' > /etc/default/docker

restart docker

docker run -it --name test1 busybox sh

docker service ls

Configure containers on the overlay network to access the external network

docker service publish test-bridge.bridge

docker service attach test1 test-bridge.bridge

docker exec -it test1 sh

nodeclient2:

consul agent -data-dir /data/consul -bind=0.0.0.0 > /var/log/consul.log

consul join $IP    ($IP is the IP of nodeserver1)

echo "DOCKER_OPTS='--kv-store=consul:localhost:8500 --label=com.docker.network.driver.overlay.bind_interface=eth0 --label=com.docker.network.driver.overlay.neighbor_ip=$IP --default-network=overlay:multihost'" > /etc/default/docker

consul members

docker run -it --name test2 busybox sh

One-click deployment with docker-compose

db:
  container_name: my-db-container
  image: csphere/mysql:5.5
  volumes:
    - /root/my-db:/var/lib/mysql

web:
  container_name: my-web-container
  image: csphere/hello:1.0
  ports:
    - "80:8080"
  environment:
    - DB_HOST=172.17.42.1

docker-compose up -d

Docker Swarm cluster

https://www.linuxidc.com/Linux/2017-10/147660.htm

Three steps:

Create a cluster token

any random string will do

Create the swarm master node

vim /etc/sysconfig/docker

DOCKER_OPTS="-H 0.0.0.0:2375 -H unix:///var/run/docker.sock --label label_name=docker1"

docker run -d -p 2376:2375 swarm manage token://{TOKEN}

Join this node to the swarm cluster

docker run -d swarm join --addr=MASTER_IP:2375 token://{TOKEN}