swarm 方式部署 Flink 1.19 (镜像 flink-umi:1.19-1.0.0)
#打上标签
docker node update --label-add flink-node=flink01 s1
docker node update --label-add flink-node=flink02 s2
# docker-compose.yaml
#部署2个flink-job,每个job在不同节点上
#部署2个flink-task,不限制节点部署
# Flink stack for `docker stack deploy`: two jobmanagers pinned to different
# nodes, plus two taskmanagers with no placement constraint.
# version '3.8' (>= 3.2) is required for the long-form `ports` syntax below.
version: '3.8'

services:
  flink-jobmanager-01:
    image: flink-umi:1.19-1.0.0
    # container_name is ignored by `docker stack deploy`, so it is omitted.
    ports:
      # host-mode publish: the default ingress routing mesh publishes a port
      # cluster-wide, so two services both publishing 48081 would conflict and
      # the second service would fail to deploy. With mode: host each
      # jobmanager is reachable on port 48081 of its own node (s1 / s2).
      - target: 48081
        published: 48081
        protocol: tcp
        mode: host
    networks:
      - umi
    environment:
      # NOTE(review): no service named "flink-jobmanager" exists in this stack
      # (services are flink-jobmanager-01/-02) — taskmanagers may not resolve
      # this address; confirm the intended RPC hostname.
      FLINK_PROPERTIES: "jobmanager.rpc.address:flink-jobmanager"
    volumes:
      - /data/flink/conf/:/opt/flink/conf/
    command: jobmanager
    deploy:
      replicas: 1
      placement:
        constraints:
          - node.labels.flink-node == flink01

  flink-jobmanager-02:
    image: flink-umi:1.19-1.0.0
    ports:
      # see flink-jobmanager-01: host-mode avoids the ingress port clash
      - target: 48081
        published: 48081
        protocol: tcp
        mode: host
    networks:
      - umi
    environment:
      # NOTE(review): same hostname concern as flink-jobmanager-01.
      FLINK_PROPERTIES: "jobmanager.rpc.address:flink-jobmanager"
    volumes:
      - /data/flink/conf/:/opt/flink/conf/
    command: jobmanager
    deploy:
      replicas: 1
      placement:
        constraints:
          - node.labels.flink-node == flink02

  flink-taskmanager:
    image: flink-umi:1.19-1.0.0
    # no published ports; taskmanagers talk to jobmanagers over the overlay net
    networks:
      - umi
    environment:
      FLINK_PROPERTIES: "jobmanager.rpc.address:flink-jobmanager"
    volumes:
      - /data/flink/conf/:/opt/flink/conf/
    command: taskmanager
    deploy:
      replicas: 2
      # placement:
      #   constraints:
      #     - node.labels.flink-node == flink02

networks:
  # pre-existing overlay network; create it before deploying the stack
  umi:
    external: true
#docker-compose up -d
docker stack deploy -c docker-compose.yaml flink
#停止并删除服务
docker stack rm flink
#访问
# jobmanager1: http://<s1节点IP>:48081
# jobmanager2: http://<s2节点IP>:48081