From 5945c78256c9ea343a6bd8223469e61fa17d8b27 Mon Sep 17 00:00:00 2001 From: robbie228 Date: Wed, 13 Nov 2024 18:22:30 +0800 Subject: [PATCH 1/3] add docker-deploy --- deploy/docker-compose/README_zh.md | 566 +++++++++++ .../docker-deploy/docker_deploy.sh | 353 +++++++ .../docker-deploy/generate_config.sh | 686 +++++++++++++ .../docker-deploy/hosts-setup.sh | 43 + .../docker-deploy/images/images.png | Bin 0 -> 51035 bytes .../docker-compose/docker-deploy/parties.conf | 48 + .../docker-compose-serving.yml | 84 ++ .../serving-admin/conf/application.properties | 32 + .../serving-proxy/conf/application.properties | 58 ++ .../serving-proxy/conf/route_table.json | 30 + .../conf/serving-server.properties | 56 ++ deploy/docker-compose/docker-deploy/test.sh | 182 ++++ .../backends/eggroll/conf/README.md | 6 + .../eggroll/conf/applicationContext-proxy.xml | 45 + .../backends/eggroll/conf/eggroll.properties | 70 ++ .../backends/eggroll/conf/log4j2.properties | 108 +++ .../eggroll/conf/node-extend-env.properties | 0 .../backends/eggroll/conf/route_table.json | 28 + .../backends/eggroll/conf/whitelist.json | 245 +++++ .../backends/spark/hadoop/core-site.xml | 7 + .../backends/spark/hadoop/hadoop.env | 43 + .../backends/spark/nginx/nginx.conf | 68 ++ .../backends/spark/nginx/route_table.yaml | 27 + .../backends/spark/pulsar/standalone.conf | 899 ++++++++++++++++++ .../backends/spark/rabbitmq/enabled_plugins | 1 + .../backends/spark/spark/spark-defaults.conf | 4 + .../docker-compose-eggroll.yml | 181 ++++ .../docker-compose-exchange.yml | 24 + .../docker-compose-spark-slim.yml | 182 ++++ .../docker-compose-spark.yml | 269 ++++++ .../fate_flow/conf/pulsar_route_table.yaml | 17 + .../fate_flow/conf/rabbitmq_route_table.yaml | 6 + .../public/fate_flow/conf/service_conf.yaml | 127 +++ .../fateboard/conf/application.properties | 29 + .../public/fateboard/conf/ssh.properties | 0 .../mysql/init/create-eggroll-meta-tables.sql | 205 ++++ .../public/osx/conf/broker.properties | 61 ++ .../public/osx/conf/route_table.json | 26 + 38 files changed, 4816 insertions(+) create mode 100644 deploy/docker-compose/README_zh.md create mode 100644 deploy/docker-compose/docker-deploy/docker_deploy.sh create mode 100644 deploy/docker-compose/docker-deploy/generate_config.sh create mode 100644 deploy/docker-compose/docker-deploy/hosts-setup.sh create mode 100644 deploy/docker-compose/docker-deploy/images/images.png create mode 100644 deploy/docker-compose/docker-deploy/parties.conf create mode 100644 deploy/docker-compose/docker-deploy/serving_template/docker-compose-serving.yml create mode 100644 deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-admin/conf/application.properties create mode 100644 deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-proxy/conf/application.properties create mode 100644 deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-proxy/conf/route_table.json create mode 100644 deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-server/conf/serving-server.properties create mode 100644 deploy/docker-compose/docker-deploy/test.sh create mode 100644 deploy/docker-compose/docker-deploy/training_template/backends/eggroll/conf/README.md create mode 100644 deploy/docker-compose/docker-deploy/training_template/backends/eggroll/conf/applicationContext-proxy.xml create mode 100644 deploy/docker-compose/docker-deploy/training_template/backends/eggroll/conf/eggroll.properties create mode 100644 
deploy/docker-compose/docker-deploy/training_template/backends/eggroll/conf/log4j2.properties
 create mode 100644 deploy/docker-compose/docker-deploy/training_template/backends/eggroll/conf/node-extend-env.properties
 create mode 100644 deploy/docker-compose/docker-deploy/training_template/backends/eggroll/conf/route_table.json
 create mode 100644 deploy/docker-compose/docker-deploy/training_template/backends/eggroll/conf/whitelist.json
 create mode 100644 deploy/docker-compose/docker-deploy/training_template/backends/spark/hadoop/core-site.xml
 create mode 100644 deploy/docker-compose/docker-deploy/training_template/backends/spark/hadoop/hadoop.env
 create mode 100644 deploy/docker-compose/docker-deploy/training_template/backends/spark/nginx/nginx.conf
 create mode 100644 deploy/docker-compose/docker-deploy/training_template/backends/spark/nginx/route_table.yaml
 create mode 100644 deploy/docker-compose/docker-deploy/training_template/backends/spark/pulsar/standalone.conf
 create mode 100644 deploy/docker-compose/docker-deploy/training_template/backends/spark/rabbitmq/enabled_plugins
 create mode 100644 deploy/docker-compose/docker-deploy/training_template/backends/spark/spark/spark-defaults.conf
 create mode 100644 deploy/docker-compose/docker-deploy/training_template/docker-compose-eggroll.yml
 create mode 100644 deploy/docker-compose/docker-deploy/training_template/docker-compose-exchange.yml
 create mode 100644 deploy/docker-compose/docker-deploy/training_template/docker-compose-spark-slim.yml
 create mode 100644 deploy/docker-compose/docker-deploy/training_template/docker-compose-spark.yml
 create mode 100644 deploy/docker-compose/docker-deploy/training_template/public/fate_flow/conf/pulsar_route_table.yaml
 create mode 100644 deploy/docker-compose/docker-deploy/training_template/public/fate_flow/conf/rabbitmq_route_table.yaml
 create mode 100644 deploy/docker-compose/docker-deploy/training_template/public/fate_flow/conf/service_conf.yaml
 create mode 100644 deploy/docker-compose/docker-deploy/training_template/public/fateboard/conf/application.properties
 create mode 100644 deploy/docker-compose/docker-deploy/training_template/public/fateboard/conf/ssh.properties
 create mode 100644 deploy/docker-compose/docker-deploy/training_template/public/mysql/init/create-eggroll-meta-tables.sql
 create mode 100644 deploy/docker-compose/docker-deploy/training_template/public/osx/conf/broker.properties
 create mode 100644 deploy/docker-compose/docker-deploy/training_template/public/osx/conf/route_table.json
diff --git a/deploy/docker-compose/README_zh.md b/deploy/docker-compose/README_zh.md
new file mode 100644
index 0000000000..eafd257653
--- /dev/null
+++ b/deploy/docker-compose/README_zh.md
@@ -0,0 +1,566 @@
+# Deploying FATE with Docker Compose
+
+## Foreword
+
+[FATE](https://www.fedai.org/) is a federated learning framework that helps multiple organizations use data and build models together while meeting user-privacy, data-security, and regulatory requirements. Project repository: () This document describes how to deploy a FATE cluster with Docker Compose.
+
+## About Docker Compose
+
+Compose is a tool for defining and running multi-container Docker applications. With Compose, you configure an application's services in a YAML file, and then create and start all of them with a single command. For the full feature set, see the [Compose documentation](https://docs.docker.com/compose/#features).
+
+Docker Compose makes deploying FATE straightforward; the steps are described below.
+
+## Goal
+
+Two FATE instances that can reach each other, each containing all FATE components.
+
+## Prerequisites
+
+1. Two hosts (physical or virtual machines, both running CentOS 7);
+2. Docker 19.03.0+ installed on every host;
+3. Docker Compose 1.27.0+ installed on every host;
+4. The deployment machine has internet access, and all hosts can reach one another over the network;
+5. The target hosts have already pulled the FATE component images. If Docker Hub is unreachable, consider using Harbor ([Harbor as a local registry](../registry/README.md)) or an offline deployment (see [building images](https://github.com/FederatedAI/FATE-Builder/tree/main/docker-build));
+6. Hosts running FATE should have at least 8 CPU cores and 16 GB of RAM.
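+
+A quick way to confirm the version requirements above (a minimal sketch; it assumes Docker CE with the compose plugin — if you installed the standalone binary, check `docker-compose --version` instead):
+
+```bash
+# Verify the Docker and Compose versions required above.
+docker --version          # expect 19.03.0 or newer
+docker compose version    # or: docker-compose --version (expect 1.27.0 or newer)
+```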
+
+### Download the deployment scripts
+
+On any machine, download a suitable KubeFATE release from the [releases page](https://github.com/FederatedAI/KubeFATE/releases) and unpack it.
+
+### Modify the image configuration file (optional)
+
+By default, the script pulls the images from [Docker Hub](https://hub.docker.com/search?q=federatedai&type=image) during deployment.
+
+Users in China can use a domestic mirror instead: edit the .env file in the docker-deploy directory and set the `RegistryURI` parameter as follows:
+
+```bash
+RegistryURI=hub.c.163.com
+```
+
+If the required images have already been pulled or imported on the target hosts, deployment becomes very easy.
+
+### Pull the images manually (optional)
+
+If the target hosts do not have the FATE component images yet, pull them from Docker Hub with the following commands. The FATE image version `` can be found on the [releases page](https://github.com/FederatedAI/FATE/releases); the version of the serving images is on [this page](https://github.com/FederatedAI/FATE-Serving/releases):
+
+```bash
+docker pull federatedai/eggroll:-release
+docker pull federatedai/fateboard:-release
+docker pull federatedai/fateflow:-release
+docker pull federatedai/serving-server:-release
+docker pull federatedai/serving-proxy:-release
+docker pull federatedai/serving-admin:-release
+docker pull bitnami/zookeeper:3.7.0
+docker pull mysql:8.0.28
+```
+
+Check that all images have been pulled successfully.
+
+```bash
+$ docker images
+REPOSITORY                      TAG
+federatedai/eggroll             -release
+federatedai/fateboard           -release
+federatedai/fateflow            -release
+federatedai/client              -release
+federatedai/serving-server      -release
+federatedai/serving-proxy       -release
+federatedai/serving-admin       -release
+bitnami/zookeeper               3.7.0
+mysql                           8.0.28
+```
+
+### Offline deployment (optional)
+
+When the target machines cannot reach an external network, images cannot be pulled from Docker Hub; we then recommend [Harbor](https://goharbor.io/) as a local registry. See [this document](https://github.com/FederatedAI/KubeFATE/blob/master/registry/install_harbor.md) for installing Harbor. In the `.env` file, change the `RegistryURI` variable to Harbor's IP; below, 192.168.10.1 is an example Harbor IP.
+
+```bash
+$ cd KubeFATE/
+$ vi .env
+
+...
+RegistryURI=192.168.10.1/federatedai
+...
+```
+
+## Deploying FATE with Docker Compose
+
+ ***If you have deployed another version of FATE before, remove and clean it up first, then deploy the new version; see [Removing the deployment](#removing-the-deployment).***
+
+### Configure the number of instances to deploy
+
+The deployment scripts can deploy multiple FATE instances. In the example below we deploy onto two machines, one FATE instance per machine; their IPs are *192.168.7.1* and *192.168.7.2*.
+
+Modify the configuration file `kubeFATE\docker-deploy\parties.conf` as needed.
+
+The meaning of each `parties.conf` option is described in [the parties.conf reference](../docs/configurations/Docker_compose_Partys_configuration.md).
+
+Below is the edited file: the cluster of `party 10000` will be deployed on *192.168.7.1*, and the cluster of `party 9999` on *192.168.7.2* (the arrays are paired by position; see the sketch after the links below).
+
+```bash
+user=fate
+dir=/data/projects/fate
+party_list=(10000 9999)
+party_ip_list=(192.168.7.1 192.168.7.2)
+serving_ip_list=(192.168.7.1 192.168.7.2)
+
+computing=Eggroll
+federation=Eggroll
+storage=Eggroll
+
+algorithm=Basic
+device=IPCL
+
+compute_core=4
+
+......
+
+```
+
+* For the Spark+RabbitMQ deployment, see [this document](../docs/FATE_On_Spark.md).
+* For the Spark+Pulsar deployment, see [this document](../docs/FATE_On_Spark_With_Pulsar.md).
+* For the Spark+local Pulsar deployment, see [this document](TBD)
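+
+The three arrays in `parties.conf` above are paired by position: the i-th party in `party_list` is deployed to the i-th host in `party_ip_list`, and its serving cluster to the i-th host in `serving_ip_list`. A minimal sketch of that lookup (illustrative only; the variable names are the ones from `parties.conf`):
+
+```bash
+source parties.conf
+
+# Walk the arrays in parallel: index i describes party i.
+for i in "${!party_list[@]}"; do
+    echo "party ${party_list[$i]}: training host ${party_ip_list[$i]}, serving host ${serving_ip_list[$i]}"
+done
+```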
+
+Deploying FATE with Docker Compose supports many different combinations of engines (the choice of computing, federation, and storage). For more detail on the different FATE flavors, see: [Introduction to the engine architectures](../docs/Introduction_to_Engine_Architecture_zh.md).
+
+For the `algorithm` and `device` options, see [FATE_Algorithm_and_Computational_Acceleration_Selection.md](../docs/FATE_Algorithm_and_Computational_Acceleration_Selection.md).
+
+**Note**: the exchange component is not deployed by default. To deploy it, put its server IP into the `exchangeip` field of the configuration file above; the component listens on port 9371 by default.
+
+Before running the deployment script, make sure the deployment machine can SSH into both target hosts without a password; `user` is the account used for that passwordless login.
+
+On the hosts that run FATE, if `user` is not root, it needs permissions on the `/data/projects/fate` directory as well as Docker permissions; if it is root, nothing further is required.
+
+```bash
+# Create a fate user whose group is docker
+[user@localhost]$ sudo useradd -s /bin/bash -g docker -d /home/fate fate
+# Set the user's password
+[user@localhost]$ sudo passwd fate
+# Create the docker-compose deployment directories
+[user@localhost]$ sudo mkdir -p /data/projects/fate /home/fate
+# Change the owner and group of the deployment directories
+[user@localhost]$ sudo chown -R fate:docker /data/projects/fate /home/fate
+# Switch to the user
+[user@localhost]$ sudo su fate
+# Check that the user has Docker permissions
+[fate@localhost]$ docker ps
+CONTAINER ID   IMAGE   COMMAND   CREATED   STATUS   PORTS   NAMES
+# Inspect the deployment directory
+[fate@localhost]$ ls -l /data/projects/
+total 0
+drwxr-xr-x. 2 fate docker 6 May 27 00:51 fate
+```
+
+### GPU support
+
+Since v1.11.1 the Docker Compose deployment supports FATE with GPUs. To use GPUs, you first need a GPU-enabled Docker environment; see the official Docker documentation ().
+
+Using GPUs requires changing the configuration; both of the following must be modified:
+
+```sh
+algorithm=NN
+device=GPU
+
+gpu_count=1
+```
+
+Only the fateflow component uses the GPU, so each party needs at least one GPU.
+
+*`gpu_count` is mapped to `count`; see [Docker compose GPU support](https://docs.docker.com/compose/gpu-support/)*
+
+### Run the deployment script
+
+**Note:** before running the commands below, all target hosts must
+
+* allow passwordless SSH access with an SSH key (otherwise you will have to enter each host's password several times);
+* meet the requirements listed in [Prerequisites](#prerequisites).
+
+To deploy FATE to all configured target hosts, use the following commands.
+
+The steps below can be executed on any machine.
+
+Enter the directory `kubeFATE\docker-deploy` and run:
+
+```bash
+bash ./generate_config.sh          # generate the deployment files
+```
+
+The script generates the deployment files for the two parties, 10000 and 9999, and packs them into tar files. The tar files `confs-.tar` and `serving-.tar` are then copied to each party's host and unpacked (by default under `/data/projects/fate`); the script then logs in to those hosts remotely and starts the FATE instances with docker compose.
+
+By default the script starts both the training and the serving cluster. If you need to start them separately, pass `--training` or `--serving` to `docker_deploy.sh`, as shown below.
+
+(Optional) To deploy each party's training cluster:
+
+```bash
+bash ./docker_deploy.sh all --training
+```
+
+(Optional) To deploy each party's serving cluster:
+
+```bash
+bash ./docker_deploy.sh all --serving
+```
+
+(Optional) To deploy FATE to a single target host, pass that party's ID (10000 in the example below):
+
+```bash
+bash ./docker_deploy.sh 10000
+```
+
+(Optional) To deploy the exchange node to its target host:
+
+```bash
+bash ./docker_deploy.sh exchange
+```
+
+Once the command finishes, log in to any of the hosts and use `docker compose ps` to verify the cluster status. Example:
+
+```bash
+ssh fate@192.168.7.1
+```
+
+Verify the instance status with:
+
+```bash
+cd /data/projects/fate/confs-10000
+docker compose ps
+```
+
+The output looks like the following. If every component is in the `Up` state, and fateflow is additionally `(healthy)`, the deployment succeeded.
+
+```bash
+NAME                           IMAGE                                 COMMAND                  SERVICE          CREATED              STATUS                        PORTS
+confs-10000-client-1           federatedai/client:2.0.0-release      "bash -c 'pipeline i…"   client           About a minute ago   Up About a minute             0.0.0.0:20000->20000/tcp, :::20000->20000/tcp
+confs-10000-clustermanager-1   federatedai/eggroll:2.0.0-release     "/tini -- bash -c 'j…"   clustermanager   About a minute ago   Up About a minute             4670/tcp
+confs-10000-fateboard-1        federatedai/fateboard:2.0.0-release   "/bin/sh -c 'java -D…"   fateboard        About a minute ago   Up About a minute             0.0.0.0:8080->8080/tcp, :::8080->8080/tcp
+confs-10000-fateflow-1         federatedai/fateflow:2.0.0-release    "/bin/bash -c 'set -…"   fateflow         About a minute ago   Up About a minute (healthy)   0.0.0.0:9360->9360/tcp, :::9360->9360/tcp, 0.0.0.0:9380->9380/tcp, :::9380->9380/tcp
+confs-10000-mysql-1            mysql:8.0.28                          "docker-entrypoint.s…"   mysql            About a minute ago   Up About a minute             3306/tcp, 33060/tcp
+confs-10000-nodemanager-1      federatedai/eggroll:2.0.0-release     "/tini -- bash -c 'j…"   nodemanager      About a minute ago   Up About a minute             4671/tcp
+confs-10000-osx-1              federatedai/osx:2.0.0-release         "/tini -- bash -c 'j…"   osx              About a minute ago   Up About a minute             0.0.0.0:9370->9370/tcp, :::9370->9370/tcp
+```
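+
+To script this check instead of reading the table by eye, you can query the container's health state directly (a small sketch; it assumes the `confs-<party-id>-fateflow-1` container name shown above):
+
+```bash
+# Poll until the fateflow container reports healthy.
+until [ "$(docker inspect --format '{{.State.Health.Status}}' confs-10000-fateflow-1)" = "healthy" ]; do
+    echo "waiting for fateflow to become healthy..."
+    sleep 5
+done
+echo "fateflow is healthy"
+```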
+
+### Verify the deployment
+
+Once FATE has started successfully on docker-compose, verify that every service is running properly; this can be checked with the toy_example test.
+
+We pick the node 192.168.7.1 and verify with the following commands:
+
+```bash
+# Run the following commands on 192.168.7.1
+
+# Enter the client component container
+$ docker compose exec client bash
+# Run the toy test
+$ flow test toy --guest-party-id 10000 --host-party-id 9999
+```
+
+If the test passes, messages similar to the following are printed on screen:
+
+```bash
+"2019-08-29 07:21:25,353 - secure_add_guest.py[line:96] - INFO: begin to init parameters of secure add example guest"
+"2019-08-29 07:21:25,354 - secure_add_guest.py[line:99] - INFO: begin to make guest data"
+"2019-08-29 07:21:26,225 - secure_add_guest.py[line:102] - INFO: split data into two random parts"
+"2019-08-29 07:21:29,140 - secure_add_guest.py[line:105] - INFO: share one random part data to host"
+"2019-08-29 07:21:29,237 - secure_add_guest.py[line:108] - INFO: get share of one random part data from host"
+"2019-08-29 07:21:33,073 - secure_add_guest.py[line:111] - INFO: begin to get sum of guest and host"
+"2019-08-29 07:21:33,920 - secure_add_guest.py[line:114] - INFO: receive host sum from guest"
+"2019-08-29 07:21:34,118 - secure_add_guest.py[line:121] - INFO: success to calculate secure_sum, it is 2000.0000000000002"
+```
+
+### Verify the Serving-Service functionality
+
+#### Operations on the host side
+
+##### Enter the party 10000 client container
+
+```bash
+cd /data/projects/fate/confs-10000
+docker compose exec client bash
+```
+
+##### Upload the host data
+
+```bash
+flow data upload -c fateflow/examples/upload/upload_host.json
+```
+
+#### Operations on the guest side
+
+##### Enter the party 9999 client container
+
+```bash
+cd /data/projects/fate/confs-9999
+docker compose exec client bash
+```
+
+##### Upload the guest data
+
+```bash
+flow data upload -c fateflow/examples/upload/upload_guest.json
+```
+
+##### Submit a job
+
+```bash
+flow job submit -d fateflow/examples/lr/test_hetero_lr_job_dsl.json -c fateflow/examples/lr/test_hetero_lr_job_conf.json
+```
+
+output:
+
+```json
+{
+    "data": {
+        "board_url": "http://fateboard:8080/index.html#/dashboard?job_id=202111230933232084530&role=guest&party_id=9999",
+        "code": 0,
+        "dsl_path": "/data/projects/fate/fate_flow/jobs/202111230933232084530/job_dsl.json",
+        "job_id": "202111230933232084530",
+        "logs_directory": "/data/projects/fate/fate_flow/logs/202111230933232084530",
+        "message": "success",
+        "model_info": {
+            "model_id": "arbiter-10000#guest-9999#host-10000#model",
+            "model_version": "202111230933232084530"
+        },
+        "pipeline_dsl_path": "/data/projects/fate/fate_flow/jobs/202111230933232084530/pipeline_dsl.json",
+        "runtime_conf_on_party_path": "/data/projects/fate/fate_flow/jobs/202111230933232084530/guest/9999/job_runtime_on_party_conf.json",
+        "runtime_conf_path": "/data/projects/fate/fate_flow/jobs/202111230933232084530/job_runtime_conf.json",
+        "train_runtime_conf_path": "/data/projects/fate/fate_flow/jobs/202111230933232084530/train_runtime_conf.json"
+    },
+    "jobId": "202111230933232084530",
+    "retcode": 0,
+    "retmsg": "success"
+}
+```
+
+##### Check the training job's status
+
+```bash
+flow task query -r guest -j 202111230933232084530 | grep -w f_status
+```
+
+output:
+
+```bash
+    "f_status": "success",
+    "f_status": "waiting",
+    "f_status": "running",
+    "f_status": "waiting",
+    "f_status": "waiting",
+    "f_status": "success",
+    "f_status": "success",
+```
+
+Wait until every `waiting` status has turned into `success`.
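+
+Rather than re-running the query by hand, a small loop over the same `flow task query` call can wait for the job (a sketch; replace the job ID with the one returned by your own `flow job submit`):
+
+```bash
+JOB_ID=202111230933232084530
+# Poll while any task is still waiting or running; a failed task also ends the
+# loop, so inspect the final statuses afterwards.
+while flow task query -r guest -j ${JOB_ID} | grep -w f_status | grep -qE 'waiting|running'; do
+    echo "job ${JOB_ID} still has unfinished tasks..."
+    sleep 10
+done
+flow task query -r guest -j ${JOB_ID} | grep -w f_status
+```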
+
+##### Deploy the model
+
+```bash
+flow model deploy --model-id arbiter-10000#guest-9999#host-10000#model --model-version 202111230933232084530
+```
+
+```json
+{
+    "data": {
+        "arbiter": {
+            "10000": 0
+        },
+        "detail": {
+            "arbiter": {
+                "10000": {
+                    "retcode": 0,
+                    "retmsg": "deploy model of role arbiter 10000 success"
+                }
+            },
+            "guest": {
+                "9999": {
+                    "retcode": 0,
+                    "retmsg": "deploy model of role guest 9999 success"
+                }
+            },
+            "host": {
+                "10000": {
+                    "retcode": 0,
+                    "retmsg": "deploy model of role host 10000 success"
+                }
+            }
+        },
+        "guest": {
+            "9999": 0
+        },
+        "host": {
+            "10000": 0
+        },
+        "model_id": "arbiter-10000#guest-9999#host-10000#model",
+        "model_version": "202111230954255210490"
+    },
+    "retcode": 0,
+    "retmsg": "success"
+}
+```
+
+*The `model_version` used in the following steps is the one returned by this command: `"model_version": "202111230954255210490"`*
+
+##### Modify the model-loading configuration
+
+```bash
+cat > fateflow/examples/model/publish_load_model.json < fateflow/examples/model/bind_model_service.json <
+```
+
+### Removing the deployment
+
+```bash
+cd /data/projects/fate/confs-/ # the party's ID, 10000 or 9999 in this example
+docker-compose down
+rm -rf ../confs-/ # remove the docker-compose deployment files
+```
+
+### Possible problems
+
+#### Pulling images from Docker Hub may be slow
+
+Workaround: build the images yourself; see [here](https://github.com/FederatedAI/FATE/tree/master/docker-build) for how to build them.
+
+#### Running `./docker_deploy.sh all` prompts for a password
+
+Workaround: check that passwordless SSH login works. P.S.: entering the corresponding host's user password at the prompt also lets the run continue.
+
+#### CPU instruction-set problems
+
+Workaround: see the storage-service section of the [wiki](https://github.com/FederatedAI/KubeFATE/wiki/KubeFATE) page.
diff --git a/deploy/docker-compose/docker-deploy/docker_deploy.sh b/deploy/docker-compose/docker-deploy/docker_deploy.sh
new file mode 100644
index 0000000000..335b6e9b4e
--- /dev/null
+++ b/deploy/docker-compose/docker-deploy/docker_deploy.sh
@@ -0,0 +1,353 @@
+#!/bin/bash
+
+# Copyright 2019-2022 VMware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#     http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
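+
+# Overview: this script distributes the deployment bundles produced by
+# generate_config.sh. For each requested party it resolves the target host
+# from parties.conf, copies outputs/confs-<party-id>.tar (and, for serving,
+# serving-<party-id>.tar) to that host over scp, and then starts the cluster
+# there with docker compose. Run generate_config.sh before this script.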
+
+set -e
+BASEDIR=$(dirname "$0")
+cd $BASEDIR
+WORKINGDIR=$(pwd)
+
+# load the environment variables and the party configuration
+source ${WORKINGDIR}/.env
+source ${WORKINGDIR}/parties.conf
+
+cd ${WORKINGDIR}
+
+Deploy() {
+    if [ "$1" = "" ]; then
+        echo "No party id was provided, please check your arguments"
+        exit 1
+    fi
+
+    while [ "$1" != "" ]; do
+        case $1 in
+        splitting_proxy)
+            shift
+            DeployPartyInternal $@
+            break
+            ;;
+        all)
+            for party in ${party_list[*]}; do
+                if [ "$2" != "" ]; then
+                    case $2 in
+                    --training)
+                        DeployPartyInternal $party
+                        if [ "${exchangeip}" != "" ]; then
+                            DeployPartyInternal exchange
+                        fi
+                        ;;
+                    --serving)
+                        DeployPartyServing $party
+                        ;;
+                    esac
+                else
+                    DeployPartyInternal $party
+                    DeployPartyServing $party
+                    if [ "${exchangeip}" != "" ]; then
+                        DeployPartyInternal exchange
+                    fi
+                fi
+            done
+            break
+            ;;
+        *)
+            if [ "$2" != "" ]; then
+                case $2 in
+                --training)
+                    DeployPartyInternal $1
+                    break
+                    ;;
+                --serving)
+                    DeployPartyServing $1
+                    break
+                    ;;
+                esac
+            else
+                DeployPartyInternal $1
+                DeployPartyServing $1
+            fi
+            ;;
+        esac
+        shift
+
+    done
+}
+
+Delete() {
+    if [ "$1" = "" ]; then
+        echo "No party id was provided, please check your arguments"
+        exit 1
+    fi
+
+    while [ "$1" != "" ]; do
+        case $1 in
+        all)
+            for party in ${party_list[*]}; do
+                if [ "$2" != "" ]; then
+                    DeleteCluster $party $2
+                else
+                    DeleteCluster $party
+                fi
+            done
+            if [ "${exchangeip}" != "" ]; then
+                DeleteCluster exchange
+            fi
+            break
+            ;;
+        *)
+            DeleteCluster $@
+            break
+            ;;
+        esac
+    done
+}
+
+DeployPartyInternal() {
+    target_party_id=$1
+    # localhost should not be used in any case
+    target_party_ip="127.0.0.1"
+
+    # check configuration files
+    if [ ! -d ${WORKINGDIR}/outputs ]; then
+        echo "Unable to find outputs dir, please generate config files first."
+        return 1
+    fi
+    if [ ! -f ${WORKINGDIR}/outputs/confs-${target_party_id}.tar ]; then
+        echo "Unable to find deployment file of training for party $target_party_id, please generate it first."
+        return 0
+    fi
+    # extract the ip address of the target party
+    if [ "$target_party_id" = "exchange" ]; then
+        target_party_ip=${exchangeip}
+    elif [ "$2" != "" ]; then
+        target_party_ip="$2"
+    else
+        for ((i = 0; i < ${#party_list[*]}; i++)); do
+            if [ "${party_list[$i]}" = "$target_party_id" ]; then
+                target_party_ip=${party_ip_list[$i]}
+            fi
+        done
+    fi
+    # verify the target_party_ip
+    if [ "$target_party_ip" = "127.0.0.1" ]; then
+        echo "Unable to find Party: $target_party_id, please check your input."
+        return 1
+    fi
+
+    if [ "$3" != "" ]; then
+        user=$3
+    fi
+
+    handleLocally confs
+    if [ "$local_flag" == "true" ]; then
+        return 0
+    fi
+
+    scp -P ${SSH_PORT} ${WORKINGDIR}/outputs/confs-$target_party_id.tar $user@$target_party_ip:~/
+    #rm -f ${WORKINGDIR}/outputs/confs-$target_party_id.tar
+    echo "$target_party_ip training cluster copy is ok!"
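+    # Log in to the target host to unpack the bundle and start the training cluster.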
+ ssh -p ${SSH_PORT} -tt $user@$target_party_ip <#jdbc:mysql://${db_ip}:3306/${db_name}?useSSL=false\&serverTimezone=${db_serverTimezone}\&characterEncoding=utf8\&allowPublicKeyRetrieval=true#g" ./confs-$party_id/confs/eggroll/conf/eggroll.properties + sed -i "s##${db_user}#g" ./confs-$party_id/confs/eggroll/conf/eggroll.properties + sed -i "s##${db_password}#g" ./confs-$party_id/confs/eggroll/conf/eggroll.properties + + #clustermanager & nodemanager + sed -i "s##${clustermanager_ip}#g" ./confs-$party_id/confs/eggroll/conf/eggroll.properties + sed -i "s##${clustermanager_port}#g" ./confs-$party_id/confs/eggroll/conf/eggroll.properties + sed -i "s##${nodemanager_ip}#g" ./confs-$party_id/confs/eggroll/conf/eggroll.properties + sed -i "s##${nodemanager_port}#g" ./confs-$party_id/confs/eggroll/conf/eggroll.properties + sed -i "s##${party_id}#g" ./confs-$party_id/confs/eggroll/conf/eggroll.properties + + #pythonpath, very import, do not modify." + sed -i "s##/data/projects/fate/python:/data/projects/fate/eggroll/python#g" ./confs-$party_id/confs/eggroll/conf/eggroll.properties + + #javahome + sed -i "s##/usr/lib/jvm/java-1.8.0-openjdk#g" ./confs-$party_id/confs/eggroll/conf/eggroll.properties + sed -i "s##conf/:lib/*#g" ./confs-$party_id/confs/eggroll/conf/eggroll.properties + + sed -i "s##${proxy_ip}#g" ./confs-$party_id/confs/eggroll/conf/eggroll.properties + sed -i "s##${proxy_port}#g" ./confs-$party_id/confs/eggroll/conf/eggroll.properties + fi + + if [ "$computing" == "Spark" ]; then + # computing + cp -r training_template/backends/spark/nginx confs-$party_id/confs/ + cp -r training_template/backends/spark/spark confs-$party_id/confs/ + # storage + if [ "$storage" == "HDFS" ]; then + cp training_template/docker-compose-spark.yml confs-$party_id/docker-compose.yml + cp -r training_template/backends/spark/hadoop confs-$party_id/confs/ + # federation + if [ "$federation" == "RabbitMQ" ]; then + cp -r training_template/backends/spark/rabbitmq confs-$party_id/confs/ + # delete Pulsar spec + sed -i '203,218d' confs-"$party_id"/docker-compose.yml + elif [ "$federation" == "Pulsar" ]; then + cp -r training_template/backends/spark/pulsar confs-$party_id/confs/ + # delete RabbitMQ spec + sed -i '184,201d' confs-"$party_id"/docker-compose.yml + fi + fi + fi + + if [ "$computing" == "STANDALONE" ]; then + # computing + # cp -r training_template/backends/spark/nginx confs-$party_id/confs/ + # cp -r training_template/backends/spark/spark confs-$party_id/confs/ + # storage + if [ "$storage" == "STANDALONE" ]; then + cp training_template/docker-compose-spark-slim.yml confs-$party_id/docker-compose.yml + # federation + # if [ "$federation" == "RabbitMQ" ]; then + # cp -r training_template/backends/spark/rabbitmq confs-$party_id/confs/ + # sed -i '149,163d' confs-$party_id/docker-compose.yml + # elif [ "$federation" == "Pulsar" ]; then + # cp -r training_template/backends/spark/pulsar confs-$party_id/confs/ + # sed -i '131,147d' confs-$party_id/docker-compose.yml + # fi + fi + fi + + cp ${WORKINGDIR}/.env ./confs-$party_id + echo "NOTEBOOK_HASHED_PASSWORD=${notebook_hashed_password}" >> ./confs-$party_id/.env + + # Modify the configuration file + + # Images choose + Suffix="" + # computing + if [ "$computing" == "Spark" ]; then + Suffix=$Suffix"" + fi + # algorithm + if [ "$algorithm" == "NN" ]; then + Suffix=$Suffix"-nn" + elif [ "$algorithm" == "ALL" ]; then + Suffix=$Suffix"-all" + fi + # device + if [ "$device" == "IPCL" ]; then + Suffix=$Suffix"-ipcl" + fi + if [ "$device" == "GPU" ]; then + 
Suffix=$Suffix"-gpu" + fi + + # federatedai/fateflow-${computing}-${algorithm}-${device}:${version} + + # eggroll or spark-worker + if [ "$computing" == "Eggroll" ]; then + sed -i "s#image: \"\${FATEFlow_IMAGE}:\${FATEFlow_IMAGE_TAG}\"#image: \"\${FATEFlow_IMAGE}${Suffix}:\${FATEFlow_IMAGE_TAG}\"#g" ./confs-"$party_id"/docker-compose.yml + sed -i "s#image: \"\${EGGRoll_IMAGE}:\${EGGRoll_IMAGE_TAG}\"#image: \"\${EGGRoll_IMAGE}${Suffix}:\${EGGRoll_IMAGE_TAG}\"#g" ./confs-"$party_id"/docker-compose.yml + elif [ "$computing" == "Spark" ] ; then + sed -i "s#image: \"\${FATEFlow_IMAGE}:\${FATEFlow_IMAGE_TAG}\"#image: \"\${FATEFlow_IMAGE}-spark${Suffix}:\${FATEFlow_IMAGE_TAG}\"#g" ./confs-"$party_id"/docker-compose.yml + sed -i "s#image: \"\${Spark_Worker_IMAGE}:\${Spark_Worker_IMAGE_TAG}\"#image: \"\${Spark_Worker_IMAGE}${Suffix}:\${Spark_Worker_IMAGE_TAG}\"#g" ./confs-"$party_id"/docker-compose.yml + fi + + # GPU + if [ "$device" == "GPU" ]; then + line=0 # line refers to the line number of the fateflow `command` line in docker-compose.yaml + if [ "$computing" == "Eggroll" ]; then + line=141 + fi + if [ "$computing" == "Spark" ]; then + line=85 + fi + if [ "$computing" == "STANDALONE" ]; then + line=85 + fi + sed -i "${line}i\\ + deploy:\\ + resources:\\ + reservations:\\ + devices:\\ + - driver: nvidia\\ + count: $gpu_count\\ + capabilities: [gpu]" ./confs-"$party_id"/docker-compose.yml + fi + # RegistryURI + if [ "$RegistryURI" != "" ]; then + + if [ "${RegistryURI: -1}" != "/" ]; then + RegistryURI="${RegistryURI}/" + fi + + sed -i "s#RegistryURI=.*#RegistryURI=${RegistryURI}/#g" ./confs-"$party_id"/.env + fi + + # replace namenode in training_template/public/fate_flow/conf/service_conf.yaml + if [ "$name_node" != "" ]; then + sed -i "s#name_node: hdfs://namenode:9000#name_node: ${name_node}#g" ./confs-$party_id/confs/fate_flow/conf/service_conf.yaml + fi + + # update serving ip + sed -i "s/fate-serving/${serving_ip}/g" ./confs-"$party_id"/docker-compose.yml + + # update the path of shared_dir + shared_dir="confs-${party_id}/shared_dir" + + # create directories + for value in "examples" "fate" "data"; do + mkdir -p "${shared_dir}"/${value} + done + + sed -i "s||${dir}/${shared_dir}|g" ./confs-"$party_id"/docker-compose.yml + + # Start the general config rendering + # fateboard + sed -i "s#^server.port=.*#server.port=${fateboard_port}#g" ./confs-"$party_id"/confs/fateboard/conf/application.properties + sed -i "s#^fateflow.url=.*#fateflow.url=http://${fate_flow_ip}:${fate_flow_http_port}#g" ./confs-"$party_id"/confs/fateboard/conf/application.properties + sed -i "s##${fateboard_username}#g" ./confs-"$party_id"/confs/fateboard/conf/application.properties + sed -i "s##${fateboard_password}#g" ./confs-"$party_id"/confs/fateboard/conf/application.properties + echo fateboard module of "$party_id" done! 
+ + # mysql + + { + echo "CREATE DATABASE IF NOT EXISTS ${db_name};" + echo "CREATE DATABASE IF NOT EXISTS fate_flow;" + echo "CREATE USER '${db_user}'@'%' IDENTIFIED BY '${db_password}';" + echo "GRANT ALL ON *.* TO '${db_user}'@'%';" + } >> ./confs-"$party_id"/confs/mysql/init/insert-node.sql + + if [[ "$computing" == "Eggroll" ]]; then + echo 'USE `'${db_name}'`;' >>./confs-$party_id/confs/mysql/init/insert-node.sql + echo "show tables;" >>./confs-$party_id/confs/mysql/init/insert-node.sql + sed -i "s/eggroll_meta/${db_name}/g" ./confs-$party_id/confs/mysql/init/create-eggroll-meta-tables.sql + else + rm -f ./confs-$party_id/confs/mysql/init/create-eggroll-meta-tables.sql + fi + echo mysql module of $party_id done! + + # fate_flow + sed -i "s/party_id: .*/party_id: \"${party_id}\"/g" ./confs-$party_id/confs/fate_flow/conf/service_conf.yaml + sed -i "s/name: /name: '${db_name}'/g" ./confs-$party_id/confs/fate_flow/conf/service_conf.yaml + sed -i "s/user: /user: '${db_user}'/g" ./confs-$party_id/confs/fate_flow/conf/service_conf.yaml + sed -i "s/passwd: /passwd: '${db_password}'/g" ./confs-$party_id/confs/fate_flow/conf/service_conf.yaml + sed -i "s/host: /host: '${db_ip}'/g" ./confs-$party_id/confs/fate_flow/conf/service_conf.yaml + sed -i "s/127.0.0.1:8000/${serving_ip}:8000/g" ./confs-$party_id/confs/fate_flow/conf/service_conf.yaml + + + if [[ "$computing" == "Spark" ]] ; then + sed -i "s/proxy_name: osx/proxy_name: nginx/g" ./confs-$party_id/confs/fate_flow/conf/service_conf.yaml + sed -i "s/computing: eggroll/computing: spark/g" ./confs-$party_id/confs/fate_flow/conf/service_conf.yaml + fi + if [[ "$computing" == "STANDALONE" ]] ; then + # sed -i "s/proxy_name: osx/proxy_name: nginx/g" ./confs-$party_id/confs/fate_flow/conf/service_conf.yaml + sed -i "s/computing: eggroll/computing: standalone/g" ./confs-$party_id/confs/fate_flow/conf/service_conf.yaml + fi + if [[ "$federation" == "Pulsar" ]]; then + sed -i "s/ federation: osx/ federation: pulsar/g" ./confs-$party_id/confs/fate_flow/conf/service_conf.yaml + elif [[ "$federation" == "RabbitMQ" ]]; then + sed -i "s/ federation: osx/ federation: rabbitmq/g" ./confs-$party_id/confs/fate_flow/conf/service_conf.yaml + fi + + if [[ "$storage" == "HDFS" ]]; then + sed -i "s/ storage: eggroll/ storage: hdfs/g" ./confs-$party_id/confs/fate_flow/conf/service_conf.yaml + elif [[ "$storage" == "STANDALONE" ]]; then + sed -i "s/ storage: eggroll/ storage: standalone/g" ./confs-$party_id/confs/fate_flow/conf/service_conf.yaml + fi + + # if [[ "$computing" == "STANDALONE" ]] ; then + # sed -i "s#spark.master .*#spark.master local[*]#g" ./confs-$party_id/confs/spark/spark-defaults.conf + # fi + + # compute_core + sed -i "s/nodes: .*/nodes: 1/g" ./confs-$party_id/confs/fate_flow/conf/service_conf.yaml + sed -i "s/cores_per_node: .*/cores_per_node: $compute_core/g" ./confs-$party_id/confs/fate_flow/conf/service_conf.yaml + + if [[ "$computing" == "Eggroll" ]]; then + sed -i "s/eggroll.session.processors.per.node=.*/eggroll.session.processors.per.node=$compute_core/g" ./confs-$party_id/confs/eggroll/conf/eggroll.properties + fi + if [[ "$computing" == "Spark"* ]]; then + sed -i "s/spark.cores.max .*/spark.cores.max $compute_core/g" ./confs-$party_id/confs/spark/spark-defaults.conf + fi + echo fate_flow module of $party_id done! 
+ + # federation config + # OSX + sed -i "s/self.party=9999/self.party=${party_id}/g" ./confs-$party_id/confs/osx/conf/broker.properties + if [[ "$federation" == "OSX" ]]; then + cat >./confs-$party_id/confs/osx/conf/route_table.json <./confs-$party_id/confs/nginx/route_table.yaml <./confs-$party_id/confs/fate_flow/conf/rabbitmq_route_table.yaml <./confs-$party_id/confs/fate_flow/conf/pulsar_route_table.yaml <#${proxy_ip}#g" ./confs-exchange/conf/eggroll.properties + sed -i "s##${proxy_port}#g" ./confs-exchange/conf/eggroll.properties + sed -i "s##exchange#g" ./confs-exchange/conf/eggroll.properties + sed -i "s/coordinator=.*/coordinator=exchange/g" ./confs-exchange/conf/eggroll.properties + sed -i "s/ip=.*/ip=0.0.0.0/g" ./confs-exchange/conf/eggroll.properties + + cat >./confs-exchange/conf/route_table.json <|serving-$party_id|g" ./serving-$party_id/docker-compose.yml + + + if [ "$RegistryURI" != "" ]; then + sed -i 's#federatedai#${RegistryURI}/federatedai#g' ./serving-$party_id/docker-compose.yml + fi + # generate conf dir + cp ${WORKINGDIR}/.env ./serving-$party_id + + # serving admin + sed -i "s/admin.username=/admin.username=${serving_admin_username}/g" ./serving-$party_id/confs/serving-admin/conf/application.properties + sed -i "s/admin.password=/admin.password=${serving_admin_password}/g" ./serving-$party_id/confs/serving-admin/conf/application.properties + + # serving server + sed -i "s#model.transfer.url=http://127.0.0.1:9380/v1/model/transfer#model.transfer.url=http://${party_ip}:9380/v1/model/transfer#g" ./serving-$party_id/confs/serving-server/conf/serving-server.properties + + # network + sed -i "s/name: /name: confs-${party_id}_fate-network/g" serving-$party_id/docker-compose.yml + + # serving proxy + sed -i "s/coordinator=/coordinator=${party_id}/g" ./serving-$party_id/confs/serving-proxy/conf/application.properties + cat >./serving-$party_id/confs/serving-proxy/conf/route_table.json < /dev/null + +#update and install the latest version +sudo apt-get update +sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin diff --git a/deploy/docker-compose/docker-deploy/images/images.png b/deploy/docker-compose/docker-deploy/images/images.png new file mode 100644 index 0000000000000000000000000000000000000000..cd3d057c2dd207cb5eae505d988ead830ce29749 GIT binary patch literal 51035 zcmeFZc{mi@`#-L-MWM%5+3MXB6_RY(qeUeVg=`Z_mh8*eW)w*!)T78cN=WvyFQW*_ zo@F#+OvqTr*v2qp%AJd_(wyZ!_c`Z&-LKbu9vWQI*~zh& zgNcc0=lOG|4Vjp>#e?5}va^DJ`KADg1b=MtG}O^%%54=M1Ak$@rKP9E#8eQ)xoph> z{=VJq+%-=ora!zGzqj<;iTN@y)lZ*4t#!rMYLXW4?8H88p<1xDQY6zPrtz>FM4E_T?*Fn^U zZei-G5eo`vr@@Ju<+(xE1|ps9Jw(E%+;m1Ph>`O2D%Iml)fKfvD_K;+5ag768a@Y~ zhcCdt#~0yC@MZW4y!xFp?D1~(OU^t}^lI+Nau)ictJW5zOwmc5=5UUe<$^29r4n3F zxM8_2)5+xlrD_jLSLF9d+Vo}U4fONOGb%QsUCSs~3RR2Da)$*pb@s%UUf~}gL!Cq z_>$N67WXh*|3ksYNP28a>x5r3)I3$pV$f+X{di^ z`Q=g`^(49W>38GQ8{85@33xP`hg$SApntLh1{h3ZGcvX)gJ7~OE-SRxIpe^!ygT?3B{i8hy7}pU+RSuVS{0L9`6&D=j2~+o)&LRihcZRnN!gxJ z_sE%^WwCh4cP4fKSIc}yKMKJ{exZ@3TuwA*CGWe#5}6b5%3>K_IKGA47*=s1fGH)T zgO)~>Dj)HpZ%K3j1H0fwTZzhDDhe64A?pxlS~)(%{UCi$4xg%yk!Gt6_~V1litjus zmllV4M9h6mO{ZdWrPI`zrX5nLgy~-T-u|F+Zg0)4iHF(a4V0>v^1w!1V^v;iAG$#4K=-3%m@(nkc@#IS9Mp1H) zj4GJg-lhiCp3u;oNmF+O)kx5jH|6r^N+)bLHo9DMi-t33OipHHS;<;fa)j0vBw4ie z4tH||4U3Kq4&YCD>g5t=dDLE2Ngw>+WNHV^6sZ(K<@?@wFJ-&JF77xd=rY1kl4rR>jAPBVd=T`4{Gm8-@nrkGx1*?=se|4fn)HB7ex|@bSCo{N6wFR^ zehRWIo=){kGiI9JiA8i&eaY*n$zajqz4RpyPIX_R-5V{XEEMw@=+pmX@(r;_b*1%Whw+D`o>WiO79BraE zGUuO~4^s4|&t~YSWbC)sK4Vra%)xmTRxZWVdw?QuMoZGWqaQs=yfJPhz2$>IM@GlE 
z2J7f~_V_1XfFnst8IYjImw3gQ6gvOS z6)CC-<%li*9Kqs=A7Zf8J2{QYCUB9@fi?b9y*O>U1c%eAYSWDGB)^=FzEv|gKV9m2 zKZsA1>GmrsdDUs!%Ja7PZw(gr8j1??m`c7%3_CLxb?CyD<9D~FPmPH&lNS)d0m3N~ zi?$=XpuWo`H8u0lisEOWgCpfpwPL~%)h8;ew_FkgE!Yw;b^2L}(}Oa)(o9cS;;p1s zf?B*ks&lpTdLh*gNL|o0Bc04-VaiUWULzMxx!&mn0oKh(ftBwrR*NpCkH=92rhTf* z%k4S?Rj{U4P)i+@*o7RXjCycjH0AGpjtDFTMqujcpb29!AX`z)5swV}OP;|^yWE-6@N(3E}W*R>xHQ;7WEL#Kbc^5o2F)E6pak;(ak>CGg z?5h|QvZznD0&PmoUUZ9^utHEZx3jxBdSx$ancne1D!%X9%5?1%xn#-{OJvFbp<`bF z9lqQqwv0cpVkdqwrUO?OF-2Cdo{}os+Eq^^FWZ_1iYB`~q;@ulftB0mG`i9Q%cWY9 z)jwpOY-d)QAPv$n{wpnXOGck_eL{5A%8^@aWQ~*RUQ#m1gl{B$U53T4dLhDyj;w26 zYgTU1SUI(X-Pzy5{Fx0Lm?-bIAYvRgcMcl3Rl2KQENUe$fAksS;00Y~jcPf0Mw*>jq?K4>% z4<6wgJTyO=kcb>3c@<2Ht<(k`Qm4$nP;#|8v2fTZJ#o74Kl7FH0;Gq@sgXwJYV!(j z#k%VC7eu!cY1=>UWisH@8R_Q{NUX-t{4-fDJG4SG8jjPtBST%4B8viOsS+R_+9 z=-4oXX4feGAlOt-9kEP*Wii80vg?A`OP-gm9voWgp-5}va%26MBXa)pqtj**0WSVl z{ta>uBfIs74(#pbb{ryu`&8)NDAsM~iN40XmfFUpT30x~K!T1qy&9N6@bqG);EOevxo*yn7Oc!cJOy#jHakedT1_%9hBIh zPOHN!l*#JJAv)$`m7}_@iGOGJuZ>gK^N*Uf$?61AGoq51@0I{ z7mF@*8WYNV^zhFk8dshqw&q1APnKP!)E_OB*$YjjXc2CsmlE7|ekYD-D}cDJ!{%if zt;~=!kxRndRvJ!!Q>c+8y%O>P_)q64a@AB&&Ne2}6XX}IqrM~DsAFn;mny&WQIqy zBER>e_D+B2CW9m6tMYNem%+SNnk)W-ehTTrgz+C)I8Cp|xroy*mEnXc`k$xbb6i&_4jwtUH{OAjhaPZ*{`s7P1tzDr@+dmjGUr_8S8&+T`Usn=Ds9`rJJ!8 ziZXR^K&>w=)rB?G489wy+Xl8|>}Ueig7&8wZgNQTW@NjPCjYKR+rL7!XHhRtr)g_d zi>57%`lEZ4us=vcsPK!_!)~Y3CbkzfGzd!|^Dfg;H0qsIcyd`PRtTnF@p{bxakKXyJIbbNGhS?LVAwnuDY0qRLYkhQpTAi@5C7nch4{ z8!LYn)};@ltlb;gwj`-YQ1K!`q{e`UaPgvU#RMlsNLdp9wr z;$YjQL|Rl@U`()grYX`*`UQm}2+#g?%+yO~#ssgi=m5@}Sa9C7SG+8T|B)o8QP1!! z9ddQgmK-*wB172okc-Ft2`l-fYSRh1j1G%j``zdD^sNsa5tui+K~uSs6RJILb>T^c?+X*=P&bAM8*XD&BqGXN&8>z$W* zN7}2Er`Nv_)=#B;9Q!rj{r320qxuQU038AM5`@`i6&bYar;#G`X*aLPcbOexr~Ocb z2PWD}HEw++>O~k`J|AuS2pQKv&Y*p{%Gl7$)DB!I@a;k7Bkr%4h_zeiqN>%efJHqB z7WCfMeC>kPwCN0|^rZKjTnQZUUjX{NUpL%QBatSM|M(Fn%#-C!Y;6+ap3_HlNt{d( zlh?@SA9Sl7@eOcu$GcJ=$=q*PuypJ89gKxy3>+X=L4jDYpxFjPPSZ2hQ?C*DSze11 z{2bME+fsqP;vZFpoF}1NQPe!^yGt3kAmD1*A3!VozxJ1wT*jOHXV+s#O)Qs4mrsbS z%x4VxPgmVC<=!X4AN$98@;~gs;#Wcj|6pQv+S0`dz6`QRCwb&w z?&XVR+Gom{V#OY>%t##tr)=*|DP?B*XtsK-9tH_09%j8ataS&O*n76V5e&Aw1hP(d zKscgs=;)S{`2wp~9wXJnti}6Q%N?{(#55k75@!SwR{KEIyz`MR6Hhjafedp?A|p$s z)qc;>z+&s2e~0Gz->jC++LA;?V;`Zo$~)$u6-14t`xTRSgU-%hXwhwHyK2rII@b;T zn^|pS?OV;>w^`2hDk|A5TtXF&S}M7=u(^=%+a6eD`eiQp(VL-tcXUe1lqVKq?eYB50MB@B!E|u-0yuzp9Zw*Lr)-H|9b-dW($XH1!mKH3VN}qJJ zPw9Xh93OML7h%?Sfbs@EVcFU5Q*yC?SF5cU01OJ*0dLCgCR^uuHePbGCq&E|N zCz`ySEh9&t$H&Ga2+k#8b47D$nQ40_#eiEe+^K+1K8$()Tz?F-?`;eHk^R?z_#lCm z{7*WJSvu7XORz!9Q{~u77c$r?Y9u)g!vju>bq6{Jjn})aIEqq1c3{=rvYGX0=z-}y zt`%2hSXfkFS%>KbVAP9xsv)Td8AIcb)iI}5EEJoE)WG(GOYSM zrR?^Q#;1$t+nJp}g9`OElHojZY5L^k=S57*cUKRI2N6$Gw)X%FIX-p>Re5z2R!+{& zX8FzyjtiTE_D8Y#+_FZ0$bSU0(2XdV{-XWq;#gTy6Y~sTL|%uJlJ!*Ny})zrz4M-;&Pya{X~@5pZ3XH=J!q%MOr~=Li0Cz3mKLU zjYV%Z4d^@63Dsxp!7vXbH1>P^of?sUO_RWgjy3Tk$2m%86P;6iJQeRg%x z&t@(ncesH(nDNHSwyI|aWknZI=T}^_dmY1(o@ihLjC;0DgE;(>EU!F-{gLM+zE-a; zWshrsNBvoHi6GfCh*ZakZ9b^g`&ePO*`hq zFw2*ieK(Nr+10Pm6*Gj^20N1Ox7)Mn8Cxw;aHiP#y{P9lYG5si(l%0dD$C9b6SlI^ z+vK!7^$Zeim728VZsO>rb}y(2;ne3PY;f1t72jWJ49If4RJjyY@MZtC0x^+;lPSJ@3TQY|4Z<=T!oXqf_0EkracD7AC@ z+MGQ*EgqNM{PMuCUCkZQ|1Hs*yfc&9F1FFp!1geGFUYIWRd_^7`0D~mlBLC-ir!~G zj&hJGeN&Qf;+9ic)yDMTUY_CW&illhjAqMU`%#cQ>$>`|(P9{y!|<0>k7 zT|q^3 zDTNbT3x;WEa!AKq6UNZipxReRphdfV$0@v1ZQq=0$;GQ@Yb(kf zFEJ&_+L9Mr!Y>~H8>>9VGZn9y+XuHFE;P-TDxPrSQ!COO&$9JSwl_zah>;-2XM2u4 zI7|Ic>YQtWGUXm@vC!oAfdw|ol4f7{cAW?r))rsP3kRW^JHA#tQ-g%d?T<^&ht?7` zkJIn@D9fa?^zqKNoVhWT8JNEyiI}^)D@PCJoy-{bh4#dGw1}2(T-g09 z?OoNDRjY?&p|*OPF-JTHlmo^QbU9(`D~bTFPOabMuHlyB{Q{W_gQ`#KoLRp3+Fx-? 
zQV2Bgt6s0q$TDeitn!2$4eTk0~su-Q>X;=cRn{IB~JC=^*#G<(D>Z zC!_qXGH(q_2_$eG6V{J~`X!2_ZoffiUyBX9mq(spc9=c=n~|2$%gfe^{u1&?{qem@ zTp~gmD+_l9RTCREOywoDjQDEt1||Lt!t=eI+v93h7dkhH!`3CvE2uQN;p(v~>}rL9 z7fO)B*LOQbtDGG;?D)w2v<QdX z-j%JQcIVGJQk@ohSS`qB;@ZL|r7^7Ip^TD%3QDxf}}Z`k|rb(i5C+ z`Z|AUoZwPtE?1#UMj$@O+0T+td5RM_yjQ{6P3)gcc$kv;q4dV5C%ovit+tCq8gJ|PV%&N3PQuw#@pyZdm!ao)eE;z(LYBg^hitYn})O2 zOk0$N$I?C}m!6({JxKLR3w)|_PG$#LhsGxyrYTRqXR8qTMsM80QOt|l>q6+oO0nu) z-jPsJHtbsTENPa4oq8DS;?|-`*#dv0QLt1$T@+cpKzWW&W?%P&gizU%iy&fs20NeN zQvPWh8tRu?3qc~S^<;di_^8>|OGuDh^x3dDd+mM%<|M#;|>f8IEX;kD=jd+~9Z_h8jPd1A4QgRDJ#>D~f&tlo~0ms>}s{A_C!VIsX$rvcSy zv>3_A&4S#UBq5yTe%je^#E?2^jT`HUlo;Q<^sVm$+8;o+ZykG1BVNRYHMDrN^KqOQ zf>d=&`+3GERFaMc%JmQ4;8+VcU$Uq}7m`r? zxB-i=&&tIrHtvsLaU0uaUGQZM<`#Xis(OU+XTG%%Qd`d7vGaR1chIp+s9Z^#@3A88 zoKxroBo;RXB+O#fN7v2=KIBg}fA>d&o*;Dy>E9q%@NpGUrW?vs#<#j*N5O$+<%v|C0d>Tk=m zxM?yl{;L0?JfnAQirS@^TP}MB4V-Q68%vkLhP>FWQ#iF_os!`)_zG$i?sHm~0i~R*sEqUjxS6DuOIy4P ztq^AU2h0l9U$lkh=*Qam4gqxf6XzYatIo$^CIMkXE97BkRgxKpbrV zfD8bf{T^WnN8LmD^-FWRd3p4*`~pie^-tu%?*7C}#=+^kk2~m?zSh6CEc)uaLXa!R z{4NGiGj#t}=J3u5(Sd_imT&JUB($K)Z71qu?1~w4Tb+Sndmk*N7OOTRt>n9$;Ib@7 zEN|2rh_u&gF$rz{csIU5Lz@A=B#%ZFFK~y?N0~(E$Y7K@sV6q0i_adeOBJ*Oa}psApr=l>%k_uz{XjZH@{`EVIL@Ka??d$t}Lp*(s#6Sgdgqo z`cT2Waz2CsXw58qTwd_3AX*}0Ci)8eprti`UDlG-z+G`DAH;USvPw($SkRI zGFGUpw2Lj158j1W5#ZWM7qyQ3~dzG4t6WnTVEjLSl%hxO_U0sTb^eTc&T;+kppB^B6G|D;t z2bX?jm$y`DC#-P%GOElyW&&L#6HBiq(D^%F zljZ4B%hnI07(1)y5y<#VdFkBZawF(-`-GYZycQro(0IFqUg?S$SngH>fx^#>#`bth zQSE?SzfYDG2Zzt;TCRkJ;sD2-pz5=_znl{ak~yrQ)Z?e4i`%M{cO)P|+F&)=$f;!{ z_WBilsMwySQF0vQL!`6(vX>??Y|OhDd8bFZG_*A}@nf!&u0j5Ry~f1blm(~4b^qg$ zkFEA;S3l;IVZLNz@aBNs2dWv)T^F)))!{yM%i10X9yCfX<+pO-$Fv(78|~^dFxt_R10!O?+W4 z4)>Kr*JsZjkJ4PapD@>FRbpdg@cMw=$(CC!PKcl9p@=TgM=U4s6Myhl5m`=04X6|{ zYeQN@3gn??sKboR$U%iX7$cv`NdFLb`Q*B~hnj@^M?`j;qlQ?V|CtS4zStfw6f!6* zd+OIAK0R*pkIUv;J;-$Yx(UGg?YFzI;Zo+2-z9WFi#K~UKL0N_kwv++m`(bh_Q7kX z`oQzh&DVr+F#azRl}GO2dK;WFnjxD~8`|xsZ$D<%%V!u7BtEzpS&ohP7-VB+DBK%c zND$n&X^dc^6TurN`afquey5cjG&WtCkK$6w6c4rTlKb=bK#Mk;0>merC}foP;`F`l zsqFWy7reiJf126wJ?{DXJsK*wYVVDA^~LtU{Q%_JiMbqMZ7GC}hON(ZatDWJ_Eb#f z0sTem8#g+oQLjZqB04tR(s(%#G89n_4%?>_PHFzKF~}~MhM1?T?S|)_!-T;%+UjR6 zp(Q57?hEp@{zcsA(b6~iOBuaEp6;5c^2LaJ=kMgR$sQ23vR~uqlNWSyYkIb8z%03# zo3=Slzy~V?6mW?-n(+&8J5UENCBE-YqTLFb%H64L(=qK1FQtcb9xbL zi*BAUA5t>zc2s0kSc5;s#(RCmddp@UvOf-K$-#nt-OBeJ2^ zmIf>E+0Yq_4}}lNwD@XPom!&l;Gs_8L?hqFt|+u%mhOi5MZd5oMR^_* zTM7mV*ADOsR8#Be6m&vuS`jfF!yd9Smy!*^I{d0BBfkWof}$g-RYUn zT5{2Up9QA#F}YLqnN!0KVvhh01%Rvzrf8uKP5PbFh+zgqX+|99SSsS5FMOe0Yw|wV99gP7_b#r%n z0jn(E*{62anKWNFzJ92Hz1F~+qhNo(nv1%a|A;y`P>_r?-3WX@Fo}Ts$n*?6kN^we z4C(7IQsg@|xbpzEkOfm)Jiy%oh0rGc;YGO#p$erpZGiJnhO6d7Wt0kCiM;64AqD{J zGuDS>Z;bYNvtH&h|NwUNpK+YggB+d0`};4+=)5is|;i{=g-;qM4;;jQ}O zVLws|TP@Qb+K(kq$O)y}s#v72$NDBR^?k__(4I!d@gYx@!Orshq3i6gHa(bE_R_>E z5Nybe`)BjHT-p9u{Wd^2#D$6c5yYHWZE+_dylCUu102!V;Kc#E^Xun;Ku`0@zoUv< zxqaaDUmwm3<2MJ)dN%K{iANieNvJ~SW|Y7lzah6DDt77A-+M5RA!;>0`*phc!;fq} z=bxv16XwG|xc2MH1K4pQ~Br=}Lsbqt{qy!ri{dDofEALw%^8a~_i^h4BO;{+Z9BP%= z&p=8CqGcport3kB{WJ3MTmq3Z0T5*ly0}x#_u|{amFVw&vp_ylqIeBQA$)mq(t{V< z6%ZgmSjQ9s!T44fVQRcX)0D;wBw8R=NxjE3_oSRCbbOnoaydr!I zJbD{ezB%rYrH0a`(q~-DGj;n<$rGMzcP$%hLqg)e0Vui3=x8cW$EfysJ(SG$t?|xD zzGH9JGE}PvtMManuE$@0$k#nNuUhjT*q1>vkKWs*0Hc)E;4$V=Q>$LICH3*j)KItH zgLG$CWRHlsQnCDd2E(<=BEFc0MM>Pg^ic*AJjqhL#an**pT5_*QOWZZ|As@^qktrmg1z-480M_r* z*|#8;&h+!|KA`C>{jrj?hY@s}xi10X+mwA%cP@<5o$ItTlo_h_D#{UAtOVh`f3d~( z7sXeXa(G_rNRyTN=-K8azeO@Tse!l)W(cr{=g0JI{jh2My^Q#B4Vt)BvL`x@=Ys>d{Xjj=*ab!i&s(xy`tQ3Y=Hp!w91 z;L&gf=^1Kb=TG{m>2TVX^$q|3Ie(s|WrP?k8!QTOnP0|ERp=w1QZf+_BCasz)!lO{ 
za9rxf)(sT=|6i_n+UQo08qQmaj#NEP`ZDXOHGhUdNZ05K%x1oSqRhbNKauTmhcF?H zp<6DE_i34R>Wro6S0Fe;n|_E4{PX}vcfZ@6e?ol+({gRW+NEBNDsPQ=Z-MF=UNiEy zexHC75LHOLS^US9byT6N<#~?hIMLa!AqTyj_dvWVW9ilaw{9R%*mma|TfjZsGE`MO z?>Ghd=Sx<;+xHVxEo8Xt7y(Lt*LC=Pp}i|lgbOiKY)&#{zz?6^!V;_z&<-)^T!uRV z=d5;etx!1B7rQ^c5H{6f-8=sa7qQ1v_VLriO3qA>%bsh!jI_VjV%u^Hr?Kw=V-!~` z#!+0DksUsk=D}c-=KJ63ELmlrf2JPFmWx8B?p{;Hei01O1~8x>MTs=0%;sH?>*HSA zM25J$efYSVU!@~i}9TPb-v6E6kEFV)mXaJg;N>1k-0Zz)rWrPYA9ac z7M7r;xu}?Zb~TZHC1o`ep1N|CGwZY0Zm(QvRO(9{St#dE2wsCCzjG&(rv4>V`{4t* zJba@Mt{Kdg+0IyQPTY(~;bgc$)Gw9kN8tU5rh=mKJz?RJm3#Td9_%rgTl(B3d0|7B z7u)-GGlh{AJ$Oi9y~xq6|`f=J+32M?@_--X4V}tHAdG{ zHY)&@E`pF3rg4Q4^73)f$HN8Fr?S!`tM;+SJTER#T(@U$nS*H0Xv|^m`#8x#=P=_I zxA#*>+1n86Cw1&Xw__&{OH-b4-{!^v4ipD~?P!zM`+8V3zH;hl)hsJ?0RdC&14(=1 zfJ)p4;`89m19yoJS1A8Ut3o=TY{^9hX@KZj!TYwXX}YT|Y;(+Kk#8r{Gj>WZt7dS& zGo;$-ekjrYEJixKgze*OTgY|g&w7dP6>fK$IA>W>Q<13qAKyWNVSm|t4PG@$N9!_1 zSb>j%o~njDq=vU8icB%Sq~4z=EG2n+Q?H>Zah9FI>048SU!sdN9ZgcQ`2-mJr=r&T zTfbakq&}nIY*bhyFFB$zH*bQJHunV+W_&4}rODko0eSYL5g1KF$W4l3PsV1nz~C;u zRU89l?dxgFg00>`(7Js}c7VEqk#dL0VsW?}X^&A@%Yb`_51*8zv#s++?BMl3jca4j z1%kGPWvlGYr$d19K_hAX_+ti&GJX9yh%1^T{o@h*uib2I5@%3PzK_~9T z9v{2w77-Ny$Yf4!Td|THZ?6)hKgxsfVgQT&Ppq1+ldnY~*2^;p`VECNbQhZF+z^j4 zw@k+Gcwl&U8nf-bw^Q`EPP^Ls^}!c-(u_<~&Bd`|xicLLf9)#a3)<9S z6<(#O5TbSDLK$RwpNlgf*N)z=Gc|vW zW}v;woQ4@>P1t1LB7FNu5UjTHy`HmP!Zs|Xa2zarL*(ZSX&N7gm zEC8nFln;8=`F-l-84paDt^~8z++ed`0CnvbQ806K)+p9fz6Q=kFI@|P9M(*Qpw?h1$eKw^Fo|p* zM)~1e4jYGt#78Qq4<8R0kzYc7MWoX<28l!GtFlonxnnU-hGYfph%>eqFbvLVC!mSf z@K~AkPcBN`K^cTF)IxQt974~z!Rr@l+QWx8#vb?nv0d}Y<10M6s`x99gYD27^Bb`p zh){qkw(O?Z5f(s&sE@Iv0mT@xv(URy=+=*=o4=C4J}&XMdZdj+cc6ekM|}(kJICYi zGc%;F83cx$v&$!Yqg)waRRt|~mtK!{KzxuNjtOCL7JV|4k_?Df9-t%h2CW~}Ex_7B z1R|P0VV$j+wqDuwOF)ymlRD-;F{sqHu8{Sqh(QlEFa(#fr$-_vU(S145q&Pfx{J|6 zv{$yB$xh1@0G}U8@8Mb2!Vn%!L~`$B`~sm4)DN z|FDMB1eHsK%L&N0QI9E~FSO~8M3B3Ey$OK#>LMfpd^sQZ$~JX`FXAe!C}-ueX4bF8 z^|Uoe6!LER0W^8v1fiH_9qb1U96R-k6-1>iN5m7_4foG3_Gni-k1O;X-z}2mo*dLV zQVaJFv>*0T0>sumqH%p*+QMQ%2lBK1jt;+6qz7tw2HTpsifSQVx6z&v5Zp!;&wl&l zJ(4U1r5yUS+&w%xn>Wso9!EDwmvJE>JXw8{AO|8RGY9(g05{sxjZXZow`AxQ}T!n^4;TaCg|Fm=SVW&PKoLfcel zlDVp#Z;8eXOizhSEVgV?_P!Wc5kKSJ^keBi-u+F;h3B&ijrGVMAI-5#BZ0YDk=%C|6G%w_0!BY-z>vkz;iMFTm<0f49(g>yRrzkF^ zGJdxMcRWvq%vLrI4T`s2cDQ)C`oc)T>&@WPPs`fn8=LN`;oy4n$@|*V2;l+m;OLAy z0=V4aL=}9N)|>DWg_i!kkxK;*Lghtm>{~O?q{LUn4D(q;>HB$IP@ZG}%v$jmzM=x^&s2}>PEb?TM zCVDh9nnlbs%GTx{t43_Bk5M32HvNf9#86=Ho2vXVQ!HNUoWfyI|^WZC-g4;wL!bTe-!lS z#)l0|E;nXgVO+`o%l&-kc7kmMNtL+FdCEJmtE9)%v0B@*P$Uz&Tc#|~V5!$UsC^Q- z(lUrE?DBj>8B=i#Z{0)EPc|Wx7lYmZ%GUc*gu2R6Lht9%qQ@I%G-;vKlx;2^6Kwhg z`S~R*KrrO$D;UAHwhu^}pOuh~rc&yuU>E8sSzdKnGn%V;)2V?aF6o4>99U2OM6Hjd zjAPdP@?Rr4@}ijwi@lxof2KOs<+)znfWLX<+a4D&>t*unVNuSU=I6dQ1Lv^5tSOTE zA-&kQ{V~N@^;VieY~>|B71D`4^OOa#o*V@FYirg}5#+8BEcN>yZXl`gvs*1+TP+2` zUVOirgQy42*{AM$aYm$Z-*s5&ZJXZWsRGqnMOXyI*>w+paf|3ek_&BKGS{#{2j--H zOSTJER>?3LKUb=4Y-Fi2dpU#?5uR++rH{I7?$!DxXtvT3Rpfn1gjXcX({ZxH-J#*^ z*}R+JcH5jkQy%%YiB~PkvBRniumL6YJDI5k{8N|jb#?nB0$?*TA=5rOj@3;^DIR5r z$ZsIEk)Pu*t(n+ff>!o^92rs4_{01DLRBxU)JuEOwC!*|w5uoJXX24lS`CB7+b4}Q95(@*O_ z^#lJVdN))-?jKc2agt=9AX-mi|AXQM*~W64jp-Td7O%}o@=b~Tfw0bh^bj+=F||^W zjCl0-{EQ-7sIpDWujDIg&S-NZn~3gzWc~Ns!$%$}^_5~e%C>kc|s~G z{O9$tpUx1|1qcsfxPY2u^dv(M=4vQ8V1S zy21fSTgU$b46^Et8gm!Qdt$6zF(?UKEkg&z>$^ny*=iSf~A>DGD zp;lg{+0s(Vs3*fJ{07ZkrWcj4i?{l{qeE&$I)dZ#IO0OKDMZ13GKQkg1TldzvAn%e z_J%FE)E1NHc7==~{#5Bcwuh~ROw&uTitb?*dBY+~cKx|sgQ9DiQO9H}(r0~3-D9q> z`y2R2v`3K>0$wy>w&Lr!KWa2JJ`V_h@9j65>s4A8Xv*4iz@B`0XTn{+%M&iTqOMJw z2{?`Ds_a^}$RE1{6fA9z?ujZ6tW&(e$(PDCA@b8V|?D|1D(OrhGPtPLvbKjcYHVYRm^3C2}? 
ze3K9EizpJD$z5i2ctp&aI|1h;qA@NzImkC3h&v+!Ja;R?fbe>~T6dKucJ83STy6r5 zteG(oihL{KX8#XR>`U>-0`2aCa)>1=Cgr-0e0Kl0c>>CQ`v&nnAtm5?&!v*tx)cMQ z<~!sfEZt`OF}kq3~C8 zoRZl{Vp|r&Rzz|qiVi-A%eU=!&Yh<8ecj=z1{QI(;G0K2FKBkB(nwjAOI>=(IWPvs zV{_|e$Eq>?1P!yL2a?PzDmDULfm5b|CL5%(pxT-;L~dfJH+3C<$^N@=2PQ3WgB*FR z;H5p*u0W0Bq(>XN1O=!{taZvZ_kSJY1KoB1NV30+_#2C?y@^Hs-^E6t*4e9ebFDMy zW^xyVWCth6K&ZNYn+aIKSvOsYn=i~>VFsQ2HP?D4-~_S+@lyielL`(f`6% zgo84-4fP_^UjsK^nLwKA49Z+V_zy9cU@lC|DA2dY z5!!R@R?>HzRi8{>tWWdU8v|vx>Cas-Q^b0tD^&>u0lmj$CcN)Dl@G_y7K>&{_p@yx z^X!80oPY#qBQ6gAFFTP%oi?Mz&jF=k{PQ2(?W)2T=OG{gJ2IN00VwcN}l0Qh9Y*DWvx( z|7oH~Rf;*Hkb(P;*~4Yn``>|L;Cwx*_tDISH+iBEx64F;R+k%Tz81eM$q)ofRowJAY~NGr3Njb9 zkgTf}lYn6EL(4*;6VW{&sbkEwdH9>kr`Y3*0VBx~#^DQa%4dc~_ zk;PLzPh-Z(42AIoFD}DvkF$=JQIxrWGk2TOnCaxN>|Q4aNi9J@o}Y_wJA>^<2o_JkRLBDwm-@o@H-+BKDJ?Gp(zfS8|}|H z;2eH4ika}|TW|D5ld=Tl50G-x>r>h@p(fE(kd|0jeT{*kYz7s+SjditX@r9ZT(;MK ze>rSazHt&9&l$;p*bqC1)d>Jx$7)S}I;(MUvlJU&nR2Xw7naW_Kja*xhv6x9fK*z( zBqr};s>gg)yQWDnDh1T`VwP6>WAa0xh%|?Vo-UdnxCz&ug-DvXW!nr=5t5;whMYOz z2iUASa`XhY{eVdnN~m9}wrQ^{2eflt%C{;Ux*;so)?ic^UDCE46yyE_#0wg&Dy5?d z0JnRNBn4TQ6f@BL{TkndO6tIpHQk;ngc}!JV4(RTR_1}@j-5lJi6vM}qJLtfpV#<( zUi94G-J_&PBXO@lE#%?Ot=B81TcSywNV8zO8}c!h&ib#Xbb`fu?}LXf*6?`v;Yijq$7o zj`$Auy}DjUMKC4w{As$=S=vnL`gsa)HYccl2BPwZK2-_56S^ZlPD1Dss4QPS`Ukq< z5mDnTW?KN6gUW#0W~*?UF)Q7(JIH+s)UYGZ+EaJsW3i=N*E_am|9)H{2M z4)1`|5@?6){oPXE)8dBM=M5~hCyy_0-smjra&_2uoEV?N-g*5FN#NipGWY8pfJ%0B-TO%ZK+u zujO$Xl7~0e$E{KS$}zCg%-!dhSkg{R<^EPfAEaC(=9-B2#^ z{f2Oxk%s_!@|cL=bYwE^7Q#Cx9CwZYZJRLRw_# z2OzMS8jq*5NZrzgn;qVy)`8 zPAK-i_|6rS%WB!vJe9zUSZ?M~)zFdN*_B1*AD@)XYc(8E1&*DiT{>^8z7~`M*p6Sr zN{s4lY)oTaN@gC%UjU`*6H0FUaJK#+$ySx32hp(-WK5J%vwClQhEZh;lmL0l+jMC0 zx#*uWwVZ2+&D%o?hy3jwlXSfyEu)W@?;g*I3QiG~2uVf<9rK6#nb)`RY6cyYsq!$e zf2Xbzlj8TQa*$DeyW??ix+nsu3PMi$O{~pOgxg;~39>q+n<8`+Ft?yL7Zfn$<&TvCwgyUkj)>!E z7c0{pk9h4|cb4PR?WrH`O{WdSz(bQkP&d`j`wv9Lc{y9i5Mr@uZckeN1uK0)cn0vzVF2 z!M^G&LZe%S@cacTC<+)Cbe0r8i}7fDU|v%^*S3@>q&$70B!>hbc;=8oq_6qwTz=M; zuRgPs|Mt>V6nNd@P1G$tlwoe^UZ56*qswuRs)7phz(Z58V!kgfM2IA9|9h?Ws?=7@tOd&lH zO{zLvK1OM($Bx(XI5T%JoiJbx-31v`ID`xFEhD%S#M9NqB@M-_QXSEe-&%e#F&`wV zg6aDfncTpLT-rl=PAqqnFw3*MTs&NQxro)7yxsoExPc(FBkIAOwa~t;povco8$n3yM z(uhu`zyyK`6f4<)@T>)SY%dnC!`r=e=y)-wo{{%dI==5CP=sIqRCpfcrtiylDq{zU zk%n&Zd4V!|mxF-(TP6gj;zkBFxWNr%`W~WzY5>rT{fNhmZy)^dLpZ6x@m6JmFk`r* zFa)!o2FAgfHoBfZy3jldYb*Dfqi0TlfHhI|X2uDpm67@26-`!|E!h&K#%|by;dIR_9L+!KS90juQQ-V zGS~RIzWHBP%X^A{!nU=!-Zcg+|{!=?ym5o4A0yHfcJTG?Q;ug1~%;{ zw3MT#HdkMI^ghP~ej*M;0_t>D|C_RI8V^2wVs@}NjD3%9kLH5NK$4T_7TnGUy&>TL z|Gpnz8UY*uU^=)m*RbUv2yB-MGue(6kR5Vw|rwZNON|+ zLdW4;5z>~mV_lUA-~I6?20|NXv~bVmNC#l5PeWIq9?9$|i{0-L-TLdBJFy^fHAk?V zKuIM+`GMAg@W-r2w$#ir;d-p$n>FWdwycX6e_w2`VO(oUnTkLI7w}}4v?S}}J!EUP zJQ@v(3sf?$%Y-h*0Ol5dw6Imt!17RrD&Yc(xb?m&@uh|b#NaAgR3Zj;v^b9h`xF9; zGQ{GxO&6u;g!qkIWpw%)@2VC+_HEjr2)URUV(@KnQ&+t$;$Nlu z$)&5gdy0m@qjU>|>AnwF74ypN1S^qkHEnKb>v2^NhQhK#V#9{sjPRBX^xwxc7H}>v z8j6iSblxIfBDSxmb$WVgfAUlk^#aK~lJ(ik>Qc)@e|#g&2{V6IFpjKJzcjms5CwsC zRj^*4I2j5A2Ns_`fCVcpG=ZX*kJEp_qoq&8a>A01Uu<-^37~k5Q zYaz2>Rh(SQ=zr8#6^+?|E&A&NIU;O*ib$o!R`*?@=+;8(vf zVz5YS1VrvoYxR^5iRa$nCg~o2_+7lHu}C1`JZ&xt)#!1r#EyAOY!8_BnlT+9!udjR zb!*Q1aRYf$4lp`!VUPY{%(d$a}PFme|*Ks;?pZkByF zjZ{{ulPbKicWnjlmPwiG95_hghMly{5!?7rlrIx0HgNI2$$1;v`BvSOx z+`cqTG*%(WBz5$o578#fn)9Nkby&iMdA+A~%y)#x?EP8S06q1+oUv}oayMC3z>v@; zdLcdl(1mMhslo@}97o>lCYq#>&EC{4Rc%0E~m;s>fka-}V!-vBiM7ZQv{{yZU-HJvM zQ_BXN07Ec&01*9)8l}zVMNRBPVHooz86irfyRx_!$o?!S5kEg$_o4&Rn>2DqIEx<3 ztx@5)`b5HXEI`A7tN>!^fJS~0iv>UxaJ4lHyOq2@(XC^}TKHHocJbmzG5#}V=>u3! 
zakGv$oYpN$>mPZ>U00h{g9P9WJj?&V-{NSqcDkA&uAhp7r z&GY$aIv(@22#%_Ek~9tdW*sYhcSA7{97SVvm@Sx6@Oadr0O9$aGrA?;F<@9Gt96tiS?Q` z5p1f=@t7C@yh#<=%D;b0=#{M7lmCSNY)=IU>U9Q5i~oW5w%5o)X@fx%5^A<=u~CqJ#e{xvVv5BeIG!8__mt!?ThYQ8);@(6lRTDkNfgoRJVl zVgHVxnhDx$TTreVnqT%z*nl{?1407BJDY_giKRcOa-cSXH7rN)cw^FWFZPD`aJDzf zCXj9f|FAp}fGM+%hahp`Njk6@E#kkv>c^}`k$!l|aR!u*Bm|`}L_S)t3aZ>fdQc^d zs{l)HjY-BDt|$dM*E{f<_Y!}DFofaewwCGb+O`tE+UImaR95Qh_LaIj z%3qu&+KOcjjH}KUd$Dq_MNY^5G`d^W|8XxG6vDM+7(m@sEb>+kd^>#~ zdk3C!|4FrWHA~b1c?Bi$JFA^`|KFC(W%7vAjz)_Z$qp;k^-xL~gS^Qxi%-AP&2><) zPf4|kLhF0#fM}W~!RV9-L^%lFnRHb3>vp2HdiLJ1h2Q&rgmZPC1U6U4h=^<(Z_BNW z9p7Q(tnyBD0@aW(Gg&04g*`6MeE|VwF^(^p{t0CkeEw-a`=|ZvfB*KgwYS-$(3t<} zg(*1NkYI=2#hwDyaX&xXj{lc-vdUtc zsUt3Z7txw?QAcfgB)gfEiWNbpm@NcJTYJz2wh|*dmX`e)`z_4V(22R?SX*Gz+9;(3 zmQjBFa`WTAr}IP&7+!%30Q7Ba_cZm?bkAaU?;TOJAnlFTT>$+yK6r)XA`~e%N1Jl1 zh|%hB2(%v56+ya&}=1osmmoX5em4)IdkDIf4(beEz%ddbxGVGijMwg@O*Xae{6Aazw;hoy7`qMaJb8FVk zHIQ;4<#3tf>urYB^mv^o{UOf&;FX-1qbsCfsfQ*{@)BTkO{XbtbMZW5C4r*Dt*uXp z*Y6u*4LeG;)(j{>z40ov36nWv;M0HXDl28qW#Ssgk;VXNuDElrag$f`$HI2>QJ={t z<&gQ?t1@qB{&};KR69GCGG=KLPXT6t4XbnTJ)A?ZZ|4&-f^m~nh1h+b%kC!*Vb*T`aCJv%Z&Ea!NiSwT{#caG&YQYS7udHG9sgWaPAF!Vbi@B>IutSMH& zE}}+SB}v4(l5jTzAKh1)jONpmhy#nwOMrfdR@4VTzjxT^dap$7vyJ=uQI>T~$EzOQ zu;j`@`csd>T#0_C>$ix=P~4>BdT;-C;-5#AecP4xIeEU_y@Wb*Z+Y(fT>|II zsE+n1x#13uw-$SvK;oG$6y5KpKONQb8lChvV%ruZo&IJOVsR7@FTi}w-IKtq>4Vy^ zAP(W5gLPeyKm#VIMUvYuu;^-{nem$4M+|Fm2_;35jzY! zobMX3+%-TcR9d3F4mN69cSQpC%aVB4nF3CnCLMqlY)@bNBarJ(PSm)duNULhwMYC> zBAAY5VU`P|J8+(8u?{T3_r})WPC1tXRmzvFbyi#8JMF^?mQ)!fNe_a}U5XNhhvZOW z#@)UQ&V%W=T%PmSu5ZkitFrh?e#%atJ89l0uratMqdheB0M!8^h(W+yXe!G^w1*pJ zb^JQ_FMQN|d&ElX+U3VPDS_KUu8_5KH9XE78S$K6W)ZLE;06ec-egBL|D#@NumDA! z&MPMAiHf-ae!ksT$c?T+A62c!HJ)IvN69Z31Sw><64g!A;oB%p3$Ns_)!&_VJJ_OL zNm2tg|DQyC&&3=VKTfR^RIVY|1y1gFOO;)Ttc$59L$;gk?U3!paKw`ZoT{vSucCK7 zsLQw0e=9u79fjTZyb_$TkCy?wF*jOE!5VN6zfr2dXjI)S-}Z>?7kH5@^0??fpDAh3 z8A-}5^>T;J7QY+lM^$X zQ%vCIDa=i-t7n9NAz6}xSPrwKRMSnpX$#+NJk3tU_r>RVXhBd8^;{O_t~K3zuunKxH zRfGGS(!A_q34z-_YJSuL<E{+IjEov)f^sn3MJ49HYsHSq?NJW4%)?ilT|?E6&rU9eJrJ>(%q(d*dn_ zHWjNOpFH;2RbPGhSJBvOxov0Uh+77ZqWOXNY6qf2t*Jnm z9O5v9xSWz1<&}ch0%eXrNW6W;G{t)<{LBJRJniY#01d(}3Q5=#ATwU}v+sRC0HLA% zaC4XuW8Ux^JzwUA5b59UiU(!INuC*;@J4!q8>6Jgczc6MNyGvGe|FJzWMUMnb$rgf z2EAxwsW>M!L&0|SW4*`rT?F@nR$#;(+w``R?fl6mIr0k!%>TGA=H+#B#K0Xs_s7Q* z?Ow-(R<8#bHb&bUrp}D0i~&p&xfc0_B{NR(AWG7)pEeX(VjUc{>ZI}kJCS@%i}lZJ zpL6yXkG?<9gBbB#?F+onMf&&Q2nX?=W2Mx&`HX|t)J5}z31I8hUP&e$1+TClt1+Pm z;&A0IXF*qAV6zE~csu|2GOse_?f}z6bK{f2A%;XUfh5aImW*S`9QNmr=KM8xU8V=T zzOLYunV>!tQY>?0w2Nx@pos2jv;q*C;$j>o2?|8%E!D@2)LBoqV#xgRcyK9PsZ$;p zk-gQngCZ>ErW)qSHtNQV@6J?Z!fB9s^gbc07_v!n_<8=CX^s^%p=H@lRTLyt|Dz^@tB>SRQoH?tLDExFMr6pPHs% zr-F-lR*n2Th`d%eCftIk(^^TKCMP??>iV$_&fd%5OWE$w_iAA{bG)110W=iedyT`s z3Nh{0nA=}Z8~z;z^aLe_0Eog zm?>Y#LEs`_0*}{?Ohgt8^ZT31e;LlT3W)ofJO%sZ-fcysHthQ@>*d`DBEGIB#)7+W zwC5USRKS)Qomo$+XMQESTsu-7S1C?nRnY}fv23VE6XU7(NHZ-@YW zvdc1gn70ne-&wHp7=1@LfH<|-Hh=gcLRY~Nu5W76eK#fwMAow*^9yDPROAz0szL16<&VO zwn$qjI2k#{?`m%bg1Kv-<-o(1M1hiMkJDeHgP#X1*o{1FX2xMkNDXll(EFr_UOCmeH(W?`KK8>cM7(X2u)w(k=%Gy9a>yTe5zUc0{y#R*CS&NgiZ`W1|-3 z1+w{6zTQI8oMVgziRHOUPbdpvT@VW=OVq78?)qyQ8@`31dVXDpIf~4y?{CFj3kGYw zTsfSd8|lXAQNwQ2Udo2TYe29y$|XPdfZ9xA^p4HT$yuVU1bUbgC=)}hEnTI{x1|MN zRJJ&{0Jgk-GFNY38HFYTqSUF_`#3;an@w{I)QCN(UuauktpZ)BoHMim9 zY{=mQ5g2X)RB6VP?<*EK$z1MD2f>Bw)ixtfjZt_SX1NQ*X63gUqdosNKLhKzX&Wc4 zX<_&OKDDehDEPn7t6RC&!((dJo*j#2^72{92HYY*#}FfY8sT z8WLv+T07k?2();Djus)-uyDd!GoPP=*07Iu2mThIH?01B~V)m*!t|mcTll8> zYzXcX;-`Xsaivcgg)!-?2S2Y=x21s_QlfWazs`>OcsD+&WW6Q=UcZX1zhR 
z-@A3<-YUc9fzYPyfMDaZ4im#6{@Xd|cR52h9n{?!3(-e_B)th*I9RX!-tb9;S2$E4 zw{;CK6m_}8cnZ?Aql?+xH`+E{9F#{}yQD;Lu7rS#wJs66B9_hxR4aF1wbF=`N#POh<#it<}5493Rv& z|I>j(+~L^|_>dW>HubEm>wLw=(DL(uPDKf_gESFREnD{E3TWw{X-m93G4)BsknljZ zOF5O_DT>#;0F(Y@_v#>Lmr&?-rOu_|x#B*|pWv5}2SYCons$?B0`zki5W7{~)v0x5 z0wMFO3mNJ8OM|^?E{;Q+h|9CLbX?68esFIn>RtP45$lR@f@6~rvIpP;sJzhxh!8WL zVQE{G)Vesg+QJB3P8LXSVz)E$TK>uN;+Blgi1Lh)wSrI%ahk%hMR^;))=38uP-D`N z6;24RD@C8<_`aM1>Q!)c`^O7$e}J3Rm{yx;yILB#){g|v(&lXE!r+rz9|2NJF_y-c zH~S3{{A=|J{o>hjpnl&sTZ-&ZIMVb}KB|qwBO*CuLj$ZEGsreyx{l%`BDv)|E#_Ccw>@*#X0GA0 zvcaE6>UU?y4ml9|i#D>gng#F(egI~34%qd|oye^nb#Y4UE|BB1WrQ5Z&xKYAYRy*=*YLE&1Ab@x8{ z^(R+IfeXL1aj5Z847FyXJGmii^1b{KHrtnYyZ&`hj2A3debBpT<}qk7H1n3ITmEXU zMQTX=sY64JkTIQSoJ{kV*~h8=V2gP*UUK899KkpNYVtaRTiPEr065dB46%1rd^i^5 zF?=B%$~aLsLDpxZE(#ptO^cCN`LJX0l=Glg@FUM^MYThd9Jce!w~uygp+$Se6X;J5 zbWBcOQJ|(KGg240W29ws`B3X$^53f@JuEoIX|!>9MjFy^Ny!Qi2)8>n4}F;Q2W7Tx zr9cgJ1kI9o``t5uc~jpd>#pF24|qzyn4-#Xggw$Rl6nByCK}Ww!m24|1^(_CTv?WA=oG`dS9sex}7xuI}3okqHGkGr@RYMrOD{Qx7VA> zW8ZrLWMMb@v zCx3yMV#u&oXqXLHHwRzu8g<6#5xxbjoj9nBh`+$h{I2A5r^NtdTm5~fB7pf1 z8Yq;Q0Gl`M2Ki-4sbjRh?9jxF7vCO=pLPUq5QxKb=)YOh-!Xld4UWX=~fhm1R9 zO@AJC8+DQfGM_I~ywalcGVMS-zmB`V`)vdG2C&A^$bR;FERrlbmS|4b*|YoKalQeL zx-@`|^m2C*?Fc|5g|pllO+6Qa!@tTkh-;m;(kDOjmGf6d@gSOof_@WU;29|{w8=RU zL{4!sfUos{)cUJ?05O54d*7)nOFb)~TiI zix1-MBu#|o21v1lPs25_kA62Urr3fo+|puI4sqDj58^Y0rQax*XuNIYv2T=lr+x z)S3O~g0L@Z^*!^b^QWsFT4Y28HeP{Hj_@R(G-jXaJJgn=`%zLSk3Y`ak)8LLMC{!6 z@I8LQup5gO78?cZ&`wC^s|%C6LHNWtJg4F>+zm*yet4V31uzjTh247N;Rz~q7)`ai zrA9+NqhGQ-mvPC7@_?v0#{y(4*3aAB&=91 zzP1Y(Znyqsqs!4D6iQkh%+i;evD?p^pFYbC{dvZ%@o!-tx6bKy*3W!V`#ZPoCjU?)Y8DQ&y8eXZ!tTPLy+$ za?;-Oy|d;x7bG4eAVHDMCsJziU8G#KGjxyxdV~ni=)1tFT|S-tEQT}UfcK1U2{J-b zluv&$-*fGiX!*!p@OplO9*?coB~9H%lM;`l`n?pD$M2_ziY5$^Fz2Wv%{EAMzJ3wF zFnb(w9qE+O;fzG`O;&?~Dr-(7u2)==1B|8zcXE-f|8{>Z%w|JJ6jZV4jkV4PIiQqv z-Q!hqMiOzqYvjN%oVaE;4qZPe8c%`1@@-pNFv=tv1Q=t}V2>>200PN{m9`-`Cb$k{|aAq< z6Jo${_|3$r)Rh71m>vd7;0C2rlE+3D8y3Vx4F$WCLZ%a8;70Wiu6*Nc-I4zai%Q4F z*>3dM{zGW`htTv7qWB*~@&66s=pRDUKZK?=!}33brqzi^naKaygeH^WHIay&vbK6! 
zrh?iiY=O0&vfN3j&WOkU4l5&(4qX6H%$1Tc2SlS}^zPKPS@bHF394iIZvFGctzSe7 zi(OE>7AaL9F>!!g3*?}#a#e*4R-8GIa;&v_pLcMK{VH@(Fz&u&-i3~n1?E7{g>6AB z9{Py^S1EqFej7ieK(>;q-RR zkY_m7SFQy)e7CqR>{paDVo35UghK`*3TX%BJj1Xbq$tn6<;&c=(Ps#{nDX8lxqpj2 zRTWS1lLZd7g9n-G5yi0PoRm&y6o)(KW6}KgF0NOb)qguOx<|^ZIH2WjL+P-=nb9~0 zEB%20z@O1t38^5J1L8hRwzNq)RBdXx+I)wj5?L}BFUW%7BDB>wC`g%B0}3tN<$nKogtR+dH>nut^hsMabp+QwM8!;O{%3%wz?ki0!;;rs`W|<4Yfv z2^W~0_uj$p4iJ>@AMN0dQc*%Xs-d{cbu$s-k~H}od%d>AWtmWIk7i&-sFRS!iA}pl z)=KcCZ&WRTyhE}BNMx7ulgM4?qksS622?}z%FBhW#bLu7jR76dv1`?I8q@^4_oD}U z3^iC)r*Zg&8~6-wkbdMj@2OAp#|Z`zwPOZd2N?at_ZLg}WxNv{K0}+Ekl;D|)ARx$ z%VG%+o+<`v$0l=XbB-$+5$7H{KZODYmtB!^#uqz`LB#cDF&K^WLO7= zGsG!!5n!!>LIaYJh`}&dm>QYKzjfUl?7P9MB+ISO8IYDMdhG}5Gru{bV!pnQ2`K5s zdW7Z)a&>%nv=s&fM4a+K7MyUiPH^x)pu?*7-EkyiM1Gog(Ig;Z+<{S2fzW7q13X;o z)gBiatny4G=z9vrG#vSL=^3k?hvnQ((Vyu}pUA?Z4;%QdMhcz3PT8Q#TpEO6_35f9 zARSCvt#;xAU@-K9dPlQ7-2D9#-7CMldl{n5u~kK|t@UwffKSK-$mF-YPSAJe#hO+m|q zSoA`J0fx%GpU3~jPx=l3J9h&G;$K;neS|8mk7< z#tK+;rkT|yB2gkd5v5k3@v8|QC5%GA;F75Gt~H6ktw#YD)$At;o-Ls3KO0XDLt?ALy8HN!!;H-2F$yvNrKoVHJ*}MksHXq^T7~T zUmIXo3q737@Tm~&!I`!czc6xDAG0ja*fwSG4-S)h+rPcgV59aJhyhiah;MF&c|Cj zPc^FQXO!s6gTfa53^JF)j&nH}RLvvG69_U1G6t*{L{eHJSz1CmWtXfvTa(ucieWFm$K+ z@D@;lQ8={O~U$ysE*>~q1zG#5w1T1+tYZL$N@xxmyEi7Pj$0{x@=5s0y&mo z-cK8TiWI-H&e6spj@0^1N#Wvs<(_>B56tDNE*!gelrt&~ky{@(m0}Y2VxJWB!-swD z0LA!%iI!cHL(}y12ZL01qiO1h8=*XZDF?yczK3|WY3)IrVRNVaKpO+uS7Zr-m%*#y z&3&GCfM8O9nkznS9HCg_`$fokED12YZx1inyU8Y(rDBpff`PIEP7@9SY{>#69ZwN1 z$fPL~RDlho?vFa@ZX`p7U|MT~*06o!`*W3|bw$lEGGn@F?Slh&kJzXn zyJ-NR*4sC$&z5725KCVUR(0N0?4i^Mb^%(JbEB6G(Tsb`TjAEdI(^p(Hc?YPd+3J) z(*aXFLe7^TDc8^jx}wf{+Q`6Jzh({~HR?pX;4oX*iffEIgZ2V;*I#L)7E4WT@t6`X z`Nk0IEFdF>`NN7!I}-^drR{Yz&>EIkIzO@|c!TOFb;K6c5=JQNllhVLOIJYnq^H2z zHXFf)Kc2EylCNfVa@Vqv8W;PMM1J1PJ5T2HTISC#N0hlN;y@#uSu%JksP)#1oSbOo zu6w@jlj`rzpvtBk%c&k0#&sfq;{_V)>minA*F83$1;ol)-k!i^QG=cQOMRu9JI}cUh_9eeTP)MBC0D4+>}7Wl$YpadtW&!H{R>br6U&fi z4gcuAY1M>s#+|a9kuNtDfzkBy9{(>P)!Qp8Cmtd{nvBsy^)?kcIM&jP?=ScwCQszf zU+yEIFmbfS<_CNY?5A!-_&6x~ZGar6@JMlp>b}6O#_xh-Si!Kkh3{A?*TO) zz22##+9ksVIoxr$a37_VjsxId2`fDlQo3WOYxI=v!zD9lddkA zn)nGu;2c$mIbB1!B4Jvw%uGrf)fmg1SklXoC=&WW(Ka%8DfOmBu;^LvqQq=tiX-)@ zZQ@Hqk=Itkg$49UO|u@8idrMjmn-?ZKG8N!yjIA*Op$MokP78xH#~7;lvo{bf3^<4 zq_wE_rJ*@%T3atzV&aN~VuhFgpH$hK6>yAl9e?pF#7-mG7X7pj=_$T>WVWGeV=1>2 zN@?;7HN}?&+S}i0pT9BQvz&`K>FkJpvPAZ_`;6IUm&6*bI$B6oY z{TQ)QCqC7QA0+RtT=CJQX=k1^@!o~cDz$+xQ7vCGmhciqyFM7~6OYiPbfqcHALO`J z8v2a=HyCl#q*ZqQF+9Vq+u}w~mtoqbnnU&iU>7!heWZ~6v;>Wjw>?8F9@X7!p_GaU zgAWbcZI3g@?*ee95#a z4^PyQ8+}w&I^qj&?fYcoNm`jEw=~x5`@i{%x%lB*@kpn`tb69~$IXg*ilWVzlaDh_ zac?Gh$)Q~aUKH_WG+(S0oP6n#&%0GslhN|5D7^kJ8`i5mHP2c~WPlO7}tSK~(JGV@BBuBUzIN*+gx+bW9S z5k_g7R4xw6)VYK>ly9o-is z%O55D?0Rg&#b{)EKxBEzDGPf)Zg~DnsW=gRLxWpKEiEZk*(E2xoPBLP{7rb~j-wqJ zx`c-h)x8|6lIj%k5z2cLd1b^`wsw$hWQ?-Q-Z!E5T*$Yo*+A!LRjr$qZ&`!Rqm%Q# z`MGJyxW7x+-NJKy@a)Wif&kC*k~0=FAV=silPEs=i7T*PQP7Xji*yUPzvr{~(op3{ z~iRSttZpKsL<;&1{5AySF*ZFhy_UDz$;h})E!=JM3#-)2y| zkQK*Xr)EuZyE$~MH1{MWkDkk95r0;N8kDj>;Px0WZx{SpC-ASO-Ul*Wlrw)9f8|}5 z#k=2#=$x#3f8nmfz-G4~nGB{dIKm@`@?Q2-oqk;%TaD1>i?}M!#8Tbj7Qgc9fZ^IL z$0XFs70Htr8qsEN)PtCY$+k2d>K&(H?;G5y=SuvFm23p@FHMdFDFoFyy)6naEh>Q7 z>LOfwh{xXU9b_A^wP?fco*tzH=` z35alO;>jIJ2bamUz-U=VCn2nNVJ}*GZ)+8UafgT4M~pdo`<>K}vDJK* zux?mWabfljTCy`RQ?TE>V|&kIVF7$bFosTvEe=Z~9VZv$ zBCe_TZx+nOHdIQZ?;erM$T;MZ>u^Re5WZBdc-q2Vr$7Ip6DpLtRce$*@9sz3dt`U; zi@^!+yF=-MSKsU0HC3cU)^rm}Unr^PaIO6QHLps4P;`b{+VSCzo#0f#+Pwxlu4Wfy z(G0i1N~wv{DXHu=pbwjjxL6djBzsJ!!gjPX&IHnV;vyTB|U>rH_hY z7WQ==YV>%4i8o%JC#{&sP%Y)WY)0%=J^`JkpzP>F^9{AxBe`19f5m39;tNjGa7!)1 
+volumes:
+  data_dir_server:
+    driver: local
+    driver_opts:
+      type: none
+      o: bind
+      device: /data/server
+  data_dir_zookeeper:
+    driver: local
+    driver_opts:
+      type: none
+      o: bind
+      device: /data/zookeeper
+
+services:
+  serving-server:
+    image: "federatedai/serving-server:${SERVING_TAG}"
+    ports:
+      - "8000:8000"
+    volumes:
+      - ./confs/serving-server/conf/serving-server.properties:/data/projects/fate-serving/serving-server/conf/serving-server.properties
+      - ./data/server:/data/projects/fate-serving/serving-server/.fate
+      - /etc/localtime:/etc/localtime:ro
+    networks:
+      - fate-serving-network
+
+  serving-proxy:
+    image: "federatedai/serving-proxy:${SERVING_TAG}"
+    ports:
+      - "8059:8059"
+      - "8869:8869"
+    expose:
+      - 8879
+    volumes:
+      - ./confs/serving-proxy/conf/application.properties:/data/projects/fate-serving/serving-proxy/conf/application.properties
+      - ./confs/serving-proxy/conf/route_table.json:/data/projects/fate-serving/serving-proxy/conf/route_table.json
+      - /etc/localtime:/etc/localtime:ro
+    networks:
+      - fate-serving-network
+
+  serving-zookeeper:
+    image: "bitnami/zookeeper:3.7.0"
+    user: root
+    ports:
+      - "2181:2181"
+      - "2888"
+      - "3888"
+    volumes:
+      - ./data/zookeeper:/bitnami/zookeeper
+      - /etc/localtime:/etc/localtime:ro
+    environment:
+      ALLOW_ANONYMOUS_LOGIN: "yes"
+    networks:
+      - fate-serving-network
+
+  serving-admin:
+    image: "federatedai/serving-admin:${SERVING_TAG}"
+    ports:
+      - "8350:8350"
+    volumes:
+      - ./confs/serving-admin/conf/application.properties:/data/projects/fate-serving/serving-admin/conf/application.properties
+      - /etc/localtime:/etc/localtime:ro
+    networks:
+      - fate-serving-network
diff --git a/deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-admin/conf/application.properties b/deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-admin/conf/application.properties
new file mode 100644
index 0000000000..0333ede207
--- /dev/null
+++ b/deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-admin/conf/application.properties
@@ -0,0 +1,32 @@
+#
+# Copyright 2019 The FATE Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
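+# serving-admin resolves ZooKeeper by its compose service name (zk.url below);
+# server.port matches the "8350:8350" port mapping in docker-compose-serving.yml.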
+#
+
+server.port=8350
+# cache
+#local.cache.expire=300
+# zk
+zk.url=serving-zookeeper:2181
+# zk acl
+#acl.enable=false
+#acl.username=
+#acl.password=
+# grpc
+#grpc.timeout=5000
+# username & password
+admin.username=
+admin.password=
+
+spring.mvc.pathmatch.matching-strategy=ANT_PATH_MATCHER
\ No newline at end of file
diff --git a/deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-proxy/conf/application.properties b/deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-proxy/conf/application.properties
new file mode 100644
index 0000000000..979cd0b66b
--- /dev/null
+++ b/deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-proxy/conf/application.properties
@@ -0,0 +1,58 @@
+#
+# Copyright 2019 The FATE Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# coordinator is the same as the Party ID
+coordinator=
+server.port=8059
+#inference.service.name=serving
+# routeType: random or consistent
+#routeType=random
+#route.table=/data/projects/fate-serving/serving-proxy/conf/route_table.json
+#auth.file=/data/projects/fate-serving/serving-proxy/conf/auth_config.json
+# zk router
+#useZkRouter=true
+zk.url=serving-zookeeper:2181
+useZkRouter=true
+# zk acl
+#acl.enable=false
+#acl.username=
+#acl.password=
+# intra-partyid port
+#proxy.grpc.intra.port=8879
+# inter-partyid port
+#proxy.grpc.inter.port=8869
+
+# grpc
+# only PLAINTEXT and TLS are supported (mutual TLS is used here when TLS authentication is enabled)
+#proxy.grpc.inter.negotiationType=PLAINTEXT
+# only needs to be set when negotiationType is TLS
+#proxy.grpc.inter.CA.file=/data/projects/fate-serving/serving-proxy/conf/ssl/ca.crt
+# negotiated client side certificates
+#proxy.grpc.inter.client.certChain.file=/data/projects/fate-serving/serving-proxy/conf/ssl/client.crt
+#proxy.grpc.inter.client.privateKey.file=/data/projects/fate-serving/serving-proxy/conf/ssl/client.pem
+# negotiated server side certificates
+#proxy.grpc.inter.server.certChain.file=/data/projects/fate-serving/serving-proxy/conf/ssl/server.crt
+#proxy.grpc.inter.server.privateKey.file=/data/projects/fate-serving/serving-proxy/conf/ssl/server.pem
+
+#proxy.grpc.inference.timeout=3000
+#proxy.grpc.inference.async.timeout=1000
+#proxy.grpc.unaryCall.timeout=3000
+#proxy.grpc.threadpool.coresize=50
+#proxy.grpc.threadpool.maxsize=100
+#proxy.grpc.threadpool.queuesize=10
+#proxy.async.timeout=5000
+#proxy.async.coresize=10
+#proxy.async.maxsize=100
+#proxy.grpc.batch.inference.timeout=10000
diff --git a/deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-proxy/conf/route_table.json b/deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-proxy/conf/route_table.json
new file mode 100644
index 0000000000..02be84c841
--- /dev/null
+++ b/deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-proxy/conf/route_table.json
@@ -0,0 +1,30 @@
+{
+    "route_table": {
+        "default": {
+            "default": [
+                {
+                    "ip": "other-proxy",
+                    "port": 8869
+                }
+            ]
+        },
+        "9999":
{ + "default": [ + { + "ip": "serving-proxy", + "port": 8059 + } + ], + "serving": [ + { + "ip": "serving-server", + "port": 8000 + } + ] + } + }, + "permission": { + "default_allow": true + } + } + \ No newline at end of file diff --git a/deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-server/conf/serving-server.properties b/deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-server/conf/serving-server.properties new file mode 100644 index 0000000000..6ef922b7f8 --- /dev/null +++ b/deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-server/conf/serving-server.properties @@ -0,0 +1,56 @@ +# +# Copyright 2019 The FATE Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +port=8000 +#serviceRoleName=serving +# cache +#remoteModelInferenceResultCacheSwitch=false +#cache.type=local +#model.cache.path=/data/projects/fate-serving/serving-server +# local cache +#local.cache.maxsize=10000 +#local.cache.expire=30 +#local.cache.interval=3 +# external cache +#redis.ip= +#redis.port= +### configure this parameter to use cluster mode +#redis.cluster.nodes=127.0.0.1:6379,127.0.0.1:6380,127.0.0.1:6381,127.0.0.1:6382,127.0.0.1:6383,127.0.0.1:6384 +### this password is common in stand-alone mode and cluster mode +#redis.password= +#redis.timeout=10 +#redis.expire=3000 +#redis.maxTotal=100 +#redis.maxIdle=100 +# external subsystem +proxy=serving-proxy:8879 +# adapter +feature.single.adaptor=com.webank.ai.fate.serving.adaptor.dataaccess.MockAdapter +feature.batch.adaptor=com.webank.ai.fate.serving.adaptor.dataaccess.MockBatchAdapter +http.adapter.url=http://127.0.0.1:9380/v1/http/adapter/getFeature +# model transfer +model.transfer.url=http://127.0.0.1:9380/v1/model/transfer +# zk router +zk.url=serving-zookeeper:2181 +useRegister=true +useZkRouter=true +# zk acl +#acl.enable=false +#acl.username= +#acl.password= + +# LR algorithm config +#lr.split.size=500 +#lr.use.parallel=false \ No newline at end of file diff --git a/deploy/docker-compose/docker-deploy/test.sh b/deploy/docker-compose/docker-deploy/test.sh new file mode 100644 index 0000000000..c46a260690 --- /dev/null +++ b/deploy/docker-compose/docker-deploy/test.sh @@ -0,0 +1,182 @@ +#!/bin/bash + +# Copyright 2019-2020 VMware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# you may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+BASEDIR=$(dirname "$0")
+cd $BASEDIR
+WORKINGDIR=$(pwd)
+
+# load image tags (.env) and party definitions (parties.conf)
+source ${WORKINGDIR}/.env
+source ${WORKINGDIR}/parties.conf
+
+cd ${WORKINGDIR}
+
+# look up a party's ip in the party_list / party_ip_list arrays from parties.conf
+get_party_ip() {
+    target_party_id=$1
+    for ((i = 0; i < ${#party_list[*]}; i++)); do
+        if [ "${party_list[$i]}" = "$target_party_id" ]; then
+            target_party_ip=${party_ip_list[$i]}
+        fi
+    done
+    # "return" only carries a numeric exit status in bash; emit the ip on stdout
+    echo "$target_party_ip"
+}
+
+Test() {
+
+    while [ "$1" != "" ]; do
+        case $1 in
+        toy_example)
+            shift
+            if [ "$1" = "" ] || [ "$2" = "" ]; then
+                echo "No party id was provided, please check your arguments"
+                echo "Example: "
+                echo "  'bash test.sh toy_example 9999 10000'"
+                exit 1
+            fi
+            toy_example $@
+            break
+            ;;
+        min_test_task)
+            shift
+            min_test_task $@
+            break
+            ;;
+        serving)
+            shift
+            serving $@
+            break
+            ;;
+        esac
+        shift
+    done
+
+}
+
+toy_example() {
+    echo "start test toy_example"
+    guest=$1
+    host=$2
+    echo "guest_id: "$guest
+    echo "host_id: "$host
+
+    # the toy example is driven from the guest party's host
+    target_party_id=$1
+    echo "target_party_id: "$target_party_id
+    target_party_ip=$(get_party_ip "$target_party_id")
+    echo "*********************start docker log***************************"
+    echo $user@$target_party_ip
+    ssh -tt $user@$target_party_ip <
[remainder of test.sh, backends/eggroll/conf/README.md, and applicationContext-proxy.xml omitted: their markup was stripped and the content is not recoverable]
\ No newline at end of file
diff --git a/deploy/docker-compose/docker-deploy/training_template/backends/eggroll/conf/eggroll.properties b/deploy/docker-compose/docker-deploy/training_template/backends/eggroll/conf/eggroll.properties
new file mode 100644
index 0000000000..c762c009df
--- /dev/null
+++ b/deploy/docker-compose/docker-deploy/training_template/backends/eggroll/conf/eggroll.properties
@@ -0,0 +1,70 @@
+#
+# Copyright (c) 2019 - now, Eggroll Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
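+# NOTE: the empty values below (JDBC url/username/password, cluster- and
+# node-manager hosts/ports, process tag) are placeholders, presumably filled
+# in by the deployment scripts at config-generation time.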
+#h2
+
+[eggroll]
+# core
+eggroll.resourcemanager.nodemanager.net.device=eth0
+eggroll.resourcemanager.nodemanager.gpu.num.shell=nvidia.sh
+#eggroll.resourcemanager.clustermanager.jdbc.driver.class.name=org.h2.Driver
+eggroll.resourcemanager.clustermanager.jdbc.driver.class.name=com.mysql.cj.jdbc.Driver
+#eggroll.resourcemanager.clustermanager.jdbc.url=jdbc:h2:./data/meta_h2/eggroll_meta.h2;AUTO_SERVER=TRUE;MODE=MySQL;DATABASE_TO_LOWER=TRUE;SCHEMA=eggroll_meta;
+eggroll.resourcemanager.clustermanager.jdbc.url=
+eggroll.resourcemanager.clustermanager.jdbc.username=
+eggroll.resourcemanager.clustermanager.jdbc.password=
+
+eggroll.resourcemanager.clustermanager.host=
+eggroll.resourcemanager.clustermanager.port=
+eggroll.resourcemanager.nodemanager.host=
+eggroll.resourcemanager.nodemanager.port=
+eggroll.resourcemanager.process.tag=
+
+# dashboard
+eggroll.dashboard.server.port=8083
+eggroll.security.session.expired.time=30
+eggroll.security.login.username=admin
+eggroll.security.login.password=admin
+eggroll.security.encrypt.private_key=
+eggroll.security.encrypt.enable=false
+
+eggroll.data.dir=/data/projects/fate/eggroll/data/
+eggroll.logs.dir=/data/projects/fate/eggroll/logs/
+
+eggroll.bootstrap.root.script=bin/eggroll_boot.sh
+
+eggroll.resourcemanager.bootstrap.egg_pair.exepath=bin/roll_pair/egg_pair_bootstrap.sh
+eggroll.resourcemanager.bootstrap.egg_pair.venv=
+eggroll.resourcemanager.bootstrap.egg_pair.pythonpath=python
+eggroll.resourcemanager.bootstrap.egg_pair.filepath=python/eggroll/computing/egg_pair/egg_pair.py
+eggroll.resourcemanager.bootstrap.egg_pair.ld_library_path=
+
+# session
+eggroll.session.processors.per.node=4
+
+# deepspeed
+## where the deepspeed containers' data is located; required for deepspeed
+#eggroll.resourcemanager.nodemanager.containers.data.dir=
+## python executable used inside the deepspeed container; falls back to the egg_pair venv/bin/python
+#eggroll.container.python.exec=
+## provided by the submit option for now
+#eggroll.container.deepspeed.script.path=
+eggroll.container.deepspeed.distributed.backend=nccl
+## defaults to the cluster manager endpoint
+#eggroll.container.deepspeed.distributed.store.host=
+#eggroll.container.deepspeed.distributed.store.port=
+
+
+
+
diff --git a/deploy/docker-compose/docker-deploy/training_template/backends/eggroll/conf/log4j2.properties b/deploy/docker-compose/docker-deploy/training_template/backends/eggroll/conf/log4j2.properties
new file mode 100644
index 0000000000..03b1bdfd1b
--- /dev/null
+++ b/deploy/docker-compose/docker-deploy/training_template/backends/eggroll/conf/log4j2.properties
@@ -0,0 +1,108 @@
+#
+# Copyright (c) 2019 - now, Eggroll Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
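+# Log directory, file name and level come from environment variables with
+# defaults: EGGROLL_LOG_DIR (logs/), EGGROLL_LOG_FILE (eggroll),
+# EGGROLL_LOG_LEVEL (INFO); see the ${env:...} lookups below.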
+#
+name=PropertiesConfig
+property.logDir=${env:EGGROLL_LOG_DIR:-logs/}/${env:EGGROLL_SESSION_ID:-eggroll}
+property.logFile=${env:EGGROLL_LOG_FILE:-eggroll}
+property.logPattern=[%-5level][%r][%d{yyyy-MM-dd} %d{HH:mm:ss,SSS}][%t,pid:%pid,tid:%T][%c{1.}:%L] - %msg%n
+# console
+appender.console.type=Console
+appender.console.name=STDOUT
+appender.console.layout.type=PatternLayout
+appender.console.layout.pattern=${logPattern}
+# default file
+appender.file.type=RollingFile
+appender.file.name=LOGFILE
+appender.file.fileName=${logDir}/${logFile}.jvm.log
+appender.file.filePattern=${logDir}/%d{yyyy}/%d{MM}/%d{dd}/${logFile}.jvm.log.%d{yyyy-MM-dd-HH}
+appender.file.layout.type=PatternLayout
+appender.file.layout.pattern=${logPattern}
+appender.file.policies.type=Policies
+appender.file.policies.time.type=TimeBasedTriggeringPolicy
+appender.file.policies.time.interval=1
+appender.file.policies.time.modulate=true
+appender.file.strategy.type=DefaultRolloverStrategy
+# error file
+appender.errorlog.type=RollingFile
+appender.errorlog.name=ERRORLOG
+appender.errorlog.fileName=${logDir}/${logFile}.jvm.err.log
+appender.errorlog.filePattern=${logDir}/%d{yyyy}/%d{MM}/%d{dd}/${logFile}.jvm.err.log.%d{yyyy-MM-dd-HH}
+appender.errorlog.layout.type=PatternLayout
+appender.errorlog.layout.pattern=${logPattern}
+appender.errorlog.policies.type=Policies
+appender.errorlog.policies.time.type=TimeBasedTriggeringPolicy
+appender.errorlog.policies.time.interval=1
+appender.errorlog.policies.time.modulate=true
+appender.errorlog.strategy.type=DefaultRolloverStrategy
+# audit
+appender.audit.type=RollingFile
+appender.audit.name=LOGAUDIT
+appender.audit.fileName=${logDir}/${logFile}-audit.log
+appender.audit.filePattern=${logDir}/%d{yyyy}/%d{MM}/%d{dd}/${logFile}-audit.log.%d{yyyy-MM-dd-HH}
+appender.audit.layout.type=PatternLayout
+appender.audit.layout.pattern=${logPattern}
+appender.audit.policies.type=Policies
+appender.audit.policies.time.type=TimeBasedTriggeringPolicy
+appender.audit.policies.time.interval=1
+appender.audit.policies.time.modulate=true
+appender.audit.strategy.type=DefaultRolloverStrategy
+
+
+# loggers
+loggers=file, errorlog, netty, audit, httpclient, httpclientwire
+
+# logger - file
+logger.file.name=file
+logger.file.level=${env:EGGROLL_LOG_LEVEL:-INFO}
+logger.file.appenderRefs=file
+logger.file.appenderRef.file.ref=LOGFILE
+logger.file.additivity=false
+
+# logger - error
+logger.errorlog.name=errorlog
+logger.errorlog.level=ERROR
+logger.errorlog.appenderRefs=errorlog
+logger.errorlog.appenderRef.file.ref=ERRORLOG
+logger.errorlog.additivity=false
+
+# logger - root
+rootLogger.level=${env:EGGROLL_LOG_LEVEL:-INFO}
+rootLogger.appenderRefs=file, stdout, errorlog
+rootLogger.appenderRef.file.ref=LOGFILE
+rootLogger.appenderRef.errorlog.ref=ERRORLOG
+rootLogger.appenderRef.errorlog.level=ERROR
+
+# Uncomment the following line if you always want logs on console.
+# Otherwise you can enable it by setting EGGROLL_LOG_LEVEL<=DEBUG or EGGROLL_LOG_CONSOLE=1 in system env +#rootLogger.appenderRef.stdout.ref=STDOUT + +# logger - netty +logger.netty.name=io.grpc.netty +logger.netty.level=INFO + +# logger - audit +logger.audit.name=audit +logger.audit.level=info +logger.audit.appenderRefs=audit +logger.audit.appenderRef.file.ref=LOGAUDIT +logger.audit.additivity=false + +# logger - HttpClient +logger.httpclient.name=org.apache.commons.httpclient +logger.httpclient.level=INFO + +logger.httpclientwire.name=httpclient.wire +logger.httpclientwire.level=INFO + diff --git a/deploy/docker-compose/docker-deploy/training_template/backends/eggroll/conf/node-extend-env.properties b/deploy/docker-compose/docker-deploy/training_template/backends/eggroll/conf/node-extend-env.properties new file mode 100644 index 0000000000..e69de29bb2 diff --git a/deploy/docker-compose/docker-deploy/training_template/backends/eggroll/conf/route_table.json b/deploy/docker-compose/docker-deploy/training_template/backends/eggroll/conf/route_table.json new file mode 100644 index 0000000000..38f08b1432 --- /dev/null +++ b/deploy/docker-compose/docker-deploy/training_template/backends/eggroll/conf/route_table.json @@ -0,0 +1,28 @@ + +{ + "route_table": + { + "10001": + { + "default":[ + { + "port": 9370, + "ip": "127.0.0.1" + } + ] + }, + "10002": + { + "default":[ + { + "port": 9470, + "ip": "127.0.0.1" + } + ] + } + }, + "permission": + { + "default_allow": true + } +} \ No newline at end of file diff --git a/deploy/docker-compose/docker-deploy/training_template/backends/eggroll/conf/whitelist.json b/deploy/docker-compose/docker-deploy/training_template/backends/eggroll/conf/whitelist.json new file mode 100644 index 0000000000..9a8230fd1c --- /dev/null +++ b/deploy/docker-compose/docker-deploy/training_template/backends/eggroll/conf/whitelist.json @@ -0,0 +1,245 @@ +{ + "builtins": [ + "int", + "list", + "set", + "slice" + ], + "collections": [ + "defaultdict", + "OrderedDict" + ], + "eggroll.core.transfer_model": [ + "ErRollSiteHeader" + ], + "eggroll.roll_pair.task.storage": [ + "BSS" + ], + "federatedml.cipher_compressor.compressor": [ + "PackingCipherTensor", + "NormalCipherPackage", + "PackingCipherTensorPackage" + ], + "federatedml.ensemble.basic_algorithms.decision_tree.tree_core.feature_histogram": [ + "HistogramBag", + "FeatureHistogramWeights" + ], + "federatedml.ensemble.basic_algorithms.decision_tree.tree_core.feature_importance": [ + "FeatureImportance" + ], + "federatedml.ensemble.basic_algorithms.decision_tree.tree_core.g_h_optim": [ + "SplitInfoPackage" + ], + "federatedml.ensemble.basic_algorithms.decision_tree.tree_core.node": [ + "Node" + ], + "federatedml.ensemble.basic_algorithms.decision_tree.tree_core.splitter": [ + "SplitInfo" + ], + "federatedml.evaluation.performance_recorder": [ + "PerformanceRecorder" + ], + "federatedml.feature.binning.bin_result": [ + "BinColResults" + ], + "federatedml.feature.binning.optimal_binning.bucket_info": [ + "Bucket" + ], + "federatedml.feature.binning.optimal_binning.heap": [ + "MinHeap", + "IvHeapNode", + "GiniHeapNode", + "ChiSquareHeapNode" + ], + "federatedml.feature.binning.quantile_summaries": [ + "SparseQuantileSummaries", + "Stats", + "QuantileSummaries" + ], + "federatedml.feature.fate_element_type": [ + "NoneType" + ], + "federatedml.feature.homo_feature_binning.homo_binning_base": [ + "SplitPointNode" + ], + "federatedml.feature.instance": [ + "Instance" + ], + "federatedml.feature.one_hot_encoder": [ + "TransferPair" + 
], + "federatedml.feature.sparse_vector": [ + "SparseVector" + ], + "federatedml.framework.weights": [ + "NumpyWeights", + "TransferableWeights", + "NumericWeights", + "ListWeights", + "DictWeights", + "OrderDictWeights" + ], + "federatedml.linear_model.linear_model_weight": [ + "LinearModelWeights" + ], + "federatedml.secureprotol.fixedpoint": [ + "FixedPointNumber" + ], + "federatedml.secureprotol.number_theory.field.integers_modulo_prime_field": [ + "IntegersModuloPrimeElement" + ], + "federatedml.secureprotol.number_theory.group.twisted_edwards_curve_group": [ + "TwistedEdwardsCurveElement" + ], + "federatedml.secureprotol.symmetric_encryption.cryptor_executor": [ + "CryptoExecutor" + ], + "federatedml.secureprotol.symmetric_encryption.pohlig_hellman_encryption": [ + "PohligHellmanCiphertext", + "PohligHellmanCipherKey" + ], + "federatedml.statistic.intersect.intersect_preprocess": [ + "BitArray" + ], + "federatedml.statistic.statics": [ + "SummaryStatistics" + ], + "gmpy2": [ + "from_binary" + ], + "numpy": [ + "ndarray", + "dtype" + ], + "numpy.core.multiarray": [ + "scalar", + "_reconstruct" + ], + "numpy.core.numeric": [ + "_frombuffer" + ], + "tensorflow.python.framework.ops": [ + "convert_to_tensor" + ], + "torch._utils": [ + "_rebuild_tensor_v2" + ], + "torch.storage": [ + "_load_from_bytes" + ], + "ipcl_python.bindings.ipcl_bindings": [ + "ipclPublicKey" + ], + "ipcl_python.ipcl_python": [ + "PaillierPublicKey", + "PaillierEncryptedNumber" + ], + "torch": [ + "Size" + ], + "fate.arch.tensor.storage.local.device.cpu.plain": [ + "_TorchStorage" + ], + "fate.arch.tensor.types._dtype": [ + "dtype" + ], + "fate.arch.tensor.types._shape": [ + "DAxis", + "Shape" + ], + "pandas.core.frame": [ + "DataFrame" + ], + "pandas.core.indexes.base": [ + "Index", + "_new_Index" + ], + "pandas.core.indexes.range": [ + "RangeIndex" + ], + "pandas.core.series": [ + "Series" + ], + "pandas.core.internals.managers": [ + "BlockManager", + "SingleBlockManager" + ], + "fate.arch.dataframe.manager.data_manager": [ + "DataManager" + ], + "fate.arch.dataframe.manager.schema_manager": [ + "SchemaManager", + "Schema" + ], + "fate.arch.dataframe.manager.block_manager":[ + "BlockManager", + "IndexBlock", + "BlockType", + "Int64Block", + "Float32Block", + "Float64Block", + "Int32Block", + "BoolBlock", + "NPObjectBlock", + "PHETensorBlock" + ], + "fate.arch.tensor.inside._op_quantile":[ + "GKSummary" + ], + "fate.arch.protocol.phe.paillier":[ + "Coder", + "SK", + "PK", + "evaluator" + ], + "fate.arch.protocol.phe.ou":[ + "Coder", + "SK", + "PK", + "evaluator" + ], + "fate.arch.tensor.phe._tensor":[ + "PHETensorEncoded", "PHETensor" + ], + "fate.arch.tensor.phe._keypair":[ + "PHETensorCoder" + ], + "fate_utils.quantile":[ + "QuantileSummaryStream" + ], + "fate_utils.paillier":[ + "Coder","Coders", "FixedpointVector", "PK", "FixedpointPaillierVector", "CiphertextVector","PlaintextVector" + ], + "fate_utils.ou":[ + "Coder", "Coders", "FixedpointVector", "PK", "FixedpointPaillierVector", "CiphertextVector","PlaintextVector" + ], + "fate.arch.unify._infra_def":[ + "device" + ], + "fate.arch.histogram._histogram_splits": [ + "HistogramSplits" + ], + "fate.arch.histogram.values._values": [ + "HistogramValuesContainer" + ], + "fate.arch.histogram.values._plain": [ + "HistogramPlainValues" + ], + "fate.arch.histogram.values._cipher":[ + "HistogramEncryptedValues" + ], + "fate.arch.protocol.phe.mock": [ + "PK", "SK", "FV", "EV", "Coder", "evaluator" + ], + "fate.arch.histogram.histogram":[ + "HistogramSplits", 
"HistogramPlainValues", "HistogramEncryptedValues" + ], + "torch":[ + "float32", + "int64", + "int32", + "device", + "float64", + "Size" + ] +} diff --git a/deploy/docker-compose/docker-deploy/training_template/backends/spark/hadoop/core-site.xml b/deploy/docker-compose/docker-deploy/training_template/backends/spark/hadoop/core-site.xml new file mode 100644 index 0000000000..77195970e1 --- /dev/null +++ b/deploy/docker-compose/docker-deploy/training_template/backends/spark/hadoop/core-site.xml @@ -0,0 +1,7 @@ + + + + fs.default.name + hdfs://0.0.0.0:9000 + + diff --git a/deploy/docker-compose/docker-deploy/training_template/backends/spark/hadoop/hadoop.env b/deploy/docker-compose/docker-deploy/training_template/backends/spark/hadoop/hadoop.env new file mode 100644 index 0000000000..95b3d10289 --- /dev/null +++ b/deploy/docker-compose/docker-deploy/training_template/backends/spark/hadoop/hadoop.env @@ -0,0 +1,43 @@ +CORE_CONF_fs_defaultFS=hdfs://namenode:9000 +CORE_CONF_hadoop_http_staticuser_user=root +CORE_CONF_hadoop_proxyuser_hue_hosts=* +CORE_CONF_hadoop_proxyuser_hue_groups=* +CORE_CONF_io_compression_codecs=org.apache.hadoop.io.compress.SnappyCodec + +HDFS_CONF_dfs_webhdfs_enabled=true +HDFS_CONF_dfs_permissions_enabled=false +HDFS_CONF_dfs_namenode_datanode_registration_ip___hostname___check=false + +YARN_CONF_yarn_log___aggregation___enable=true +YARN_CONF_yarn_log_server_url=http://historyserver:8188/applicationhistory/logs/ +YARN_CONF_yarn_resourcemanager_recovery_enabled=true +YARN_CONF_yarn_resourcemanager_store_class=org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore +YARN_CONF_yarn_resourcemanager_scheduler_class=org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler +YARN_CONF_yarn_scheduler_capacity_root_default_maximum___allocation___mb=8192 +YARN_CONF_yarn_scheduler_capacity_root_default_maximum___allocation___vcores=4 +YARN_CONF_yarn_resourcemanager_fs_state___store_uri=/rmstate +YARN_CONF_yarn_resourcemanager_system___metrics___publisher_enabled=true +YARN_CONF_yarn_resourcemanager_hostname=resourcemanager +YARN_CONF_yarn_resourcemanager_address=resourcemanager:8032 +YARN_CONF_yarn_resourcemanager_scheduler_address=resourcemanager:8030 +YARN_CONF_yarn_resourcemanager_resource__tracker_address=resourcemanager:8031 +YARN_CONF_yarn_timeline___service_enabled=true +YARN_CONF_yarn_timeline___service_generic___application___history_enabled=true +YARN_CONF_yarn_timeline___service_hostname=historyserver +YARN_CONF_mapreduce_map_output_compress=true +YARN_CONF_mapred_map_output_compress_codec=org.apache.hadoop.io.compress.SnappyCodec +YARN_CONF_yarn_nodemanager_resource_memory___mb=16384 +YARN_CONF_yarn_nodemanager_resource_cpu___vcores=8 +YARN_CONF_yarn_nodemanager_disk___health___checker_max___disk___utilization___per___disk___percentage=98.5 +YARN_CONF_yarn_nodemanager_remote___app___log___dir=/app-logs +YARN_CONF_yarn_nodemanager_aux___services=mapreduce_shuffle + +MAPRED_CONF_mapreduce_framework_name=yarn +MAPRED_CONF_mapred_child_java_opts=-Xmx4096m +MAPRED_CONF_mapreduce_map_memory_mb=4096 +MAPRED_CONF_mapreduce_reduce_memory_mb=8192 +MAPRED_CONF_mapreduce_map_java_opts=-Xmx3072m +MAPRED_CONF_mapreduce_reduce_java_opts=-Xmx6144m +MAPRED_CONF_yarn_app_mapreduce_am_env=HADOOP_MAPRED_HOME=/opt/hadoop-3.2.1/ +MAPRED_CONF_mapreduce_map_env=HADOOP_MAPRED_HOME=/opt/hadoop-3.2.1/ +MAPRED_CONF_mapreduce_reduce_env=HADOOP_MAPRED_HOME=/opt/hadoop-3.2.1/ diff --git 
new file mode 100644
index 0000000000..e448fd2863
--- /dev/null
+++ b/deploy/docker-compose/docker-deploy/training_template/backends/spark/nginx/nginx.conf
@@ -0,0 +1,68 @@
+
+#user nobody;
+worker_processes 2;
+
+#error_log logs/error.log;
+#error_log logs/error.log notice;
+error_log /dev/stdout info;
+error_log /dev/stderr error;
+
+#pid logs/nginx.pid;
+
+
+events {
+    worker_connections 1024;
+}
+
+
+http {
+    include mime.types;
+    default_type application/octet-stream;
+
+    log_format main '$remote_addr - $remote_user [$time_local] "$request" "$http_host" '
+                    '$status $body_bytes_sent "$http_referer" '
+                    '"$http_user_agent" "$http_x_forwarded_for" '
+                    '$upstream_status $upstream_addr '
+                    '$request_time $upstream_response_time'
+                    ;
+
+    access_log logs/access.log main;
+
+    sendfile on;
+    #tcp_nopush on;
+
+    keepalive_timeout 65;
+    underscores_in_headers on;
+
+    #gzip on;
+    lua_package_path "$prefix/lua/?.lua;;";
+    init_worker_by_lua_file 'lua/initialize.lua';
+
+    upstream http_cluster {
+        server fateflow:9380;   # just a placeholder address; balancer.lua selects the real upstream
+        balancer_by_lua_file 'lua/balancer.lua';
+    }
+
+    upstream grpc_cluster {
+        server fateflow:9360;   # just a placeholder address; balancer.lua selects the real upstream
+        balancer_by_lua_file 'lua/balancer.lua';
+    }
+
+    include vhost/*.conf;
+}
+
+stream {
+    log_format tcp_proxy '$remote_addr [$time_local] '
+                         '$protocol $status $bytes_sent $bytes_received '
+                         '$session_time "$upstream_addr" '
+                         '"$upstream_bytes_sent" "$upstream_bytes_received" "$upstream_connect_time"';
+
+    access_log logs/tcp-access.log tcp_proxy;
+
+    server {
+        listen 9128;
+        proxy_connect_timeout 1s;
+        proxy_timeout 3s;
+        proxy_pass 127.0.0.1:3128;
+    }
+}
diff --git a/deploy/docker-compose/docker-deploy/training_template/backends/spark/nginx/route_table.yaml b/deploy/docker-compose/docker-deploy/training_template/backends/spark/nginx/route_table.yaml
new file mode 100644
index 0000000000..57ae70325a
--- /dev/null
+++ b/deploy/docker-compose/docker-deploy/training_template/backends/spark/nginx/route_table.yaml
@@ -0,0 +1,27 @@
+default:
+  proxy:
+    - host: 127.0.0.1
+      http_port: 9300
+      grpc_port: 9310
+local:
+  test_proxy:
+    - host: 127.0.0.1
+      http_port: 9302
+9999:
+  proxy:
+    - host: 127.0.0.1
+      http_port: 9300
+      grpc_port: 9310
+  fateflow:
+    - host: 127.0.0.1
+      http_port: 9380
+      grpc_port: 9360
+10000:
+  proxy:
+    - host: 127.0.0.1
+      http_port: 9300
+      grpc_port: 9310
+  fateflow:
+    - host: 127.0.0.1
+      http_port: 9380
+      grpc_port: 9360
diff --git a/deploy/docker-compose/docker-deploy/training_template/backends/spark/pulsar/standalone.conf b/deploy/docker-compose/docker-deploy/training_template/backends/spark/pulsar/standalone.conf
new file mode 100644
index 0000000000..89793c753d
--- /dev/null
+++ b/deploy/docker-compose/docker-deploy/training_template/backends/spark/pulsar/standalone.conf
@@ -0,0 +1,899 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +### --- General broker settings --- ### + +# Message Size +maxMessageSize=134217728 + +# Zookeeper quorum connection string +zookeeperServers= + +# Configuration Store connection string +configurationStoreServers= + +brokerServicePort=6650 + + +# Port to use to server HTTP request +webServicePort=8080 + +# Hostname or IP address the service binds on, default is 0.0.0.0. +bindAddress=0.0.0.0 + +# Hostname or IP address the service advertises to the outside world. If not set, the value of InetAddress.getLocalHost().getHostName() is used. +advertisedAddress= + +# Number of threads to use for Netty IO. Default is set to 2 * Runtime.getRuntime().availableProcessors() +numIOThreads= + +# Number of threads to use for ordered executor. The ordered executor is used to operate with zookeeper, +# such as init zookeeper client, get namespace policies from zookeeper etc. It also used to split bundle. Default is 8 +numOrderedExecutorThreads=8 + +# Number of threads to use for HTTP requests processing. Default is set to 2 * Runtime.getRuntime().availableProcessors() +numHttpServerThreads= + +# Number of thread pool size to use for pulsar broker service. +# The executor in thread pool will do basic broker operation like load/unload bundle, update managedLedgerConfig, +# update topic/subscription/replicator message dispatch rate, do leader election etc. +# Default is Runtime.getRuntime().availableProcessors() +numExecutorThreadPoolSize= + +# Number of thread pool size to use for pulsar zookeeper callback service +# The cache executor thread pool is used for restarting global zookeeper session. +# Default is 10 +numCacheExecutorThreadPoolSize=10 + +# Max concurrent web requests +maxConcurrentHttpRequests=1024 + +# Name of the cluster to which this broker belongs to +clusterName=standalone + +# Enable cluster's failure-domain which can distribute brokers into logical region +failureDomainsEnabled=false + +# Zookeeper session timeout in milliseconds +zooKeeperSessionTimeoutMillis=30000 + +# ZooKeeper operation timeout in seconds +zooKeeperOperationTimeoutSeconds=30 + +# ZooKeeper cache expiry time in seconds +zooKeeperCacheExpirySeconds=300 + +# Time to wait for broker graceful shutdown. After this time elapses, the process will be killed +brokerShutdownTimeoutMs=60000 + +# Flag to skip broker shutdown when broker handles Out of memory error +skipBrokerShutdownOnOOM=false + +# Enable backlog quota check. Enforces action on topic when the quota is reached +backlogQuotaCheckEnabled=true + +# How often to check for topics that have reached the quota +backlogQuotaCheckIntervalInSeconds=60 + +# Default per-topic backlog quota limit +backlogQuotaDefaultLimitGB=10 + +# Default ttl for namespaces if ttl is not already configured at namespace policies. (disable default-ttl with value 0) +ttlDurationDefaultInSeconds=0 + +# Enable the deletion of inactive topics +brokerDeleteInactiveTopicsEnabled=true + +# How often to check for inactive topics +brokerDeleteInactiveTopicsFrequencySeconds=60 + +# Max pending publish requests per connection to avoid keeping large number of pending +# requests in memory. 
Default: 1000 +maxPendingPublishdRequestsPerConnection=1000 + +# How frequently to proactively check and purge expired messages +messageExpiryCheckIntervalInMinutes=5 + +# How long to delay rewinding cursor and dispatching messages when active consumer is changed +activeConsumerFailoverDelayTimeMillis=1000 + +# How long to delete inactive subscriptions from last consuming +# When it is 0, inactive subscriptions are not deleted automatically +subscriptionExpirationTimeMinutes=0 + +# Enable subscription message redelivery tracker to send redelivery count to consumer (default is enabled) +subscriptionRedeliveryTrackerEnabled=true + +# On KeyShared subscriptions, with default AUTO_SPLIT mode, use splitting ranges or +# consistent hashing to reassign keys to new consumers +subscriptionKeySharedUseConsistentHashing=false + +# On KeyShared subscriptions, number of points in the consistent-hashing ring. +# The higher the number, the more equal the assignment of keys to consumers +subscriptionKeySharedConsistentHashingReplicaPoints=100 + +# How frequently to proactively check and purge expired subscription +subscriptionExpiryCheckIntervalInMinutes=5 + +# Set the default behavior for message deduplication in the broker +# This can be overridden per-namespace. If enabled, broker will reject +# messages that were already stored in the topic +brokerDeduplicationEnabled=false + +# Maximum number of producer information that it's going to be +# persisted for deduplication purposes +brokerDeduplicationMaxNumberOfProducers=10000 + +# Number of entries after which a dedup info snapshot is taken. +# A bigger interval will lead to less snapshots being taken though it would +# increase the topic recovery time, when the entries published after the +# snapshot need to be replayed +brokerDeduplicationEntriesInterval=1000 + +# Time of inactivity after which the broker will discard the deduplication information +# relative to a disconnected producer. Default is 6 hours. +brokerDeduplicationProducerInactivityTimeoutMinutes=360 + +# When a namespace is created without specifying the number of bundle, this +# value will be used as the default +defaultNumberOfNamespaceBundles=4 + +# Enable check for minimum allowed client library version +clientLibraryVersionCheckEnabled=false + +# Path for the file used to determine the rotation status for the broker when responding +# to service discovery health checks +statusFilePath=/usr/local/apache/htdocs + +# Max number of unacknowledged messages allowed to receive messages by a consumer on a shared subscription. Broker will stop sending +# messages to consumer once, this limit reaches until consumer starts acknowledging messages back +# Using a value of 0, is disabling unackeMessage limit check and consumer can receive messages without any restriction +maxUnackedMessagesPerConsumer=50000 + +# Max number of unacknowledged messages allowed per shared subscription. Broker will stop dispatching messages to +# all consumers of the subscription once this limit reaches until consumer starts acknowledging messages back and +# unack count reaches to limit/2. Using a value of 0, is disabling unackedMessage-limit +# check and dispatcher can dispatch messages without any restriction +maxUnackedMessagesPerSubscription=200000 + +# Max number of unacknowledged messages allowed per broker. 
Once this limit reaches, broker will stop dispatching +# messages to all shared subscription which has higher number of unack messages until subscriptions start +# acknowledging messages back and unack count reaches to limit/2. Using a value of 0, is disabling +# unackedMessage-limit check and broker doesn't block dispatchers +maxUnackedMessagesPerBroker=0 + +# Once broker reaches maxUnackedMessagesPerBroker limit, it blocks subscriptions which has higher unacked messages +# than this percentage limit and subscription will not receive any new messages until that subscription acks back +# limit/2 messages +maxUnackedMessagesPerSubscriptionOnBrokerBlocked=0.16 + +# Tick time to schedule task that checks topic publish rate limiting across all topics +# Reducing to lower value can give more accuracy while throttling publish but +# it uses more CPU to perform frequent check. (Disable publish throttling with value 0) +topicPublisherThrottlingTickTimeMillis=2 + +# Tick time to schedule task that checks broker publish rate limiting across all topics +# Reducing to lower value can give more accuracy while throttling publish but +# it uses more CPU to perform frequent check. (Disable publish throttling with value 0) +brokerPublisherThrottlingTickTimeMillis=50 + +# Max Rate(in 1 seconds) of Message allowed to publish for a broker if broker publish rate limiting enabled +# (Disable message rate limit with value 0) +brokerPublisherThrottlingMaxMessageRate=0 + +# Max Rate(in 1 seconds) of Byte allowed to publish for a broker if broker publish rate limiting enabled +# (Disable byte rate limit with value 0) +brokerPublisherThrottlingMaxByteRate=0 + +# Default messages per second dispatch throttling-limit for every topic. Using a value of 0, is disabling default +# message dispatch-throttling +dispatchThrottlingRatePerTopicInMsg=0 + +# Default bytes per second dispatch throttling-limit for every topic. Using a value of 0, is disabling +# default message-byte dispatch-throttling +dispatchThrottlingRatePerTopicInByte=0 + +# Dispatch rate-limiting relative to publish rate. +# (Enabling flag will make broker to dynamically update dispatch-rate relatively to publish-rate: +# throttle-dispatch-rate = (publish-rate + configured dispatch-rate). +dispatchThrottlingRateRelativeToPublishRate=false + +# By default we enable dispatch-throttling for both caught up consumers as well as consumers who have +# backlog. +dispatchThrottlingOnNonBacklogConsumerEnabled=true + +# Precise dispathcer flow control according to history message number of each entry +preciseDispatcherFlowControl=false + +# Max number of concurrent lookup request broker allows to throttle heavy incoming lookup traffic +maxConcurrentLookupRequest=50000 + +# Max number of concurrent topic loading request broker allows to control number of zk-operations +maxConcurrentTopicLoadRequest=5000 + +# Max concurrent non-persistent message can be processed per connection +maxConcurrentNonPersistentMessagePerConnection=1000 + +# Number of worker threads to serve non-persistent topic +numWorkerThreadsForNonPersistentTopic=8 + +# Enable broker to load persistent topics +enablePersistentTopics=true + +# Enable broker to load non-persistent topics +enableNonPersistentTopics=true + +# Max number of producers allowed to connect to topic. Once this limit reaches, Broker will reject new producers +# until the number of connected producers decrease. +# Using a value of 0, is disabling maxProducersPerTopic-limit check. 
+maxProducersPerTopic=0 + +# Enforce producer to publish encrypted messages.(default disable). +encryptionRequireOnProducer=false + +# Max number of consumers allowed to connect to topic. Once this limit reaches, Broker will reject new consumers +# until the number of connected consumers decrease. +# Using a value of 0, is disabling maxConsumersPerTopic-limit check. +maxConsumersPerTopic=0 + +# Max number of subscriptions allowed to subscribe to topic. Once this limit reaches, broker will reject +# new subscription until the number of subscribed subscriptions decrease. +# Using a value of 0, is disabling maxSubscriptionsPerTopic limit check. +maxSubscriptionsPerTopic=0 + +# Max number of consumers allowed to connect to subscription. Once this limit reaches, Broker will reject new consumers +# until the number of connected consumers decrease. +# Using a value of 0, is disabling maxConsumersPerSubscription-limit check. +maxConsumersPerSubscription=0 + +# Max number of partitions per partitioned topic +# Use 0 or negative number to disable the check +maxNumPartitionsPerPartitionedTopic=0 + +### --- TLS --- ### +# Deprecated - Use webServicePortTls and brokerServicePortTls instead +tlsEnabled=false + +# Tls cert refresh duration in seconds (set 0 to check on every new connection) +tlsCertRefreshCheckDurationSec=300 + +# Path for the TLS certificate file +tlsCertificateFilePath= + +# Path for the TLS private key file +tlsKeyFilePath= + +# Path for the trusted TLS certificate file. +# This cert is used to verify that any certs presented by connecting clients +# are signed by a certificate authority. If this verification +# fails, then the certs are untrusted and the connections are dropped. +tlsTrustCertsFilePath= + +# Accept untrusted TLS certificate from client. +# If true, a client with a cert which cannot be verified with the +# 'tlsTrustCertsFilePath' cert will allowed to connect to the server, +# though the cert will not be used for client authentication. +tlsAllowInsecureConnection=false + +# Specify the tls protocols the broker will use to negotiate during TLS handshake +# (a comma-separated list of protocol names). +# Examples:- [TLSv1.2, TLSv1.1, TLSv1] +tlsProtocols= + +# Specify the tls cipher the broker will use to negotiate during TLS Handshake +# (a comma-separated list of ciphers). +# Examples:- [TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256] +tlsCiphers= + +# Trusted client certificates are required for to connect TLS +# Reject the Connection if the Client Certificate is not trusted. +# In effect, this requires that all connecting clients perform TLS client +# authentication. +tlsRequireTrustedClientCertOnConnect=false + +### --- KeyStore TLS config variables --- ### +# Enable TLS with KeyStore type configuration in broker. 
+tlsEnabledWithKeyStore=false
+
+# TLS Provider for KeyStore type
+tlsProvider=
+
+# TLS KeyStore type configuration in broker: JKS, PKCS12
+tlsKeyStoreType=JKS
+
+# TLS KeyStore path in broker
+tlsKeyStore=
+
+# TLS KeyStore password for broker
+tlsKeyStorePassword=
+
+# TLS TrustStore type configuration in broker: JKS, PKCS12
+tlsTrustStoreType=JKS
+
+# TLS TrustStore path in broker
+tlsTrustStore=
+
+# TLS TrustStore password for broker
+tlsTrustStorePassword=
+
+# Whether the internal client uses the KeyStore type to authenticate with Pulsar brokers
+brokerClientTlsEnabledWithKeyStore=false
+
+# The TLS Provider used by the internal client to authenticate with other Pulsar brokers
+brokerClientSslProvider=
+
+# TLS TrustStore type configuration for internal client: JKS, PKCS12
+# used by the internal client to authenticate with Pulsar brokers
+brokerClientTlsTrustStoreType=JKS
+
+# TLS TrustStore path for internal client
+# used by the internal client to authenticate with Pulsar brokers
+brokerClientTlsTrustStore=
+
+# TLS TrustStore password for internal client,
+# used by the internal client to authenticate with Pulsar brokers
+brokerClientTlsTrustStorePassword=
+
+# Specify the tls cipher the internal client will use to negotiate during TLS Handshake
+# (a comma-separated list of ciphers)
+# e.g. [TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256].
+# used by the internal client to authenticate with Pulsar brokers
+brokerClientTlsCiphers=
+
+# Specify the tls protocols the broker will use to negotiate during TLS handshake
+# (a comma-separated list of protocol names).
+# e.g. [TLSv1.2, TLSv1.1, TLSv1]
+# used by the internal client to authenticate with Pulsar brokers
+brokerClientTlsProtocols=
+
+# Enable or disable system topic
+systemTopicEnabled=false
+
+# Enable or disable topic level policies; topic level policies depend on the system topic,
+# so please enable the system topic first.
+topicLevelPoliciesEnabled=false
+
+# If a topic remains fenced for this number of seconds, it will be closed forcefully.
+# If it is set to 0 or a negative number, the fenced topic will not be closed.
+topicFencingTimeoutSeconds=0
+
+### --- Authentication --- ###
+# Role names that are treated as "proxy roles". If the broker sees a request with
+# role as proxyRoles - it will demand to see a valid original principal.
+proxyRoles=
+
+# If this flag is set then the broker authenticates the original Auth data
+# else it just accepts the originalPrincipal and authorizes it (if required).
+authenticateOriginalAuthData=false
+
+# Enable authentication
+authenticationEnabled=false
+
+# Authentication provider name list (a comma-separated list of class names)
+authenticationProviders=
+
+# Enforce authorization
+authorizationEnabled=false
+
+# Authorization provider fully qualified class-name
+authorizationProvider=org.apache.pulsar.broker.authorization.PulsarAuthorizationProvider
+
+# Allow wildcard matching in authorization
+# (wildcard matching only applicable if wildcard-char:
+# * presents at first or last position eg: *.pulsar.service, pulsar.service.*)
+authorizationAllowWildcardsMatching=false
+
+# Role names that are treated as "super-user", meaning they will be able to do all admin
+# operations and publish/consume from all topics
+superUserRoles=
+
+# Authentication settings of the broker itself. Used when the broker connects to other brokers,
+# either in the same cluster or in other clusters
+brokerClientAuthenticationPlugin=
+brokerClientAuthenticationParameters=
+
+# Supported Athenz provider domain names (comma separated) for authentication
+athenzDomainNames=
+
+# When this parameter is not empty, unauthenticated users perform as anonymousUserRole
+anonymousUserRole=
+
+# The token "claim" that will be interpreted as the authentication "role" or "principal" by AuthenticationProviderToken (defaults to "sub" if blank)
+tokenAuthClaim=
+
+# The token audience "claim" name, e.g. "aud", that will be used to get the audience from token.
+# If not set, audience will not be verified.
+tokenAudienceClaim=
+
+# The token audience stands for this broker. The field `tokenAudienceClaim` of a valid token needs to contain this.
+tokenAudience=
+
+### --- BookKeeper Client --- ###
+
+# Authentication plugin to use when connecting to bookies
+bookkeeperClientAuthenticationPlugin=
+
+# BookKeeper auth plugin implementation-specific parameter names and values
+bookkeeperClientAuthenticationParametersName=
+bookkeeperClientAuthenticationParameters=
+
+# Timeout for BK add / read operations
+bookkeeperClientTimeoutInSeconds=30
+
+# Speculative reads are initiated if a read request doesn't complete within a certain time.
+# Using a value of 0 disables speculative reads
+bookkeeperClientSpeculativeReadTimeoutInMillis=0
+
+# Number of channels per bookie
+bookkeeperNumberOfChannelsPerBookie=16
+
+# Enable bookies health check. Bookies that have more than the configured number of failures within
+# the interval will be quarantined for some time. During this period, new ledgers won't be created
+# on these bookies
+bookkeeperClientHealthCheckEnabled=true
+bookkeeperClientHealthCheckIntervalSeconds=60
+bookkeeperClientHealthCheckErrorThresholdPerInterval=5
+bookkeeperClientHealthCheckQuarantineTimeInSeconds=1800
+
+# bookie quarantine ratio to avoid all clients quarantining the high-pressure bookie servers at the same time
+bookkeeperClientQuarantineRatio=1.0
+
+# Enable rack-aware bookie selection policy. BK will choose bookies from different racks when
+# forming a new bookie ensemble.
+# This parameter is related to ensemblePlacementPolicy in conf/bookkeeper.conf; if enabled, ensemblePlacementPolicy
+# should be set to org.apache.bookkeeper.client.RackawareEnsemblePlacementPolicy
+bookkeeperClientRackawarePolicyEnabled=true
+
+# Enable region-aware bookie selection policy. BK will choose bookies from
+# different regions and racks when forming a new bookie ensemble.
+# If enabled, the value of bookkeeperClientRackawarePolicyEnabled is ignored.
+# This parameter is related to ensemblePlacementPolicy in conf/bookkeeper.conf; if enabled, ensemblePlacementPolicy
+# should be set to org.apache.bookkeeper.client.RegionAwareEnsemblePlacementPolicy
+bookkeeperClientRegionawarePolicyEnabled=false
+
+# Minimum number of racks per write quorum. BK rack-aware bookie selection policy will try to
+# get bookies from at least 'bookkeeperClientMinNumRacksPerWriteQuorum' racks for a write quorum.
+bookkeeperClientMinNumRacksPerWriteQuorum=1
+
+# Enforces rack-aware bookie selection policy to pick bookies from 'bookkeeperClientMinNumRacksPerWriteQuorum'
+# racks for a writeQuorum.
+# If BK can't find such a bookie then it would throw BKNotEnoughBookiesException instead of picking a random one.
+bookkeeperClientEnforceMinNumRacksPerWriteQuorum=false
+
+# Enable/disable reordering read sequence on reading entries.
+bookkeeperClientReorderReadSequenceEnabled=false + +# Enable bookie isolation by specifying a list of bookie groups to choose from. Any bookie +# outside the specified groups will not be used by the broker +bookkeeperClientIsolationGroups= + +# Enable bookie secondary-isolation group if bookkeeperClientIsolationGroups doesn't +# have enough bookie available. +bookkeeperClientSecondaryIsolationGroups= + +# Minimum bookies that should be available as part of bookkeeperClientIsolationGroups +# else broker will include bookkeeperClientSecondaryIsolationGroups bookies in isolated list. +bookkeeperClientMinAvailableBookiesInIsolationGroups= + +# Set the client security provider factory class name. +# Default: org.apache.bookkeeper.tls.TLSContextFactory +bookkeeperTLSProviderFactoryClass=org.apache.bookkeeper.tls.TLSContextFactory + +# Enable tls authentication with bookie +bookkeeperTLSClientAuthentication=false + +# Supported type: PEM, JKS, PKCS12. Default value: PEM +bookkeeperTLSKeyFileType=PEM + +#Supported type: PEM, JKS, PKCS12. Default value: PEM +bookkeeperTLSTrustCertTypes=PEM + +# Path to file containing keystore password, if the client keystore is password protected. +bookkeeperTLSKeyStorePasswordPath= + +# Path to file containing truststore password, if the client truststore is password protected. +bookkeeperTLSTrustStorePasswordPath= + +# Path for the TLS private key file +bookkeeperTLSKeyFilePath= + +# Path for the TLS certificate file +bookkeeperTLSCertificateFilePath= + +# Path for the trusted TLS certificate file +bookkeeperTLSTrustCertsFilePath= + +# Enable/disable disk weight based placement. Default is false +bookkeeperDiskWeightBasedPlacementEnabled=false + +# Set the interval to check the need for sending an explicit LAC +# A value of '0' disables sending any explicit LACs. Default is 0. +bookkeeperExplicitLacIntervalInMills=0 + +# Use older Bookkeeper wire protocol with bookie +bookkeeperUseV2WireProtocol=true + +# Expose bookkeeper client managed ledger stats to prometheus. default is false +# bookkeeperClientExposeStatsToPrometheus=false + +### --- Managed Ledger --- ### + +# Number of bookies to use when creating a ledger +managedLedgerDefaultEnsembleSize=1 + +# Number of copies to store for each message +managedLedgerDefaultWriteQuorum=1 + +# Number of guaranteed copies (acks to wait before write is complete) +managedLedgerDefaultAckQuorum=1 + +# How frequently to flush the cursor positions that were accumulated due to rate limiting. (seconds). +# Default is 60 seconds +managedLedgerCursorPositionFlushSeconds = 60 + +# Default type of checksum to use when writing to BookKeeper. Default is "CRC32C" +# Other possible options are "CRC32", "MAC" or "DUMMY" (no checksum). +managedLedgerDigestType=CRC32C + +# Number of threads to be used for managed ledger tasks dispatching +managedLedgerNumWorkerThreads=4 + +# Number of threads to be used for managed ledger scheduled tasks +managedLedgerNumSchedulerThreads=4 + +# Amount of memory to use for caching data payload in managed ledger. This memory +# is allocated from JVM direct memory and it's shared across all the topics +# running in the same broker. 
By default, uses 1/5th of available direct memory
+managedLedgerCacheSizeMB=

+# Whether we should make a copy of the entry payloads when inserting in cache
+managedLedgerCacheCopyEntries=false

+# Threshold to which the cache level is brought down when eviction is triggered
+managedLedgerCacheEvictionWatermark=0.9

+# Configure the cache eviction frequency for the managed ledger cache (evictions/sec)
+managedLedgerCacheEvictionFrequency=100.0

+# All entries that have stayed in cache for more than the configured time will be evicted
+managedLedgerCacheEvictionTimeThresholdMillis=1000

+# Configure the threshold (in number of entries) from which a cursor should be considered 'backlogged'
+# and thus should be set as inactive.
+managedLedgerCursorBackloggedThreshold=1000

+# Rate limit the amount of writes generated by consumers acking messages
+managedLedgerDefaultMarkDeleteRateLimit=0.1

+# Max number of entries to append to a ledger before triggering a rollover
+# A ledger rollover is triggered on these conditions
+#  * Either the max rollover time has been reached
+#  * or max entries have been written to the ledger and at least min-time
+#    has passed
+managedLedgerMaxEntriesPerLedger=50000

+# Minimum time between ledger rollover for a topic
+managedLedgerMinLedgerRolloverTimeMinutes=10

+# Maximum time before forcing a ledger rollover for a topic
+managedLedgerMaxLedgerRolloverTimeMinutes=240

+# Max number of entries to append to a cursor ledger
+managedLedgerCursorMaxEntriesPerLedger=50000

+# Max time before triggering a rollover on a cursor ledger
+managedLedgerCursorRolloverTimeInSeconds=14400

+# Maximum ledger size before triggering a rollover for a topic (MB)
+managedLedgerMaxSizePerLedgerMbytes=2048

+# Max number of "acknowledgment holes" that are going to be persistently stored.
+# When acknowledging out of order, a consumer will leave holes that are supposed
+# to be quickly filled by acking all the messages. The information of which
+# messages are acknowledged is persisted by compressing in "ranges" of messages
+# that were acknowledged. After the max number of ranges is reached, the information
+# will only be tracked in memory and messages will be redelivered in case of
+# crashes.
+managedLedgerMaxUnackedRangesToPersist=10000

+# Max number of "acknowledgment holes" that can be stored in ZooKeeper. If the number of unacked message ranges is higher
+# than this limit, the broker will persist unacked ranges into BookKeeper to avoid additional data overhead in
+# ZooKeeper.
+managedLedgerMaxUnackedRangesToPersistInZooKeeper=1000

+# Skip reading non-recoverable/unreadable data-ledgers under the managed-ledger's list. It helps when data-ledgers get
+# corrupted at BookKeeper and the managed-cursor is stuck at that ledger.
+autoSkipNonRecoverableData=false

+# Operation timeout while updating managed-ledger metadata.
+managedLedgerMetadataOperationsTimeoutSeconds=60

+# Read entries timeout when broker tries to read messages from bookkeeper.
+managedLedgerReadEntryTimeoutSeconds=0

+# Add entry timeout when broker tries to publish message to bookkeeper (0 to disable it).
+managedLedgerAddEntryTimeoutSeconds=0

+# New entries check delay for the cursor under the managed ledger.
+# If there are no new messages in the topic, the cursor will try to check again after the delay time.
+# For latency-sensitive consumption scenarios, this can be set to a smaller value or to 0.
+# Note that a smaller value may degrade consumption throughput. Default is 10ms.
+managedLedgerNewEntriesCheckDelayInMillis=10

+# Use Open Range-Set to cache unacked messages
+managedLedgerUnackedRangesOpenCacheSetEnabled=true

+# Managed ledger prometheus stats latency rollover seconds (default: 60s)
+managedLedgerPrometheusStatsLatencyRolloverSeconds=60

+# Whether to trace managed ledger task execution time
+managedLedgerTraceTaskExecution=true

+### --- Load balancer --- ###

+loadManagerClassName=org.apache.pulsar.broker.loadbalance.NoopLoadManager

+# Enable load balancer
+loadBalancerEnabled=false

+# Percentage of change to trigger load report update
+loadBalancerReportUpdateThresholdPercentage=10

+# maximum interval to update load report
+loadBalancerReportUpdateMaxIntervalMinutes=15

+# Frequency of report to collect
+loadBalancerHostUsageCheckIntervalMinutes=1

+# Load shedding interval. Broker periodically checks whether some traffic should be offloaded from
+# over-loaded brokers to other under-loaded brokers
+loadBalancerSheddingIntervalMinutes=1

+# Prevent the same topics from being shed and moved to another broker more than once within this timeframe
+loadBalancerSheddingGracePeriodMinutes=30

+# Usage threshold to allocate max number of topics to broker
+loadBalancerBrokerMaxTopics=50000

+# Interval to flush dynamic resource quota to ZooKeeper
+loadBalancerResourceQuotaUpdateIntervalMinutes=15

+# enable/disable namespace bundle auto split
+loadBalancerAutoBundleSplitEnabled=true

+# enable/disable automatic unloading of split bundles
+loadBalancerAutoUnloadSplitBundlesEnabled=true

+# maximum topics in a bundle, otherwise bundle split will be triggered
+loadBalancerNamespaceBundleMaxTopics=1000

+# maximum sessions (producers + consumers) in a bundle, otherwise bundle split will be triggered
+loadBalancerNamespaceBundleMaxSessions=1000

+# maximum msgRate (in + out) in a bundle, otherwise bundle split will be triggered
+loadBalancerNamespaceBundleMaxMsgRate=30000

+# maximum bandwidth (in + out) in a bundle, otherwise bundle split will be triggered
+loadBalancerNamespaceBundleMaxBandwidthMbytes=100

+# maximum number of bundles in a namespace
+loadBalancerNamespaceMaximumBundles=128

+# The broker resource usage threshold.
+# When the broker resource usage is greater than the pulsar cluster average resource usage,
+# the threshold shedder will be triggered to offload bundles from the broker.
+# It only takes effect in the ThresholdShedder strategy.
+loadBalancerBrokerThresholdShedderPercentage=10

+# The percentage that historical usage accounts for when calculating new resource usage.
+# It only takes effect in the ThresholdShedder strategy.
+loadBalancerHistoryResourcePercentage=0.9

+# The inbound bandwidth usage weight when calculating new resource usage.
+# It only takes effect in the ThresholdShedder strategy.
+loadBalancerBandwithInResourceWeight=1.0

+# The outbound bandwidth usage weight when calculating new resource usage.
+# It only takes effect in the ThresholdShedder strategy.
+loadBalancerBandwithOutResourceWeight=1.0

+# The CPU usage weight when calculating new resource usage.
+# It only takes effect in the ThresholdShedder strategy.
+loadBalancerCPUResourceWeight=1.0

+# The heap memory usage weight when calculating new resource usage.
+# It only takes effect in the ThresholdShedder strategy.
+loadBalancerMemoryResourceWeight=1.0

+# The direct memory usage weight when calculating new resource usage.
+# It only takes effect in the ThresholdShedder strategy.
+loadBalancerDirectMemoryResourceWeight=1.0

+# Bundle unload minimum throughput threshold (MB), to avoid frequent bundle unloading.
+# It only takes effect in the ThresholdShedder strategy.
+loadBalancerBundleUnloadMinThroughputThreshold=10

+### --- Replication --- ###

+# Enable replication metrics
+replicationMetricsEnabled=true

+# Max number of connections to open for each broker in a remote cluster
+# More connections host-to-host lead to better throughput over high-latency
+# links.
+replicationConnectionsPerBroker=16

+# Replicator producer queue size
+replicationProducerQueueSize=1000

+# Duration to check replication policy to avoid replicator inconsistency
+# due to missing ZooKeeper watch (disable with value 0)
+replicationPolicyCheckDurationSeconds=600

+# Default message retention time
+defaultRetentionTimeInMinutes=0

+# Default retention size
+defaultRetentionSizeInMB=0

+# How often to check whether the connections are still alive
+keepAliveIntervalSeconds=30

+### --- WebSocket --- ###

+# Enable the WebSocket API service in broker
+webSocketServiceEnabled=true

+# Number of IO threads in Pulsar Client used in WebSocket proxy
+webSocketNumIoThreads=8

+# Number of connections per Broker in Pulsar Client used in WebSocket proxy
+webSocketConnectionsPerBroker=8

+# Time in milliseconds after which an idle WebSocket session times out
+webSocketSessionIdleTimeoutMillis=300000

+# The maximum size of a text message during parsing in WebSocket proxy
+webSocketMaxTextFrameSize=1048576

+### --- Metrics --- ###

+# Enable topic level metrics
+exposeTopicLevelMetricsInPrometheus=true

+# Classname of Pluggable JVM GC metrics logger that can log GC specific metrics
+# jvmGCMetricsLoggerClassName=

+### --- Broker Web Stats --- ###

+# Enable publisher stats
+exposePublisherStats=true

+# Enable exposing the precise backlog stats.
+# Set to false to calculate from the published counter and consumed counter; this is more efficient but may be inaccurate.
+# Default is false.
+exposePreciseBacklogInPrometheus=false

+### --- Deprecated config variables --- ###

+# Deprecated. Use configurationStoreServers
+globalZookeeperServers=

+# Deprecated. Use brokerDeleteInactiveTopicsFrequencySeconds
+brokerServicePurgeInactiveFrequencyInSeconds=60

+### --- BookKeeper Configuration --- ###

+ledgerStorageClass=org.apache.bookkeeper.bookie.storage.ldb.DbLedgerStorage

+# The maximum netty frame size in bytes. Any message received larger than this will be rejected. The default value is 5MB.
+nettyMaxFrameSizeBytes=134217728

+# Size of Write Cache. Memory is allocated from JVM direct memory.
+# Write cache is used to buffer entries before flushing into the entry log
+# For good performance, it should be big enough to hold a substantial amount
+# of entries in the flush interval
+# By default it will be allocated to 1/4th of the available direct memory
+dbStorage_writeCacheMaxSizeMb=

+# Size of Read cache. Memory is allocated from JVM direct memory.
+# This read cache is pre-filled doing read-ahead whenever a cache miss happens
+# By default it will be allocated to 1/4th of the available direct memory
+dbStorage_readAheadCacheMaxSizeMb=

+# How many entries to pre-fill in cache after a read cache miss
+dbStorage_readAheadCacheBatchSize=1000

+flushInterval=60000

+## RocksDB specific configurations
+## DbLedgerStorage uses RocksDB to store the indexes from
+## (ledgerId, entryId) -> (entryLog, offset)

+# Size of RocksDB block-cache. For best performance, this cache
+# should be big enough to hold a significant portion of the index
+# database which can reach ~2GB in some cases
+# Default is to use 10% of the direct memory size
+dbStorage_rocksDB_blockCacheSize=

+# Other RocksDB specific tunables
+dbStorage_rocksDB_writeBufferSizeMB=4
+dbStorage_rocksDB_sstSizeInMB=4
+dbStorage_rocksDB_blockSize=4096
+dbStorage_rocksDB_bloomFilterBitsPerKey=10
+dbStorage_rocksDB_numLevels=-1
+dbStorage_rocksDB_numFilesInLevel0=4
+dbStorage_rocksDB_maxSizeInLevel1MB=256

+# Maximum latency to impose on a journal write to achieve grouping
+journalMaxGroupWaitMSec=1

+# Should the data be fsynced on journal before acknowledgment.
+journalSyncData=false


+# For each ledger dir, maximum disk space which can be used.
+# Default is 0.95f. i.e. 95% of disk can be used at most after which nothing will
+# be written to that partition. If all ledger dir partitions are full, then the bookie
+# will turn to read-only mode if 'readOnlyModeEnabled=true' is set, else it will
+# shut down.
+# Valid values should be in between 0 and 1 (exclusive).
+diskUsageThreshold=0.99

+# The disk free space low water mark threshold.
+# Disk is considered full when usage threshold is exceeded.
+# Disk returns to non-full state when usage is below the low water mark threshold.
+# This prevents it from going back and forth between these states frequently
+# when concurrent writes and compaction are happening. This also prevents the bookie from
+# switching frequently between read-only and read-write states in the same cases.
+diskUsageWarnThreshold=0.99

+# Whether the bookie is allowed to use a loopback interface as its primary
+# interface (i.e. the interface it uses to establish its identity).
+# By default, loopback interfaces are not allowed as the primary
+# interface.
+# Using a loopback interface as the primary interface usually indicates
+# a configuration error. For example, it's fairly common in some VPS setups
+# to not configure a hostname, or to have the hostname resolve to
+# 127.0.0.1. If this is the case, then all bookies in the cluster will
+# establish their identities as 127.0.0.1:3181, and only one will be able
+# to join the cluster. For VPSs configured like this, you should explicitly
+# set the listening interface.
+allowLoopback=true

+# Interval before triggering the next garbage collection, in milliseconds.
+# Since garbage collection runs in the background, too frequent gc
+# will hurt performance. It is better to use a longer gc
+# interval if there is enough disk capacity.
+gcWaitTime=300000

+# Enable topic auto creation when a new producer or consumer connects (disable auto creation with value false)
+allowAutoTopicCreation=true

+# The type of topic that is allowed to be automatically created (partitioned/non-partitioned).
+allowAutoTopicCreationType=non-partitioned

+# Enable subscription auto creation when a new consumer connects (disable auto creation with value false)
+allowAutoSubscriptionCreation=true

+# The default number of partitions when a topic is automatically created and allowAutoTopicCreationType is partitioned.
+defaultNumPartitions=1 + +### --- Transaction config variables --- ### +transactionMetadataStoreProviderClassName=org.apache.pulsar.transaction.coordinator.impl.InMemTransactionMetadataStoreProvider diff --git a/deploy/docker-compose/docker-deploy/training_template/backends/spark/rabbitmq/enabled_plugins b/deploy/docker-compose/docker-deploy/training_template/backends/spark/rabbitmq/enabled_plugins new file mode 100644 index 0000000000..be0a921c97 --- /dev/null +++ b/deploy/docker-compose/docker-deploy/training_template/backends/spark/rabbitmq/enabled_plugins @@ -0,0 +1 @@ +[rabbitmq_federation_management,rabbitmq_federation]. \ No newline at end of file diff --git a/deploy/docker-compose/docker-deploy/training_template/backends/spark/spark/spark-defaults.conf b/deploy/docker-compose/docker-deploy/training_template/backends/spark/spark/spark-defaults.conf new file mode 100644 index 0000000000..fdeaf62c4c --- /dev/null +++ b/deploy/docker-compose/docker-deploy/training_template/backends/spark/spark/spark-defaults.conf @@ -0,0 +1,4 @@ +spark.master spark://spark-master:7077 +#spark.eventLog.enabled true +#spark.eventLog.dir hdfs://namenode:9000/spark/logs +spark.cores.max 4 \ No newline at end of file diff --git a/deploy/docker-compose/docker-deploy/training_template/docker-compose-eggroll.yml b/deploy/docker-compose/docker-deploy/training_template/docker-compose-eggroll.yml new file mode 100644 index 0000000000..46d35bd478 --- /dev/null +++ b/deploy/docker-compose/docker-deploy/training_template/docker-compose-eggroll.yml @@ -0,0 +1,181 @@ +# Copyright 2019-2022 VMware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# you may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +version: '3.7' +networks: + fate-network: + ipam: + config: + - subnet: 192.167.0.0/16 + +volumes: + fate_flow_logs: + download_dir: + shared_dir_examples: + driver: local + driver_opts: + type: none + o: bind + device: /examples + shared_dir_fate: + driver: local + driver_opts: + type: none + o: bind + device: /fate + shared_dir_data: + driver: local + driver_opts: + type: none + o: bind + device: /data + +services: + osx: + image: "${RegistryURI}${OSX_IMAGE}:${OSX_IMAGE_TAG}" + restart: always + ports: + - "9370:9370" + environment: + PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION: python + volumes: + - ./confs/osx/conf/:/data/projects/fate/osx/conf/broker/ + - /etc/localtime:/etc/localtime:ro + networks: + - fate-network + command: ["sh", "-c", "java -XX:+UseG1GC -XX:G1HeapRegionSize=16m -XX:G1ReservePercent=25 -XX:InitiatingHeapOccupancyPercent=30 -XX:SoftRefLRUPolicyMSPerMB=0 -verbose:gc -Xloggc:/dev/shm/rmq_srv_gc_%p_%t.log -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCApplicationStoppedTime -XX:+PrintAdaptiveSizePolicy -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=5 -XX:GCLogFileSize=30m -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/oom/heapdump.hprof -server -Xms4g -Xmx4g -XX:-OmitStackTraceInFastThrow -XX:+AlwaysPreTouch -XX:MaxDirectMemorySize=15g -XX:-UseLargePages -XX:-UseBiasedLocking -cp conf/broker/:lib/*:extension/*:/data/projects/fate/osx/lib/osx-broker-1.0.0.jar org.fedai.osx.broker.Bootstrap -c /data/projects/fate/osx/conf"] + + fateboard: + image: "${FATEBoard_IMAGE}:${FATEBoard_IMAGE_TAG}" + restart: always + ports: + - "8080:8080" + volumes: + - ./confs/fateboard/conf:/data/projects/fate/fateboard/conf + - fate_flow_logs:/data/projects/fate/fate_flow/logs + - /etc/localtime:/etc/localtime:ro + networks: + - fate-network + depends_on: + - fateflow + + clustermanager: + image: "${EGGRoll_IMAGE}:${EGGRoll_IMAGE_TAG}" + restart: always + expose: + - 4670 + volumes: + - ./confs/eggroll/conf/:/data/projects/fate/eggroll/conf/ + - /etc/localtime:/etc/localtime:ro + - shared_dir_fate:/data/projects/fate/fate + environment: + PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION: python + networks: + - fate-network + command: ["bash", "-c", "java -server -Dlog4j.configurationFile=$${EGGROLL_HOME}/conf/log4j2.xml -Dmodule=clustermanager -cp $${EGGROLL_HOME}/lib/*: org.fedai.eggroll.clustermanager.Bootstrap -p 4670 -s EGGROLL_DAEMON"] + + nodemanager: + image: "${EGGRoll_IMAGE}:${EGGRoll_IMAGE_TAG}" + restart: always + expose: + - 4671 + volumes: + - ./confs/eggroll/conf:/data/projects/fate/eggroll/conf/ + - ./confs/fate_flow/conf/service_conf.yaml:/data/projects/fate/conf/service_conf.yaml + - ./shared_dir/data/nodemanager:/data/projects/fate/eggroll/data + - /etc/localtime:/etc/localtime:ro + - shared_dir_fate:/data/projects/fate/fate + depends_on: + - clustermanager + networks: + - fate-network + environment: + PYTHONPATH: /data/projects/fate/fate/python:/data/projects/fate/fate_flow/python:/data/projects/fate/fate_client/python:/data/projects/fate/eggroll/python + cap_add: + - SYS_PTRACE + command: ["bash", "-c", "java -server -Dlog4j.configurationFile=$${EGGROLL_HOME}/conf/log4j2.xml -Dmodule=nodemanager -cp $${EGGROLL_HOME}/lib/*: org.fedai.eggroll.nodemanager.Bootstrap -p 4671 -s EGGROLL_DAEMON"] + + fateflow: + image: "${FATEFlow_IMAGE}:${FATEFlow_IMAGE_TAG}" + environment: + FATE_PROJECT_BASE: "/data/projects/fate" + FATE_LOG_LEVEL: "DEBUG" + ports: + - "9360:9360" + - "9380:9380" + restart: always + volumes: + - shared_dir_fate:/data/projects/fate/fate + - 
shared_dir_examples:/data/projects/fate/examples + - download_dir:/data/projects/fate/fate/python/download_dir + - fate_flow_logs:/data/projects/fate/fate_flow/logs + - ./confs/fate_flow/conf/service_conf.yaml:/data/projects/fate/fate_flow/conf/service_conf.yaml + - ./confs/fate_flow/conf/pulsar_route_table.yaml:/data/projects/fate/fate_flow/conf/pulsar_route_table.yaml + - ./confs/fate_flow/conf/rabbitmq_route_table.yaml:/data/projects/fate/fate_flow/conf/rabbitmq_route_table.yaml + - ./confs/eggroll/conf:/data/projects/fate/eggroll/conf + - ./shared_dir/data/model_local_cache:/data/projects/fate/fate_flow/model_local_cache + - /etc/localtime:/etc/localtime:ro + depends_on: + - mysql + - osx + - clustermanager + - nodemanager + networks: + fate-network: + ipv4_address: 192.167.0.100 + healthcheck: + test: ["CMD", "curl", "-f", "-X GET", "http://192.167.0.100:9380/v2/server/fateflow"] + interval: 1m30s + timeout: 10s + retries: 3 + start_period: 40s + command: + - "/bin/bash" + - "-c" + - | + set -x + pip install cryptography && sleep 5 && python fate_flow/python/fate_flow/fate_flow_server.py --debug + client: + image: "${Client_IMAGE}:${Client_IMAGE_TAG}" + ports: + - "20000:20000" + restart: always + environment: + FATE_FLOW_IP: "fateflow" + FATE_FLOW_PORT: "9380" + FATE_SERVING_HOST: "fate-serving:8059" + NOTEBOOK_HASHED_PASSWORD: "${NOTEBOOK_HASHED_PASSWORD}" + volumes: + - download_dir:/data/projects/fate/download_dir + - shared_dir_examples:/data/projects/fate/examples + - /etc/localtime:/etc/localtime:ro + depends_on: + - fateflow + networks: + - fate-network + command: ["bash", "-c", "pipeline init --ip $${FATE_FLOW_IP} --port $${FATE_FLOW_PORT} && flow init --ip $${FATE_FLOW_IP} --port $${FATE_FLOW_PORT} && jupyter notebook --ip=0.0.0.0 --port=20000 --allow-root --debug --NotebookApp.notebook_dir='/data/projects/fate/' --no-browser --NotebookApp.token='' --NotebookApp.password=$${NOTEBOOK_HASHED_PASSWORD} "] + + mysql: + image: "${MySQL_IMAGE}:${MySQL_IMAGE_TAG}" + expose: + - 3306 + volumes: + - ./confs/mysql/init:/docker-entrypoint-initdb.d/ + - ./shared_dir/data/mysql:/var/lib/mysql + - /etc/localtime:/etc/localtime:ro + restart: always + cap_add: + - SYS_NICE + environment: + MYSQL_ALLOW_EMPTY_PASSWORD: "yes" + networks: + - fate-network diff --git a/deploy/docker-compose/docker-deploy/training_template/docker-compose-exchange.yml b/deploy/docker-compose/docker-deploy/training_template/docker-compose-exchange.yml new file mode 100644 index 0000000000..b6cb916ba3 --- /dev/null +++ b/deploy/docker-compose/docker-deploy/training_template/docker-compose-exchange.yml @@ -0,0 +1,24 @@ +# Copyright 2019-2022 VMware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# you may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +version: '3.7' + +services: + exchange: + image: "federatedai/eggroll:${TAG}" + restart: always + ports: + - "9371:9370" + volumes: + - ./conf:/data/projects/fate/eggroll/conf + - /etc/localtime:/etc/localtime:ro + command: ["bash", "-c", "java -Dlog4j.configurationFile=$${EGGROLL_HOME}/conf/log4j2.properties -cp $${EGGROLL_HOME}/lib/*:$${EGGROLL_HOME}/conf/ com.webank.eggroll.rollsite.EggSiteBootstrap -c $${EGGROLL_HOME}/conf/eggroll.properties"] diff --git a/deploy/docker-compose/docker-deploy/training_template/docker-compose-spark-slim.yml b/deploy/docker-compose/docker-deploy/training_template/docker-compose-spark-slim.yml new file mode 100644 index 0000000000..d316943672 --- /dev/null +++ b/deploy/docker-compose/docker-deploy/training_template/docker-compose-spark-slim.yml @@ -0,0 +1,182 @@ +# Copyright 2019-2022 VMware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# you may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +version: "3.7" + +networks: + fate-network: + ipam: + config: + - subnet: 192.167.0.0/16 + +volumes: + fate_flow_logs: + download_dir: + shared_dir_examples: + driver: local + driver_opts: + type: none + o: bind + device: /examples + shared_dir_fate: + driver: local + driver_opts: + type: none + o: bind + device: /fate + shared_dir_data: + driver: local + driver_opts: + type: none + o: bind + device: /data + +services: + fateboard: + image: "${FATEBoard_IMAGE}:${FATEBoard_IMAGE_TAG}" + ports: + - "8080:8080" + volumes: + - ./confs/fateboard/conf:/data/projects/fate/fateboard/conf + - fate_flow_logs:/data/projects/fate/fate_flow/logs + - /etc/localtime:/etc/localtime:ro + restart: always + networks: + - fate-network + depends_on: + - fateflow + + fateflow: + image: "${FATEFlow_IMAGE}:${FATEFlow_IMAGE_TAG}" + restart: always + ports: + - 9380:9380 + - 9360:9360 + volumes: + - ./confs/spark/spark-defaults.conf:/data/projects/spark-3.1.3-bin-hadoop3.2/conf/spark-defaults.conf + - shared_dir_fate:/data/projects/fate/fate + - shared_dir_examples:/data/projects/fate/examples + - download_dir:/data/projects/fate/fate/python/download_dir + - fate_flow_logs:/data/projects/fate/fate_flow/logs + - ./confs/fate_flow/conf/service_conf.yaml:/data/projects/fate/fate_flow/conf/service_conf.yaml + - ./confs/fate_flow/conf/pulsar_route_table.yaml:/data/projects/fate/fate_flow/conf/pulsar_route_table.yaml + - ./confs/fate_flow/conf/rabbitmq_route_table.yaml:/data/projects/fate/fate_flow/conf/rabbitmq_route_table.yaml + - ./confs/eggroll/conf:/data/projects/fate/eggroll/conf + - ./shared_dir/data/model_local_cache:/data/projects/fate/fate_flow/model_local_cache + - /etc/localtime:/etc/localtime:ro + networks: + fate-network: + ipv4_address: 192.167.0.100 + healthcheck: + test: ["CMD", "curl", "-f", "-X GET", "http://192.167.0.100:9380/v2/server/fateflow"] + interval: 1m30s + timeout: 10s + retries: 3 + start_period: 40s + command: + - "/bin/bash" + - "-c" + - | + set -x + sed -i "s/int(party.party_id)/str(party.party_id)/g" /data/projects/fate/fate/python/fate/arch/federation/backends/pulsar/_federation.py + cp 
/data/projects/fate/fate_flow/conf/pulsar_route_table.yaml /data/projects/fate/fate_flow/pulsar_route_table.yaml + cp /data/projects/fate/fate_flow/conf/rabbitmq_route_table.yaml /data/projects/fate/fate_flow/rabbitmq_route_table.yaml + sleep 5 && python fate_flow/python/fate_flow/fate_flow_server.py + environment: + FATE_PROJECT_BASE: "/data/projects/fate" + FATE_FLOW_UPLOAD_MAX_NUM: "1000000" + FATE_FLOW_UPLOAD_MAX_BYTES: "104868093952" + FATE_LOG_LEVEL: "INFO" + + mysql: + image: "${MySQL_IMAGE}:${MySQL_IMAGE_TAG}" + expose: + - 3306 + volumes: + - ./confs/mysql/init:/docker-entrypoint-initdb.d/ + - ./shared_dir/data/mysql:/var/lib/mysql + - /etc/localtime:/etc/localtime:ro + restart: always + environment: + MYSQL_ALLOW_EMPTY_PASSWORD: "yes" + networks: + - fate-network + cap_add: + - SYS_NICE + + osx: + image: "${RegistryURI}${OSX_IMAGE}:${OSX_IMAGE_TAG}" + restart: always + ports: + - "9370:9370" + environment: + PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION: python + volumes: + - ./confs/osx/conf/:/data/projects/fate/osx/conf/broker/ + - /etc/localtime:/etc/localtime:ro + networks: + - fate-network + command: ["sh", "-c", "java -XX:+UseG1GC -XX:G1HeapRegionSize=16m -XX:G1ReservePercent=25 -XX:InitiatingHeapOccupancyPercent=30 -XX:SoftRefLRUPolicyMSPerMB=0 -verbose:gc -Xloggc:/dev/shm/rmq_srv_gc_%p_%t.log -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCApplicationStoppedTime -XX:+PrintAdaptiveSizePolicy -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=5 -XX:GCLogFileSize=30m -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/oom/heapdump.hprof -server -Xms4g -Xmx4g -XX:-OmitStackTraceInFastThrow -XX:+AlwaysPreTouch -XX:MaxDirectMemorySize=15g -XX:-UseLargePages -XX:-UseBiasedLocking -cp conf/broker/:lib/*:extension/*:/data/projects/fate/osx/lib/osx-broker-1.0.0.jar org.fedai.osx.broker.Bootstrap -c /data/projects/fate/osx/conf"] + + # rabbitmq: + # image: "${RabbitMQ_IMAGE}:${RabbitMQ_IMAGE_TAG}" + # ports: + # - "5672:5672" + # - "15672:15672" + # environment: + # RABBITMQ_DEFAULT_USER: fate + # RABBITMQ_DEFAULT_PASS: fate + # RABBITMQ_USER: fate + # RABBITMQ_PASSWORD: fate + # RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS: "-rabbit max_message_size 536870912" + # volumes: + # - ./confs/rabbitmq/enabled_plugins:/etc/rabbitmq/enabled_plugins + # - ./shared_dir/data/rabbitmq:/var/lib/rabbitmq + # restart: always + # networks: + # - fate-network + + # pulsar: + # image: "${Pulsar_IMAGE}:${Pulsar_IMAGE_TAG}" + # ports: + # - "6650:6650" + # - "6651:6651" + # - "8001:8080" + # volumes: + # - ./confs/pulsar/standalone.conf:/pulsar/conf/standalone.conf + # # - ./shared_dir/data/pulsar:/pulsar/data + # - /etc/localtime:/etc/localtime:ro + # command: + # ["/bin/bash", "-c", "bin/pulsar standalone -nss"] + # restart: always + # networks: + # - fate-network + + client: + image: "${Client_IMAGE}:${Client_IMAGE_TAG}" + ports: + - "20000:20000" + restart: always + environment: + FATE_FLOW_IP: "fateflow" + FATE_FLOW_PORT: "9380" + FATE_SERVING_HOST: "fate-serving:8059" + NOTEBOOK_HASHED_PASSWORD: "${NOTEBOOK_HASHED_PASSWORD}" + volumes: + - download_dir:/data/projects/fate/download_dir + - shared_dir_examples:/data/projects/fate/examples + - /etc/localtime:/etc/localtime:ro + depends_on: + - fateflow + networks: + - fate-network + command: ["bash", "-c", "pipeline init --ip $${FATE_FLOW_IP} --port $${FATE_FLOW_PORT} && flow init --ip $${FATE_FLOW_IP} --port $${FATE_FLOW_PORT} && jupyter notebook --ip=0.0.0.0 --port=20000 --allow-root --debug --NotebookApp.notebook_dir='/data/projects/fate/' --no-browser 
--NotebookApp.token='' --NotebookApp.password=$${NOTEBOOK_HASHED_PASSWORD} "] diff --git a/deploy/docker-compose/docker-deploy/training_template/docker-compose-spark.yml b/deploy/docker-compose/docker-deploy/training_template/docker-compose-spark.yml new file mode 100644 index 0000000000..1969180351 --- /dev/null +++ b/deploy/docker-compose/docker-deploy/training_template/docker-compose-spark.yml @@ -0,0 +1,269 @@ +# Copyright 2019-2022 VMware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# you may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +version: "3.7" + +networks: + fate-network: + ipam: + config: + - subnet: 192.167.0.0/16 + +volumes: + fate_flow_logs: + download_dir: + shared_dir_examples: + driver: local + driver_opts: + type: none + o: bind + device: /examples + shared_dir_fate: + driver: local + driver_opts: + type: none + o: bind + device: /fate + shared_dir_data: + driver: local + driver_opts: + type: none + o: bind + device: /data + +services: + fateboard: + image: "${FATEBoard_IMAGE}:${FATEBoard_IMAGE_TAG}" + ports: + - "8080:8080" + volumes: + - ./confs/fateboard/conf:/data/projects/fate/fateboard/conf + - fate_flow_logs:/data/projects/fate/fate_flow/logs + - /etc/localtime:/etc/localtime:ro + networks: + - fate-network + restart: always + depends_on: + - fateflow + + fateflow: + image: "${FATEFlow_IMAGE}:${FATEFlow_IMAGE_TAG}" + restart: always + ports: + - 9380:9380 + - 9360:9360 + volumes: + - ./confs/spark/spark-defaults.conf:/data/projects/spark-3.1.3-bin-hadoop3.2/conf/spark-defaults.conf + - shared_dir_fate:/data/projects/fate/fate + - shared_dir_examples:/data/projects/fate/examples + - download_dir:/data/projects/fate/fate/python/download_dir + - fate_flow_logs:/data/projects/fate/fate_flow/logs + - ./confs/fate_flow/conf/service_conf.yaml:/data/projects/fate/fate_flow/conf/service_conf.yaml + - ./confs/fate_flow/conf/pulsar_route_table.yaml:/data/projects/fate/fate_flow/conf/pulsar_route_table.yaml + - ./confs/fate_flow/conf/rabbitmq_route_table.yaml:/data/projects/fate/fate_flow/conf/rabbitmq_route_table.yaml + - ./confs/eggroll/conf:/data/projects/fate/eggroll/conf + - ./shared_dir/data/model_local_cache:/data/projects/fate/fate_flow/model_local_cache + - /etc/localtime:/etc/localtime:ro + networks: + fate-network: + ipv4_address: 192.167.0.100 + healthcheck: + test: ["CMD", "curl", "-f", "-X GET", "http://192.167.0.100:9380/v2/server/fateflow"] + interval: 1m30s + timeout: 10s + retries: 3 + start_period: 40s + command: + - "/bin/bash" + - "-c" + - | + set -x + sed -i "s/int(party.party_id)/str(party.party_id)/g" /data/projects/fate/fate/python/fate/arch/federation/backends/pulsar/_federation.py + cp /data/projects/fate/fate_flow/conf/pulsar_route_table.yaml /data/projects/fate/fate_flow/pulsar_route_table.yaml + cp /data/projects/fate/fate_flow/conf/rabbitmq_route_table.yaml /data/projects/fate/fate_flow/rabbitmq_route_table.yaml + sleep 5 && python fate_flow/python/fate_flow/fate_flow_server.py + environment: + FATE_PROJECT_BASE: "/data/projects/fate" + FATE_FLOW_UPLOAD_MAX_NUM: "1000000" + 
FATE_FLOW_UPLOAD_MAX_BYTES: "104868093952" + FATE_LOG_LEVEL: "INFO" + + namenode: + image: "${Hadoop_NameNode_IMAGE}:${Hadoop_NameNode_IMAGE_TAG}" + restart: always + ports: + - 9000:9000 + - 9870:9870 + volumes: + - ./shared_dir/data/namenode:/hadoop/dfs/name + - ./confs/hadoop/core-site.xml:/etc/hadoop/core-site.xml + - /etc/localtime:/etc/localtime:ro + env_file: + - ./confs/hadoop/hadoop.env + environment: + - CLUSTER_NAME=fate + networks: + - fate-network + + datanode-0: + image: "${Hadoop_DataNode_IMAGE}:${Hadoop_DataNode_IMAGE_TAG}" + restart: always + volumes: + - /etc/localtime:/etc/localtime:ro + - ./shared_dir/data/datanode-0:/hadoop/dfs/data + environment: + SERVICE_PRECONDITION: "namenode:9000" + env_file: + - ./confs/hadoop/hadoop.env + networks: + - fate-network + + datanode-1: + image: "${Hadoop_DataNode_IMAGE}:${Hadoop_DataNode_IMAGE_TAG}" + restart: always + volumes: + - /etc/localtime:/etc/localtime:ro + - ./shared_dir/data/datanode-1:/hadoop/dfs/data + environment: + SERVICE_PRECONDITION: "namenode:9000" + env_file: + - ./confs/hadoop/hadoop.env + networks: + - fate-network + + datanode-2: + image: "${Hadoop_DataNode_IMAGE}:${Hadoop_DataNode_IMAGE_TAG}" + restart: always + volumes: + - /etc/localtime:/etc/localtime:ro + - ./shared_dir/data/datanode-2:/hadoop/dfs/data + environment: + SERVICE_PRECONDITION: "namenode:9000" + env_file: + - ./confs/hadoop/hadoop.env + networks: + - fate-network + + spark-master: + image: "${Spark_Master_IMAGE}:${Spark_Master_IMAGE_TAG}" + restart: always + ports: + - "8888:8080" + - "7077:7077" + volumes: + - /etc/localtime:/etc/localtime:ro + environment: + INIT_DAEMON_STEP: setup_spark + networks: + - fate-network + + spark-worker: + image: "${Spark_Worker_IMAGE}:${Spark_Worker_IMAGE_TAG}" + restart: always + depends_on: + - spark-master + ports: + - "8081:8081" + environment: + SPARK_MASTER: "spark://spark-master:7077" + volumes: + - ./confs/fate_flow/conf:/data/projects/fate/conf + - /etc/localtime:/etc/localtime:ro + networks: + - fate-network + + rabbitmq: + image: "${RabbitMQ_IMAGE}:${RabbitMQ_IMAGE_TAG}" + ports: + - "5672:5672" + - "15672:15672" + environment: + RABBITMQ_DEFAULT_USER: fate + RABBITMQ_DEFAULT_PASS: fate + RABBITMQ_USER: fate + RABBITMQ_PASSWORD: fate + RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS: "-rabbit max_message_size 536870912" + volumes: + - ./confs/rabbitmq/enabled_plugins:/etc/rabbitmq/enabled_plugins + - ./shared_dir/data/rabbitmq:/var/lib/rabbitmq + - /etc/localtime:/etc/localtime:ro + restart: always + networks: + - fate-network + + pulsar: + image: "${Pulsar_IMAGE}:${Pulsar_IMAGE_TAG}" + ports: + - "6650:6650" + - "6651:6651" + - "8001:8080" + user: root + volumes: + - ./confs/pulsar/standalone.conf:/pulsar/conf/standalone.conf + # - ./shared_dir/data/pulsar:/pulsar/data + - /etc/localtime:/etc/localtime:ro + command: + ["/bin/bash", "-c", "bin/pulsar standalone -nss"] + restart: always + networks: + - fate-network + + mysql: + image: "${MySQL_IMAGE}:${MySQL_IMAGE_TAG}" + expose: + - 3306 + volumes: + - ./confs/mysql/init:/docker-entrypoint-initdb.d/ + - ./shared_dir/data/mysql:/var/lib/mysql + - /etc/localtime:/etc/localtime:ro + restart: always + environment: + MYSQL_ALLOW_EMPTY_PASSWORD: "yes" + networks: + - fate-network + cap_add: + - SYS_NICE + + nginx: + image: "${Nginx_IMAGE}:${Nginx_IMAGE_TAG}" + ports: + - 9300:9300 + - 9310:9310 + volumes: + - ./confs/nginx/route_table.yaml:/data/projects/fate/proxy/nginx/conf/route_table.yaml + - 
./confs/nginx/nginx.conf:/data/projects/fate/proxy/nginx/conf/nginx.conf + - /etc/localtime:/etc/localtime:ro + restart: always + networks: + - fate-network + depends_on: + - fateflow + + client: + image: "${Client_IMAGE}:${Client_IMAGE_TAG}" + ports: + - "20000:20000" + restart: always + environment: + FATE_FLOW_IP: "fateflow" + FATE_FLOW_PORT: "9380" + FATE_SERVING_HOST: "fate-serving:8059" + NOTEBOOK_HASHED_PASSWORD: "${NOTEBOOK_HASHED_PASSWORD}" + volumes: + - download_dir:/data/projects/fate/download_dir + - shared_dir_examples:/data/projects/fate/examples + - /etc/localtime:/etc/localtime:ro + depends_on: + - fateflow + networks: + - fate-network + command: ["bash", "-c", "pipeline init --ip $${FATE_FLOW_IP} --port $${FATE_FLOW_PORT} && flow init --ip $${FATE_FLOW_IP} --port $${FATE_FLOW_PORT} && jupyter notebook --ip=0.0.0.0 --port=20000 --allow-root --debug --NotebookApp.notebook_dir='/data/projects/fate/' --no-browser --NotebookApp.token='' --NotebookApp.password=$${NOTEBOOK_HASHED_PASSWORD} "] diff --git a/deploy/docker-compose/docker-deploy/training_template/public/fate_flow/conf/pulsar_route_table.yaml b/deploy/docker-compose/docker-deploy/training_template/public/fate_flow/conf/pulsar_route_table.yaml new file mode 100644 index 0000000000..2e9d58030b --- /dev/null +++ b/deploy/docker-compose/docker-deploy/training_template/public/fate_flow/conf/pulsar_route_table.yaml @@ -0,0 +1,17 @@ +9999: + host: 192.168.0.4 + port: 6650 + sslPort: 6651 + proxy: "" + +10000: + host: 192.168.0.3 + port: 6650 + sslPort: 6651 + proxy: "" + +default: + proxy: "proxy.fate.org:443" + domain: "fate.org" + brokerPort: 6650 + brokerSslPort: 6651 \ No newline at end of file diff --git a/deploy/docker-compose/docker-deploy/training_template/public/fate_flow/conf/rabbitmq_route_table.yaml b/deploy/docker-compose/docker-deploy/training_template/public/fate_flow/conf/rabbitmq_route_table.yaml new file mode 100644 index 0000000000..8ff50453df --- /dev/null +++ b/deploy/docker-compose/docker-deploy/training_template/public/fate_flow/conf/rabbitmq_route_table.yaml @@ -0,0 +1,6 @@ +9999: + host: guest + port: 5672 +10000: + host: rabbitmq + port: 5672 diff --git a/deploy/docker-compose/docker-deploy/training_template/public/fate_flow/conf/service_conf.yaml b/deploy/docker-compose/docker-deploy/training_template/public/fate_flow/conf/service_conf.yaml new file mode 100644 index 0000000000..fbe4295826 --- /dev/null +++ b/deploy/docker-compose/docker-deploy/training_template/public/fate_flow/conf/service_conf.yaml @@ -0,0 +1,127 @@ +party_id: "9999" +use_registry: false +# DEBUG 10/INFO 20 +log_level: 20 +encrypt: + key_0: + module: fate_flow.hub.encrypt.password_encrypt#pwdecrypt + # base on: fate_flow/conf/ + private_path: private_key.pem +fateflow: + host: 192.167.0.100 + http_port: 9380 + grpc_port: 9360 + proxy_name: osx +# nginx: +# host: +# http_port: +# grpc_port: +database: + engine: mysql + # encrypt passwd key + decrypt_key: + mysql: + name: + user: + passwd: + host: + port: 3306 + max_connections: 100 + stale_timeout: 30 + sqlite: + # default fate_flow/runtime/system_settings: SQLITE_PATH + # /xxx/xxx.sqlite + path: +default_engines: + computing: eggroll + federation: osx + storage: eggroll +default_provider: + name: fate + # version default: fateflow.env + version: + device: local +computing: + standalone: + cores: 32 + eggroll: + cores: 32 + nodes: 1 + # cluster manager host and port + host: clustermanager + port: 4670 + spark: + # default use SPARK_HOME environment variable + home: 
/data/projects/spark-3.1.3-bin-hadoop3.2/
+    cores: 32
+federation:
+  osx:
+    host: osx
+    port: 9370
+    # stream or queue
+    mode: stream
+  pulsar:
+    host: pulsar
+    port: 6650
+    mng_port: 8080
+    cluster: standalone
+    tenant: fl-tenant
+    topic_ttl: 30
+    # default conf/pulsar_route_table.yaml
+    route_table: conf/pulsar_route_table.yaml
+    # mode: replication / client, default: replication
+    mode: replication
+    max_message_size: 1048576
+  nginx:
+    host: nginx
+    http_port: 9300
+    grpc_port: 9310
+    # http or grpc
+    protocol: http
+  rabbitmq:
+    host: rabbitmq
+    mng_port: 15672
+    port: 5672
+    user: fate
+    password: fate
+    # default conf/rabbitmq_route_table.yaml
+    route_table: conf/rabbitmq_route_table.yaml
+    # mode: replication / client, default: replication
+    mode: replication
+storage:
+  hdfs:
+    name_node: hdfs://namenode:9000
+hook_module:
+  client_authentication: fate_flow.hook.flow.client_authentication
+  site_authentication: fate_flow.hook.flow.site_authentication
+  permission: fate_flow.hook.flow.permission
+authentication:
+  client: false
+  site: false
+  permission: false
+model_store:
+  engine: file
+  # encrypt passwd key
+  decrypt_key:
+  file:
+    # default fate_flow/runtime/system_settings: MODEL_STORE_PATH
+    path:
+  mysql:
+    name: fate_flow
+    user: fate
+    passwd: fate
+    host: 127.0.0.1
+    port: 3306
+    max_connections: 100
+    stale_timeout: 30
+  tencent_cos:
+    Region:
+    SecretId:
+    SecretKey:
+    Bucket:
+zookeeper:
+  hosts:
+    - 127.0.0.1:2181
+  use_acl: true
+  user: fate
+  password: fate
diff --git a/deploy/docker-compose/docker-deploy/training_template/public/fateboard/conf/application.properties b/deploy/docker-compose/docker-deploy/training_template/public/fateboard/conf/application.properties
new file mode 100644
index 0000000000..c2fbc0f256
--- /dev/null
+++ b/deploy/docker-compose/docker-deploy/training_template/public/fateboard/conf/application.properties
@@ -0,0 +1,29 @@
+server.port=8080
+fateflow.url=http://localhost:9380
+#takes priority over {fateflow.url}; multiple entries are split by ;
+#the config below supports configuring more than one fate flow for this fate board
+fateflow.url-list=
+fateflow.http_app_key=
+fateflow.http_secret_key=
+server.servlet.encoding.charset=UTF-8
+server.servlet.encoding.enabled=true
+server.tomcat.uri-encoding=UTF-8
+fateboard.front_end.cors=false
+fateboard.front_end.url=http://localhost:8028
+server.tomcat.max-threads=1000
+server.tomcat.max-connections=20000
+spring.servlet.multipart.max-file-size=10MB
+spring.servlet.multipart.max-request-size=100MB
+spring.servlet.session.timeout=1800s
+server.compression.enabled=true
+server.compression.mime-types=application/json,application/xml,text/html,text/xml,text/plain
+server.board.login.username=
+server.board.login.password=
+server.board.encrypt.private_key=
+server.board.encrypt.enable=false
+#only [h,m,s] is available
+server.servlet.session.timeout=4h
+server.servlet.session.cookie.max-age=4h
+management.endpoints.web.exposure.exclude=*
+feign.client.config.default.connectTimeout=10000
+feign.client.config.default.readTimeout=10000
diff --git a/deploy/docker-compose/docker-deploy/training_template/public/fateboard/conf/ssh.properties b/deploy/docker-compose/docker-deploy/training_template/public/fateboard/conf/ssh.properties
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/deploy/docker-compose/docker-deploy/training_template/public/mysql/init/create-eggroll-meta-tables.sql b/deploy/docker-compose/docker-deploy/training_template/public/mysql/init/create-eggroll-meta-tables.sql
new file mode 100644
index 0000000000..9e674c77f0 --- /dev/null +++ b/deploy/docker-compose/docker-deploy/training_template/public/mysql/init/create-eggroll-meta-tables.sql @@ -0,0 +1,205 @@ +-- create database if not exists, default database is eggroll_meta +CREATE DATABASE IF NOT EXISTS `eggroll_meta`; + +-- all operation under this database +USE `eggroll_meta`; + +-- store_locator +CREATE TABLE IF NOT EXISTS `store_locator` +( + `store_locator_id` SERIAL PRIMARY KEY, + `store_type` VARCHAR(255) NOT NULL, + `namespace` VARCHAR(2000) NOT NULL DEFAULT 'DEFAULT', + `name` VARCHAR(2000) NOT NULL, + `path` VARCHAR(2000) NOT NULL DEFAULT '', + `total_partitions` INT UNSIGNED NOT NULL, + `key_serdes_type` INT NOT NULL DEFAULT 0, + `value_serdes_type` INT NOT NULL DEFAULT 0, + `partitioner_type` INT NOT NULL DEFAULT 0, + `version` INT UNSIGNED NOT NULL DEFAULT 0, + `status` VARCHAR(255) NOT NULL, + `created_at` DATETIME DEFAULT CURRENT_TIMESTAMP, + `updated_at` DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP +) DEFAULT CHARACTER SET latin1 + COLLATE latin1_swedish_ci; + +CREATE UNIQUE INDEX `idx_u_store_locator_ns_n` ON `store_locator` (`namespace`(120), `name`(640)); +CREATE INDEX `idx_store_locator_st` ON `store_locator` (`store_type`(255)); +CREATE INDEX `idx_store_locator_ns` ON `store_locator` (`namespace`(767)); +CREATE INDEX `idx_store_locator_n` ON `store_locator` (`name`(767)); +CREATE INDEX `idx_store_locator_s` ON `store_locator` (`status`(255)); +CREATE INDEX `idx_store_locator_v` ON `store_locator` (`version`); + + +-- store (option) +CREATE TABLE IF NOT EXISTS `store_option` +( + `store_option_id` SERIAL PRIMARY KEY, + `store_locator_id` BIGINT UNSIGNED NOT NULL, + `name` VARCHAR(255) NOT NULL, + `data` VARCHAR(2000) NOT NULL DEFAULT '', + `created_at` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + `updated_at` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP +) DEFAULT CHARACTER SET latin1 + COLLATE latin1_swedish_ci; + +CREATE INDEX `idx_store_option_si` ON `store_option` (`store_locator_id`); + + +-- store_partition +CREATE TABLE IF NOT EXISTS `store_partition` +( + `store_partition_id` SERIAL PRIMARY KEY, -- self-increment sequence + `store_locator_id` BIGINT UNSIGNED NOT NULL, + `node_id` BIGINT UNSIGNED NOT NULL, + `partition_id` INT UNSIGNED NOT NULL, -- partition id of a store + `status` VARCHAR(255) NOT NULL, + `created_at` DATETIME DEFAULT CURRENT_TIMESTAMP, + `updated_at` DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP +) DEFAULT CHARACTER SET latin1 + COLLATE latin1_swedish_ci; + +CREATE UNIQUE INDEX `idx_u_store_partition_si_spi_ni` ON `store_partition` (`store_locator_id`, `store_partition_id`, `node_id`); +CREATE INDEX `idx_store_partition_sli` ON `store_partition` (`store_locator_id`); +CREATE INDEX `idx_store_partition_ni` ON `store_partition` (`node_id`); +CREATE INDEX `idx_store_partition_s` ON `store_partition` (`status`(255)); + + +-- node +CREATE TABLE IF NOT EXISTS `server_node` +( + `server_node_id` SERIAL PRIMARY KEY, + `name` VARCHAR(2000) NOT NULL DEFAULT '', + `server_cluster_id` BIGINT UNSIGNED NOT NULL DEFAULT 0, + `host` VARCHAR(1000) NOT NULL, + `port` INT NOT NULL, + `node_type` VARCHAR(255) NOT NULL, + `status` VARCHAR(255) NOT NULL, + `last_heartbeat_at` DATETIME DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP, + `created_at` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + `updated_at` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP +) DEFAULT CHARACTER SET latin1 + COLLATE latin1_swedish_ci; + 
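+-- The prefix lengths on string columns in the indexes of this schema
+-- (e.g. `host`(600), `node_type`(100), `namespace`(120)) are presumably
+-- chosen to keep each index key within InnoDB's historical 767-byte index
+-- prefix limit, given the latin1 (1 byte per character) charset used above.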
+CREATE INDEX `idx_server_node_h_p_nt` ON `server_node` (`host`(600), `port`, `node_type`(100)); +CREATE INDEX `idx_server_node_h` ON `server_node` (`host`(767)); +CREATE INDEX `idx_server_node_sci` ON `server_node` (`server_cluster_id`); +CREATE INDEX `idx_server_node_nt` ON `server_node` (`node_type`(255)); +CREATE INDEX `idx_server_node_s` ON `server_node` (`status`(255)); + + +-- session (main) +CREATE TABLE IF NOT EXISTS `session_main` +( + `session_id` VARCHAR(767) PRIMARY KEY, + `name` VARCHAR(2000) NOT NULL DEFAULT '', + `status` VARCHAR(255) NOT NULL, + `status_reason` VARCHAR(255), + `before_status` VARCHAR(255), + `tag` VARCHAR(255), + `total_proc_count` INT, + `active_proc_count` INT, + `created_at` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + `updated_at` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP +) DEFAULT CHARACTER SET latin1 + COLLATE latin1_swedish_ci; + +CREATE INDEX `idx_session_main_s` ON `session_main` (`status`); + + +-- session (option) +CREATE TABLE IF NOT EXISTS `session_option` +( + `session_option_id` SERIAL PRIMARY KEY, + `session_id` VARCHAR(2000), + `name` VARCHAR(255) NOT NULL, + `data` VARCHAR(2000) NOT NULL DEFAULT '', + `created_at` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + `updated_at` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP +) DEFAULT CHARACTER SET latin1 + COLLATE latin1_swedish_ci; + +CREATE INDEX `idx_session_option_si` ON `session_option` (`session_id`(767)); + + +-- session (processor) +CREATE TABLE IF NOT EXISTS `session_processor` +( + `processor_id` SERIAL PRIMARY KEY, + `session_id` VARCHAR(767), + `server_node_id` INT NOT NULL, + `processor_type` VARCHAR(255) NOT NULL, + `status` VARCHAR(255), + `before_status` VARCHAR(255), + `tag` VARCHAR(255), + `command_endpoint` VARCHAR(255), + `transfer_endpoint` VARCHAR(255), + `processor_option` VARCHAR(512), + `pid` INT NOT NULL DEFAULT -1, + `created_at` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + `updated_at` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP +) DEFAULT CHARACTER SET latin1 + COLLATE latin1_swedish_ci; + +CREATE INDEX `idx_session_processor_si` ON `session_processor` (`session_id`(767)); + + +CREATE TABLE IF NOT EXISTS `processor_resource` +( + `id` SERIAL PRIMARY KEY, + `processor_id` BIGINT NOT NULL, + `session_id` VARCHAR(767), + `server_node_id` INT NOT NULL, + `resource_type` VARCHAR(255), + `allocated` BIGINT NOT NULL default 0, + `extention` VARCHAR(512), + `status` VARCHAR(255), + `pid` INT NOT NULL DEFAULT -1, + `created_at` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + `updated_at` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP +) DEFAULT CHARACTER SET latin1 + COLLATE latin1_swedish_ci; +CREATE INDEX `idx_processor_id_processor_resource` ON `processor_resource` (`processor_id`); +CREATE INDEX `idx_node_id_processor_resource` ON `processor_resource` (`server_node_id`); +CREATE INDEX `idx_session_id_processor_resource` ON `processor_resource` (`session_id`); +CREATE INDEX `idx_node_status_processor_resource` ON `processor_resource` (`server_node_id`,`resource_type`,`status`); + + + +CREATE TABLE IF NOT EXISTS `node_resource` +( + `resource_id` SERIAL PRIMARY KEY, + `server_node_id` BIGINT NOT NULL, + `resource_type` VARCHAR(255), + `total` BIGINT NOT NULL default 0, + `used` BIGINT NOT NULL default 0, + `pre_allocated` BIGINT NOT NULL default 0, + `allocated` BIGINT NOT NULL DEFAULT 0, + `extention` VARCHAR(512), + `status` VARCHAR(255), + `created_at` 
DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
+    `updated_at`      DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
+) DEFAULT CHARACTER SET latin1
+  COLLATE latin1_swedish_ci;
+CREATE INDEX `idx_node_id_node_resource` ON `node_resource` (`server_node_id`);
+CREATE INDEX `idx_node_status_node_resource` ON `node_resource` (`server_node_id`,`status`);
+CREATE UNIQUE INDEX `idx_u_node_resource` ON `node_resource` (`server_node_id`, `resource_type`);
+
+
+CREATE TABLE IF NOT EXISTS `session_ranks`
+(
+    `container_id`    SERIAL PRIMARY KEY,
+    `session_id`      VARCHAR(767),
+    `server_node_id`  INT NOT NULL,
+    `global_rank`     INT UNSIGNED NOT NULL,
+    `local_rank`      INT UNSIGNED NOT NULL
+) DEFAULT CHARACTER SET latin1
+  COLLATE latin1_swedish_ci;
+
+ CREATE INDEX `idx_session_id_session_ranks` ON `session_ranks` (`session_id`);
+
+
+
+
+
+
diff --git a/deploy/docker-compose/docker-deploy/training_template/public/osx/conf/broker.properties b/deploy/docker-compose/docker-deploy/training_template/public/osx/conf/broker.properties
new file mode 100644
index 0000000000..9d537b9976
--- /dev/null
+++ b/deploy/docker-compose/docker-deploy/training_template/public/osx/conf/broker.properties
@@ -0,0 +1,61 @@
+grpc.port=9370
+# Http switch for the server.
+# If set to True, the server will open the http port.
+# http port configuration can be set through http.port
+open.http.server=false
+# port of http
+http.port=8087
+https.port=8088
+# whether the http server uses TLS
+#http.use.tls = false
+# whether the grpc server uses TLS
+# If true, a grpc port will be specially opened to listen for TLS requests
+# grpc tls port configuration can be set through grpc.tls.port
+open.grpc.tls.server=false
+grpc.tls.port=9883
+# the partyId of self; multiple partyIds can be set.
+# eg: 9999,10000,10001 +self.party=9999 +# deployment mode, including cluster/standalone, +# respectively representing cluster mode and standalone mode , +# and standalone is used by default +deploy.mode=standalone +# the zookeeper address needs to be configured when the deployment mode is cluster +zk.url=127.0.0.1:2181 +stream.limit.mode=LOCAL + +# the IP of the cluster manager component of eggroll +eggroll.cluster.manager.ip = clustermanager +# the port of the cluster manager component of eggroll +eggroll.cluster.manager.port = 4670 +# maximum number of message retries +produce.msg.max.try.time =3 + +http.client.method.config = {"UNARY_CALL":{"reqTimeout":0,"connectionTimeout":0,"socketTimeout":0}} + +http.use.tls=false + +http.ssl.trust.store.type=PKCS12 + +http.ssl.key.store.alias=22 + +http.ssl.key.store.password=123456 + + +mapped.file.size=134217728 + +#http.ssl.trust.store.path=D:\\44\\127.0.0.1.pfx + +server.ca.file= +server.cert.chain.file= +server.private.key.file= + + + + + + + + + + diff --git a/deploy/docker-compose/docker-deploy/training_template/public/osx/conf/route_table.json b/deploy/docker-compose/docker-deploy/training_template/public/osx/conf/route_table.json new file mode 100644 index 0000000000..abe60b8c56 --- /dev/null +++ b/deploy/docker-compose/docker-deploy/training_template/public/osx/conf/route_table.json @@ -0,0 +1,26 @@ +{ + "route_table": + { + "9999": + { + "fateflow":[ + { + "port": 9360, + "ip": "127.0.0.1" + } + ] + }, + "10000":{ + "default":[{ + "protocol":"http", + "url": "http://127.0.0.1:8087/osx/inbound", + "ip": "127.0.0.1", + "port": 9370 + }] + } + }, + "permission": + { + "default_allow": true + } +} \ No newline at end of file From 73b274a9ad3e86e6cfa9ff1565fffed2a57d3934 Mon Sep 17 00:00:00 2001 From: robbie228 Date: Thu, 14 Nov 2024 10:02:51 +0800 Subject: [PATCH 2/3] delete serving_template Signed-off-by: robbie228 --- .../docker-compose-serving.yml | 84 ------------------- .../serving-admin/conf/application.properties | 32 ------- .../serving-proxy/conf/application.properties | 58 ------------- .../serving-proxy/conf/route_table.json | 30 ------- .../conf/serving-server.properties | 56 ------------- 5 files changed, 260 deletions(-) delete mode 100644 deploy/docker-compose/docker-deploy/serving_template/docker-compose-serving.yml delete mode 100644 deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-admin/conf/application.properties delete mode 100644 deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-proxy/conf/application.properties delete mode 100644 deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-proxy/conf/route_table.json delete mode 100644 deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-server/conf/serving-server.properties diff --git a/deploy/docker-compose/docker-deploy/serving_template/docker-compose-serving.yml b/deploy/docker-compose/docker-deploy/serving_template/docker-compose-serving.yml deleted file mode 100644 index a1f13c5072..0000000000 --- a/deploy/docker-compose/docker-deploy/serving_template/docker-compose-serving.yml +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright 2019-2022 VMware, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# you may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -version: '3' - -networks: - fate-serving-network: - external: - name: - -volumes: - data_dir_server: - driver: local - driver_opts: - type: none - o: bind - device: /data/server - data_dir_zookeeper: - driver: local - driver_opts: - type: none - o: bind - device: /data/zookeeper - -services: - serving-server: - image: "federatedai/serving-server:${SERVING_TAG}" - ports: - - "8000:8000" - volumes: - - ./confs/serving-server/conf/serving-server.properties:/data/projects/fate-serving/serving-server/conf/serving-server.properties - - ./data/server:/data/projects/fate-serving/serving-server/.fate - - /etc/localtime:/etc/localtime:ro - networks: - - fate-serving-network - - serving-proxy: - image: "federatedai/serving-proxy:${SERVING_TAG}" - ports: - - "8059:8059" - - "8869:8869" - expose: - - 8879 - volumes: - - ./confs/serving-proxy/conf/application.properties:/data/projects/fate-serving/serving-proxy/conf/application.properties - - ./confs/serving-proxy/conf/route_table.json:/data/projects/fate-serving/serving-proxy/conf/route_table.json - - /etc/localtime:/etc/localtime:ro - networks: - - fate-serving-network - - serving-zookeeper: - image: "bitnami/zookeeper:3.7.0" - user: root - ports: - - "2181:2181" - - "2888" - - "3888" - volumes: - - ./data/zookeeper:/bitnami/zookeeper - - /etc/localtime:/etc/localtime:ro - environment: - ALLOW_ANONYMOUS_LOGIN: "yes" - networks: - - fate-serving-network - - serving-admin: - image: "federatedai/serving-admin:${SERVING_TAG}" - ports: - - "8350:8350" - volumes: - - ./confs/serving-admin/conf/application.properties:/data/projects/fate-serving/serving-admin/conf/application.properties - - /etc/localtime:/etc/localtime:ro - networks: - - fate-serving-network diff --git a/deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-admin/conf/application.properties b/deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-admin/conf/application.properties deleted file mode 100644 index 0333ede207..0000000000 --- a/deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-admin/conf/application.properties +++ /dev/null @@ -1,32 +0,0 @@ -# -# Copyright 2019 The FATE Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -server.port=8350 -# cache -#local.cache.expire=300 -# zk -zk.url=serving-zookeeper:2181 -# zk acl -#acl.enable=false -#acl.username= -#acl.password= -# grpc -#grpc.timeout=5000 -# username & password -admin.username= -admin.password= - -spring.mvc.pathmatch.matching-strategy=ANT_PATH_MATCHER \ No newline at end of file diff --git a/deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-proxy/conf/application.properties b/deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-proxy/conf/application.properties deleted file mode 100644 index 979cd0b66b..0000000000 --- a/deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-proxy/conf/application.properties +++ /dev/null @@ -1,58 +0,0 @@ -# -# Copyright 2019 The FATE Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# coordinator same as Party ID -coordinator= -server.port=8059 -#inference.service.name=serving -#random, consistent -#routeType=random -#route.table=/data/projects/fate-serving/serving-proxy/conf/route_table.json -#auth.file=/data/projects/fate-serving/serving-proxy/conf/auth_config.json -# zk router -#useZkRouter=true -zk.url=serving-zookeeper:2181 -useZkRouter=true -# zk acl -#acl.enable=false -#acl.username= -#acl.password= -# intra-partyid port -#proxy.grpc.intra.port=8879 -# inter-partyid port -#proxy.grpc.inter.port=8869 - -# grpc -# only support PLAINTEXT, TLS(we use Mutual TLS here), if use TSL authentication -#proxy.grpc.inter.negotiationType=PLAINTEXT -# only needs to be set when negotiationType is TLS -#proxy.grpc.inter.CA.file=/data/projects/fate-serving/serving-proxy/conf/ssl/ca.crt -# negotiated client side certificates -#proxy.grpc.inter.client.certChain.file=/data/projects/fate-serving/serving-proxy/conf/ssl/client.crt -#proxy.grpc.inter.client.privateKey.file=/data/projects/fate-serving/serving-proxy/conf/ssl/client.pem -# negotiated server side certificates -#proxy.grpc.inter.server.certChain.file=/data/projects/fate-serving/serving-proxy/conf/ssl/server.crt -#proxy.grpc.inter.server.privateKey.file=/data/projects/fate-serving/serving-proxy/conf/ssl/server.pem - -#proxy.grpc.inference.timeout=3000 -#proxy.grpc.inference.async.timeout=1000 -#proxy.grpc.unaryCall.timeout=3000 -#proxy.grpc.threadpool.coresize=50 -#proxy.grpc.threadpool.maxsize=100 -#proxy.grpc.threadpool.queuesize=10 -#proxy.async.timeout=5000 -#proxy.async.coresize=10 -#proxy.async.maxsize=100 -#proxy.grpc.batch.inference.timeout=10000 diff --git a/deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-proxy/conf/route_table.json b/deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-proxy/conf/route_table.json deleted file mode 100644 index 02be84c841..0000000000 --- a/deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-proxy/conf/route_table.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "route_table": { - "default": { - "default": [ - { - "ip": "other-proxy", - "port": 8869 - } - ] - }, - 
"9999": { - "default": [ - { - "ip": "serving-proxy", - "port": 8059 - } - ], - "serving": [ - { - "ip": "serving-server", - "port": 8000 - } - ] - } - }, - "permission": { - "default_allow": true - } - } - \ No newline at end of file diff --git a/deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-server/conf/serving-server.properties b/deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-server/conf/serving-server.properties deleted file mode 100644 index 6ef922b7f8..0000000000 --- a/deploy/docker-compose/docker-deploy/serving_template/docker-serving/serving-server/conf/serving-server.properties +++ /dev/null @@ -1,56 +0,0 @@ -# -# Copyright 2019 The FATE Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -port=8000 -#serviceRoleName=serving -# cache -#remoteModelInferenceResultCacheSwitch=false -#cache.type=local -#model.cache.path=/data/projects/fate-serving/serving-server -# local cache -#local.cache.maxsize=10000 -#local.cache.expire=30 -#local.cache.interval=3 -# external cache -#redis.ip= -#redis.port= -### configure this parameter to use cluster mode -#redis.cluster.nodes=127.0.0.1:6379,127.0.0.1:6380,127.0.0.1:6381,127.0.0.1:6382,127.0.0.1:6383,127.0.0.1:6384 -### this password is common in stand-alone mode and cluster mode -#redis.password= -#redis.timeout=10 -#redis.expire=3000 -#redis.maxTotal=100 -#redis.maxIdle=100 -# external subsystem -proxy=serving-proxy:8879 -# adapter -feature.single.adaptor=com.webank.ai.fate.serving.adaptor.dataaccess.MockAdapter -feature.batch.adaptor=com.webank.ai.fate.serving.adaptor.dataaccess.MockBatchAdapter -http.adapter.url=http://127.0.0.1:9380/v1/http/adapter/getFeature -# model transfer -model.transfer.url=http://127.0.0.1:9380/v1/model/transfer -# zk router -zk.url=serving-zookeeper:2181 -useRegister=true -useZkRouter=true -# zk acl -#acl.enable=false -#acl.username= -#acl.password= - -# LR algorithm config -#lr.split.size=500 -#lr.use.parallel=false \ No newline at end of file From 33861d48a8ab0c5ba80f406a0dd3fc5d9f9bf423 Mon Sep 17 00:00:00 2001 From: robbie228 Date: Thu, 14 Nov 2024 10:50:00 +0800 Subject: [PATCH 3/3] delete spark Signed-off-by: robbie228 --- deploy/docker-compose/README_zh.md | 466 ++++----- .../docker-deploy/docker_deploy.sh | 36 +- .../docker-deploy/generate_config.sh | 46 +- .../backends/spark/hadoop/core-site.xml | 7 - .../backends/spark/hadoop/hadoop.env | 43 - .../backends/spark/nginx/nginx.conf | 68 -- .../backends/spark/nginx/route_table.yaml | 27 - .../backends/spark/pulsar/standalone.conf | 899 ------------------ .../backends/spark/rabbitmq/enabled_plugins | 1 - .../backends/spark/spark/spark-defaults.conf | 4 - .../docker-compose-eggroll.yml | 37 +- .../docker-compose-exchange.yml | 24 - .../docker-compose-spark-slim.yml | 182 ---- .../docker-compose-spark.yml | 269 ------ .../conf/application.properties | 0 .../conf/ssh.properties | 0 16 files changed, 218 insertions(+), 1891 deletions(-) delete mode 100644 
deploy/docker-compose/docker-deploy/training_template/backends/spark/hadoop/core-site.xml
 delete mode 100644 deploy/docker-compose/docker-deploy/training_template/backends/spark/hadoop/hadoop.env
 delete mode 100644 deploy/docker-compose/docker-deploy/training_template/backends/spark/nginx/nginx.conf
 delete mode 100644 deploy/docker-compose/docker-deploy/training_template/backends/spark/nginx/route_table.yaml
 delete mode 100644 deploy/docker-compose/docker-deploy/training_template/backends/spark/pulsar/standalone.conf
 delete mode 100644 deploy/docker-compose/docker-deploy/training_template/backends/spark/rabbitmq/enabled_plugins
 delete mode 100644 deploy/docker-compose/docker-deploy/training_template/backends/spark/spark/spark-defaults.conf
 delete mode 100644 deploy/docker-compose/docker-deploy/training_template/docker-compose-exchange.yml
 delete mode 100644 deploy/docker-compose/docker-deploy/training_template/docker-compose-spark-slim.yml
 delete mode 100644 deploy/docker-compose/docker-deploy/training_template/docker-compose-spark.yml
 rename deploy/docker-compose/docker-deploy/training_template/public/{fateboard => fate_board}/conf/application.properties (100%)
 rename deploy/docker-compose/docker-deploy/training_template/public/{fateboard => fate_board}/conf/ssh.properties (100%)

diff --git a/deploy/docker-compose/README_zh.md b/deploy/docker-compose/README_zh.md
index eafd257653..74d391afe7 100644
--- a/deploy/docker-compose/README_zh.md
+++ b/deploy/docker-compose/README_zh.md
@@ -45,13 +45,10 @@ RegistryURI=hub.c.163.com
 If the target machines do not yet hold the FATE component images, pull them from Docker Hub with the commands below. The FATE image version `` can be checked on the [release page](https://github.com/FederatedAI/FATE/releases); version information for the serving images is on [this page](https://github.com/FederatedAI/FATE-Serving/releases):
 
 ```bash
-docker pull federatedai/eggroll:-release
-docker pull federatedai/fateboard:-release
-docker pull federatedai/fateflow:-release
-docker pull federatedai/serving-server:-release
-docker pull federatedai/serving-proxy:-release
-docker pull federatedai/serving-admin:-release
-docker pull bitnami/zookeeper:3.7.0
+docker pull federatedai/eggroll:3.2.0-release
+docker pull federatedai/fateflow:2.2.0-release
+docker pull federatedai/osx:2.2.0-release
+docker pull federatedai/fateboard:2.1.1-release
 docker pull mysql:8.0.28
 ```
 
@@ -60,14 +57,10 @@ docker pull mysql:8.0.28
 ```bash
 $ docker images
 REPOSITORY                   TAG
-federatedai/eggroll          -release
-federatedai/fateboard        -release
-federatedai/fateflow         -release
-federatedai/client           -release
-federatedai/serving-server   -release
-federatedai/serving-proxy    -release
-federatedai/serving-admin    -release
-bitnami/zookeeper            3.7.0
+federatedai/fateflow         2.2.0-release
+federatedai/eggroll          3.2.0-release
+federatedai/osx              2.2.0-release
+federatedai/fateboard        2.1.1-release
 mysql                        8.0.28
 ```
 
@@ -105,23 +98,39 @@ party_list=(10000 9999)
 party_ip_list=(192.168.7.1 192.168.7.2)
 serving_ip_list=(192.168.7.1 192.168.7.2)
 
+# Engines:
+# Computing : Eggroll, Spark, Spark_local
 computing=Eggroll
-federation=Eggroll
+# Federation: OSX(computing: Eggroll/Spark/Spark_local), Pulsar/RabbitMQ(computing: Spark/Spark_local)
+federation=OSX
+# Storage: Eggroll(computing: Eggroll), HDFS(computing: Spark), LocalFS(computing: Spark_local)
 storage=Eggroll
-
+# Algorithm: Basic, NN, ALL
 algorithm=Basic
-device=IPCL
-
-compute_core=4
-
-......
+# Device: CPU, IPCL, GPU
+device=CPU
+
+# Number of compute cores (used by both the Spark and Eggroll backends)
+compute_core=16
+
+# You only need to configure this parameter when you want to use the GPU; it defaults to 0
+gpu_count=0
+
+# modify if you are going to use an external db
+mysql_ip=mysql
+mysql_user=fate
+mysql_password=fate_dev
+mysql_db=fate_flow
+serverTimezone=UTC
+
+name_node=hdfs://namenode:9000
+
+# Define fateboard login information
+fateboard_username=admin
+fateboard_password=admin
 ```
 
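An aside on the engine fields above: they must form one of the combinations listed in the comments. As an illustration only (an editorial sketch, not part of this patch), a single-node party that keeps computing and storage local could be configured as:

```bash
# hypothetical parties.conf excerpt: local Spark computing, OSX federation, local storage
computing=Spark_local
federation=OSX
storage=LocalFS
algorithm=Basic
device=CPU
compute_core=16
```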
-* For the Spark+RabbitMQ deployment flavor, see the documentation [here](../docs/FATE_On_Spark.md).
-* For the Spark+Pulsar deployment flavor, see the documentation [here](../docs/FATE_On_Spark_With_Pulsar.md).
-* For the Spark+local Pulsar deployment flavor, see the documentation [here](TBD)
-
 Deploying FATE with Docker-compose supports several different combinations of engine types (the choice of computing, federation and storage). For more detail on the different FATE flavors, see: [Introduction to the engine architectures](../docs/Introduction_to_Engine_Architecture_zh.md).
 
 The `algorithm` and `device` settings are described in [FATE_Algorithm_and_Computational_Acceleration_Selection.md](../docs/FATE_Algorithm_and_Computational_Acceleration_Selection.md)
@@ -152,23 +161,6 @@
 total 0
 drwxr-xr-x. 2 fate docker 6 May 27 00:51 fate
 ```
 
-### GPU support
-
-Starting with v1.11.1, the docker compose deployment supports FATE with GPUs. To use a GPU you first need a GPU-capable Docker environment; see the official Docker documentation ().
-
-Using the GPU requires configuration changes; both of the following must be modified:
-
-```sh
-algorithm=NN
-device=GPU
-
-gpu_count=1
-```
-
-Only the fateflow component uses the GPU in FATE, so each party needs at least one GPU.
-
-*gpu_count is mapped to count, see [Docker compose GPU support](https://docs.docker.com/compose/gpu-support/)*
-
 ### Run the deployment script
 
 **Note:** before running the commands below, all target hosts must
@@ -186,9 +178,9 @@ bash ./generate_config.sh # generate the deployment files
-The script generates the deployment files for the two parties 10000 and 9999 and packs them into tar files. It then copies the tar files `confs-.tar` and `serving-.tar` to the corresponding party's host and unpacks them, by default under `/data/projects/fate`. Finally it logs in to those hosts remotely and starts the FATE instances with the docker compose command.
+The script generates the deployment files for the two parties 10000 and 9999 and packs them into tar files. It then copies the tar file `confs-.tar` to the corresponding party's host and unpacks it, by default under `/data/projects/fate`. Finally it logs in to those hosts remotely and starts the FATE instances with the docker compose command.
 
-By default the script starts the training and the serving cluster together. If you need to start them separately, add `--training` or `--serving` to `docker_deploy.sh` as shown below.
+By default the script starts the training and the serving cluster together. If you need to start them separately, add `--training` to `docker_deploy.sh` as shown below.
 
 (Optional) To deploy the training cluster of every party, run:
 
 ```bash
 bash ./docker_deploy.sh all --training
 ```
 
-(Optional) To deploy the serving cluster of every party, run:
-
-```bash
-bash ./docker_deploy.sh all --serving
-```
-
 (Optional) To deploy FATE to a single target host, use the following command with that party's ID (10000 in the example below):
 
 ```bash
@@ -224,20 +210,19 @@ ssh fate@192.168.7.1
 
 ```bash
 cd /data/projects/fate/confs-10000
-docker compose ps
+docker-compose ps
 ```
 
 The output is shown below; if every component is in the `Up` state, and fateflow is additionally marked (healthy), the deployment succeeded.
 
 ```bash
 NAME                           IMAGE                                 COMMAND                  SERVICE          CREATED              STATUS                        PORTS
-confs-10000-client-1           federatedai/client:2.0.0-release      "bash -c 'pipeline i…"   client           About a minute ago   Up About a minute             0.0.0.0:20000->20000/tcp, :::20000->20000/tcp
-confs-10000-clustermanager-1   federatedai/eggroll:2.0.0-release     "/tini -- bash -c 'j…"   clustermanager   About a minute ago   Up About a minute             4670/tcp
-confs-10000-fateboard-1        federatedai/fateboard:2.0.0-release   "/bin/sh -c 'java -D…"   fateboard        About a minute ago   Up About a minute             0.0.0.0:8080->8080/tcp, :::8080->8080/tcp
-confs-10000-fateflow-1         federatedai/fateflow:2.0.0-release    "/bin/bash -c 'set -…"   fateflow         About a minute ago   Up About a minute (healthy)   0.0.0.0:9360->9360/tcp, :::9360->9360/tcp, 0.0.0.0:9380->9380/tcp, :::9380->9380/tcp
-confs-10000-mysql-1            mysql:8.0.28                          "docker-entrypoint.s…"   mysql            About a minute ago   Up About a minute             3306/tcp, 33060/tcp
-confs-10000-nodemanager-1      federatedai/eggroll:2.0.0-release     "/tini -- bash -c 'j…"   nodemanager      About a minute ago   Up About a minute             4671/tcp
-confs-10000-osx-1              federatedai/osx:2.0.0-release         "/tini -- bash -c 'j…"   osx              About a minute ago   Up About a minute             0.0.0.0:9370->9370/tcp, :::9370->9370/tcp
+confs-10000-clustermanager-1   federatedai/eggroll:3.2.0-release     "/tini -- bash -c 'j…"   clustermanager   About a minute ago   Up About a minute             4670/tcp
+confs-10000-fateflow-1         federatedai/fateflow:2.2.0-release    "/bin/bash -c 'set -…"   fateflow         About a minute ago   Up About a minute (healthy)   192.168.7.1:9360->9360/tcp, :::9360->9360/tcp, 192.168.7.1:9380->9380/tcp, :::9380->9380/tcp
+confs-10000-mysql-1            mysql:8.0.28                          "docker-entrypoint.s…"   mysql            About a minute ago   Up About a minute             3306/tcp, 33060/tcp
+confs-10000-nodemanager-1      federatedai/eggroll:3.2.0-release     "/tini -- bash -c 'j…"   nodemanager      About a minute ago   Up About a minute             4671/tcp
+confs-10000-osx-1              federatedai/osx:2.2.0-release         "/tini -- bash -c 'j…"   osx              About a minute ago   Up About a minute             192.168.7.1:9370->9370/tcp, :::9370->9370/tcp
+confs-10000-fateboard-1        federatedai/fateboard:2.1.1-release   "sh -c 'java -Dsprin…"   fateboard        About a minute ago   Up About a minute             192.168.7.1:8080->8080/tcp
 ```
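If a service is not `Up`, or fateflow never turns (healthy), a quick first check (an editorial suggestion, not part of the original guide) is to inspect that service's logs from the same compose project:

```bash
cd /data/projects/fate/confs-10000
# follow the most recent log lines of the fateflow service
docker-compose logs --tail=100 -f fateflow
```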
 ### Verify the deployment
 
 After FATE has been started successfully on docker-compose, verify that each service works properly, as follows:
 
 ```bash
 # run the following commands on 192.168.7.1
-# enter the client container
-$ docker compose exec client bash
+# enter the fateflow container
+$ docker-compose exec fateflow bash
 # toy test
 $ flow test toy --guest-party-id 10000 --host-party-id 9999
 ```
 
 If the test passes, the screen prints a message similar to:
 
 ```bash
-"2019-08-29 07:21:25,353 - secure_add_guest.py[line:96] - INFO: begin to init parameters of secure add example guest"
-"2019-08-29 07:21:25,354 - secure_add_guest.py[line:99] - INFO: begin to make guest data"
-"2019-08-29 07:21:26,225 - secure_add_guest.py[line:102] - INFO: split data into two random parts"
-"2019-08-29 07:21:29,140 - secure_add_guest.py[line:105] - INFO: share one random part data to host"
-"2019-08-29 07:21:29,237 - secure_add_guest.py[line:108] - INFO: get share of one random part data from host"
-"2019-08-29 07:21:33,073 - secure_add_guest.py[line:111] - INFO: begin to get sum of guest and host"
-"2019-08-29 07:21:33,920 - secure_add_guest.py[line:114] - INFO: receive host sum from guest"
-"2019-08-29 07:21:34,118 - secure_add_guest.py[line:121] - INFO: success to calculate secure_sum, it is 2000.0000000000002"
+toy test job xxxxx is success
 ```
 
-### Verify the Serving-Service
+### Upload data and start a job
 
 #### Host-side operations
 
-##### Enter the party 10000 client container
+##### Enter the party 10000 fateflow container
 
 ```bash
 cd /data/projects/fate/confs-10000
-docker compose exec client bash
+docker-compose exec fateflow bash
 ```
 
 ##### Upload the host data
-
-```bash
-flow data upload -c fateflow/examples/upload/upload_host.json
+Run the python script below to upload the data:
+```python
+# upload data (one side only; for both sides, run this again at the other party)
+from fate_client.pipeline import FateFlowPipeline
+
+guest_data_path="/data/projects/fate/examples/data/breast_hetero_guest.csv"
+host_data_path="/data/projects/fate/examples/data/breast_hetero_host.csv"
+
+data_pipeline = FateFlowPipeline().set_parties(local="0")
+guest_meta = {
+    "delimiter": ",", "dtype": "float64", "label_type": "int64","label_name": "y", "match_id_name": "id"
+    }
+host_meta = {
+    "delimiter": ",", "input_format": "dense", "match_id_name": "id"
+    }
+data_pipeline.transform_local_file_to_dataframe(file=guest_data_path, namespace="experiment", name="breast_hetero_guest",
+                                                meta=guest_meta, head=True, extend_sid=True)
+data_pipeline.transform_local_file_to_dataframe(file=host_data_path, namespace="experiment", name="breast_hetero_host",
+                                                meta=host_meta, head=True, extend_sid=True)
 ```
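Since guest and host perform the same upload with different metadata, the inline script can be folded into a small helper. A sketch (editorial; it reuses only the fate_client calls already shown above):

```python
from fate_client.pipeline import FateFlowPipeline

def upload_csv(path, name, meta, namespace="experiment"):
    """Upload one local CSV as a FATE dataframe, mirroring the inline script."""
    pipeline = FateFlowPipeline().set_parties(local="0")
    pipeline.transform_local_file_to_dataframe(
        file=path, namespace=namespace, name=name,
        meta=meta, head=True, extend_sid=True)

# usage, mirroring the host-side upload above
guest_meta = {"delimiter": ",", "dtype": "float64", "label_type": "int64",
              "label_name": "y", "match_id_name": "id"}
upload_csv("/data/projects/fate/examples/data/breast_hetero_guest.csv",
           "breast_hetero_guest", guest_meta)
```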
 #### Guest-side operations
 
-##### Enter the party 9999 client container
+##### Enter the party 9999 fateflow container
 
 ```bash
 cd /data/projects/fate/confs-9999
-docker compose exec client bash
+docker-compose exec fateflow bash
 ```
 
 ##### Upload the guest data
-
-```bash
-flow data upload -c fateflow/examples/upload/upload_guest.json
+Run the python script below to upload the data:
+```python
+# upload data (one side only; for both sides, run this again at the other party)
+from fate_client.pipeline import FateFlowPipeline
+
+guest_data_path="/data/projects/fate/examples/data/breast_hetero_guest.csv"
+host_data_path="/data/projects/fate/examples/data/breast_hetero_host.csv"
+
+data_pipeline = FateFlowPipeline().set_parties(local="0")
+guest_meta = {
+    "delimiter": ",", "dtype": "float64", "label_type": "int64","label_name": "y", "match_id_name": "id"
+    }
+host_meta = {
+    "delimiter": ",", "input_format": "dense", "match_id_name": "id"
+    }
+data_pipeline.transform_local_file_to_dataframe(file=guest_data_path, namespace="experiment", name="breast_hetero_guest",
+                                                meta=guest_meta, head=True, extend_sid=True)
+data_pipeline.transform_local_file_to_dataframe(file=host_data_path, namespace="experiment", name="breast_hetero_host",
+                                                meta=host_meta, head=True, extend_sid=True)
 ```
 
 ##### Submit a job
 
-```bash
-flow job submit -d fateflow/examples/lr/test_hetero_lr_job_dsl.json -c fateflow/examples/lr/test_hetero_lr_job_conf.json
-```
-
-output:
-
-```json
-{
-    "data": {
-        "board_url": "http://fateboard:8080/index.html#/dashboard?job_id=202111230933232084530&role=guest&party_id=9999",
-        "code": 0,
-        "dsl_path": "/data/projects/fate/fate_flow/jobs/202111230933232084530/job_dsl.json",
-        "job_id": "202111230933232084530",
-        "logs_directory": "/data/projects/fate/fate_flow/logs/202111230933232084530",
-        "message": "success",
-        "model_info": {
-            "model_id": "arbiter-10000#guest-9999#host-10000#model",
-            "model_version": "202111230933232084530"
-        },
-        "pipeline_dsl_path": "/data/projects/fate/fate_flow/jobs/202111230933232084530/pipeline_dsl.json",
-        "runtime_conf_on_party_path": "/data/projects/fate/fate_flow/jobs/202111230933232084530/guest/9999/job_runtime_on_party_conf.json",
-        "runtime_conf_path": "/data/projects/fate/fate_flow/jobs/202111230933232084530/job_runtime_conf.json",
-        "train_runtime_conf_path": "/data/projects/fate/fate_flow/jobs/202111230933232084530/train_runtime_conf.json"
-    },
-    "jobId": "202111230933232084530",
-    "retcode": 0,
-    "retmsg": "success"
-}
-```
-
-##### Check the status of the training job
-
-```bash
-flow task query -r guest -j 202111230933232084530 | grep -w f_status
-```
-
-output:
-
-```bash
-    "f_status": "success",
-    "f_status": "waiting",
-    "f_status": "running",
-    "f_status": "waiting",
-    "f_status": "waiting",
-    "f_status": "success",
-    "f_status": "success",
-```
-
-Wait until every `waiting` status has become `success`.
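The walkthrough that used to follow (model deploy, load and bind for serving) is removed by this patch. As an editorial pointer only: a job submitted through the pipeline API can still be tracked by its job id, assuming the `flow` CLI shipped with fate_client is available in the fateflow container (the toy test above already relies on it):

```bash
# query a job's status by id; the id is printed when the job is submitted
flow job query -j $JOB_ID
```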
-
-##### Deploy the model
-
-```bash
-flow model deploy --model-id arbiter-10000#guest-9999#host-10000#model --model-version 202111230933232084530
-```
-
-```json
-{
-    "data": {
-        "arbiter": {
-            "10000": 0
-        },
-        "detail": {
-            "arbiter": {
-                "10000": {
-                    "retcode": 0,
-                    "retmsg": "deploy model of role arbiter 10000 success"
-                }
-            },
-            "guest": {
-                "9999": {
-                    "retcode": 0,
-                    "retmsg": "deploy model of role guest 9999 success"
-                }
-            },
-            "host": {
-                "10000": {
-                    "retcode": 0,
-                    "retmsg": "deploy model of role host 10000 success"
-                }
-            }
-        },
-        "guest": {
-            "9999": 0
-        },
-        "host": {
-            "10000": 0
-        },
-        "model_id": "arbiter-10000#guest-9999#host-10000#model",
-        "model_version": "202111230954255210490"
-    },
-    "retcode": 0,
-    "retmsg": "success"
-}
-```
-
-*The `model_version` needed in the following steps is the `"model_version": "202111230954255210490"` obtained in this step.*
-
-##### Edit the model-loading configuration
-
-```bash
-cat > fateflow/examples/model/publish_load_model.json < fateflow/examples/model/bind_model_service.json </

# remove the docker-compose deployment files
#### CPU instruction set issue
-Solution: see the storage-service section of the [wiki](https://github.com/FederatedAI/KubeFATE/wiki/KubeFATE) page
+Solution: see the storage-service section of the [wiki](https://github.com/FederatedAI/KubeFATE/wiki/KubeFATE) page.
diff --git a/deploy/docker-compose/docker-deploy/docker_deploy.sh b/deploy/docker-compose/docker-deploy/docker_deploy.sh
index 335b6e9b4e..4ed50fecd9 100644
--- a/deploy/docker-compose/docker-deploy/docker_deploy.sh
+++ b/deploy/docker-compose/docker-deploy/docker_deploy.sh
@@ -18,9 +18,10 @@ cd $BASEDIR
 WORKINGDIR=$(pwd)
 
 # fetch fate-python image
+echo "fetch fate-python image"
 source ${WORKINGDIR}/.env
 source ${WORKINGDIR}/parties.conf
-
+echo "finished"
 cd ${WORKINGDIR}
 
 Deploy() {
@@ -41,12 +42,14 @@ Deploy() {
     if [ "$2" != "" ]; then
         case $2 in
         --training)
+            echo "training"
             DeployPartyInternal $party
             if [ "${exchangeip}" != "" ]; then
                 DeployPartyInternal exchange
             fi
             ;;
         --serving)
+            echo "serving"
             DeployPartyServing $party
             ;;
         esac
@@ -143,33 +146,33 @@ DeployPartyInternal() {
         echo "Unable to find Party: $target_party_id, please check your input."
         return 1
     fi
-
     if [ "$3" != "" ]; then
         user=$3
     fi
-
+    echo "handleLocally confs"
     handleLocally confs
+    echo "handleLocally confs finished"
     if [ "$local_flag" == "true" ]; then
         return 0
     fi
-
+    echo "scp -P ${SSH_PORT} ${WORKINGDIR}/outputs/confs-$target_party_id.tar $user@$target_party_ip:~/"
     scp -P ${SSH_PORT} ${WORKINGDIR}/outputs/confs-$target_party_id.tar $user@$target_party_ip:~/
     #rm -f ${WORKINGDIR}/outputs/confs-$target_party_id.tar
     echo "$target_party_ip training cluster copy is ok!"
-    ssh -p ${SSH_PORT} -tt $user@$target_party_ip <

diff --git a/deploy/docker-compose/docker-deploy/generate_config.sh b/deploy/docker-compose/docker-deploy/generate_config.sh

-    sed -i "s##${fateboard_username}#g" ./confs-"$party_id"/confs/fateboard/conf/application.properties
-    sed -i "s##${fateboard_password}#g" ./confs-"$party_id"/confs/fateboard/conf/application.properties
+    sed -i "s##${fateboard_username}#g" ./confs-"$party_id"/confs/fateboard/conf/application.properties
+    sed -i "s##${fateboard_password}#g" ./confs-"$party_id"/confs/fateboard/conf/application.properties
+    echo fateboard module of "$party_id" done!
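# --- Editorial aside (not part of this patch) --------------------------------
# generate_config.sh fills the packaged templates by substituting placeholder
# tokens with values from parties.conf via sed, as in the fateboard block
# above. A minimal standalone sketch of the same pattern; the
# <fateboard_username> token name is an assumption for illustration, since the
# real templates define their own tokens (stripped from this patch's rendering):
#
#   fateboard_username=admin
#   sed -i "s#<fateboard_username>#${fateboard_username}#g" conf/application.properties
# ------------------------------------------------------------------------------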
# mysql @@ -521,26 +522,27 @@ EOF module_name=exchange cd ${WORKINGDIR} rm -rf confs-exchange/ - mkdir -p confs-exchange/conf/ + mkdir -p confs-exchange/conf/eggroll + mkdir -p confs-exchange/conf/osx cp ${WORKINGDIR}/.env confs-exchange/ cp training_template/docker-compose-exchange.yml confs-exchange/docker-compose.yml - cp -r training_template/backends/eggroll/conf/* confs-exchange/conf/ - + cp -r training_template/backends/eggroll/conf/* confs-exchange/conf/eggroll + cp -r training_template/backends/osx/conf/* confs-exchange/conf/osx if [ "$RegistryURI" != "" ]; then sed -i 's#federatedai#${RegistryURI}/federatedai#g' ./confs-exchange/docker-compose.yml fi - sed -i "s##${proxy_ip}#g" ./confs-exchange/conf/eggroll.properties - sed -i "s##${proxy_port}#g" ./confs-exchange/conf/eggroll.properties - sed -i "s##exchange#g" ./confs-exchange/conf/eggroll.properties - sed -i "s/coordinator=.*/coordinator=exchange/g" ./confs-exchange/conf/eggroll.properties - sed -i "s/ip=.*/ip=0.0.0.0/g" ./confs-exchange/conf/eggroll.properties - - cat >./confs-exchange/conf/route_table.json <#${proxy_ip}#g" ./confs-exchange/conf/eggroll/eggroll.properties + sed -i "s##${proxy_port}#g" ./confs-exchange/conf/eggroll/eggroll.properties + sed -i "s##exchange#g" ./confs-exchange/conf/eggroll/eggroll.properties + sed -i "s/coordinator=.*/coordinator=exchange/g" ./confs-exchange/conf/eggroll/eggroll.properties + sed -i "s/ip=.*/ip=0.0.0.0/g" ./confs-exchange/conf/eggroll/eggroll.properties + cat >./confs-exchange/conf/osx/broker/route_table.json < - - fs.default.name - hdfs://0.0.0.0:9000 - - diff --git a/deploy/docker-compose/docker-deploy/training_template/backends/spark/hadoop/hadoop.env b/deploy/docker-compose/docker-deploy/training_template/backends/spark/hadoop/hadoop.env deleted file mode 100644 index 95b3d10289..0000000000 --- a/deploy/docker-compose/docker-deploy/training_template/backends/spark/hadoop/hadoop.env +++ /dev/null @@ -1,43 +0,0 @@ -CORE_CONF_fs_defaultFS=hdfs://namenode:9000 -CORE_CONF_hadoop_http_staticuser_user=root -CORE_CONF_hadoop_proxyuser_hue_hosts=* -CORE_CONF_hadoop_proxyuser_hue_groups=* -CORE_CONF_io_compression_codecs=org.apache.hadoop.io.compress.SnappyCodec - -HDFS_CONF_dfs_webhdfs_enabled=true -HDFS_CONF_dfs_permissions_enabled=false -HDFS_CONF_dfs_namenode_datanode_registration_ip___hostname___check=false - -YARN_CONF_yarn_log___aggregation___enable=true -YARN_CONF_yarn_log_server_url=http://historyserver:8188/applicationhistory/logs/ -YARN_CONF_yarn_resourcemanager_recovery_enabled=true -YARN_CONF_yarn_resourcemanager_store_class=org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore -YARN_CONF_yarn_resourcemanager_scheduler_class=org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler -YARN_CONF_yarn_scheduler_capacity_root_default_maximum___allocation___mb=8192 -YARN_CONF_yarn_scheduler_capacity_root_default_maximum___allocation___vcores=4 -YARN_CONF_yarn_resourcemanager_fs_state___store_uri=/rmstate -YARN_CONF_yarn_resourcemanager_system___metrics___publisher_enabled=true -YARN_CONF_yarn_resourcemanager_hostname=resourcemanager -YARN_CONF_yarn_resourcemanager_address=resourcemanager:8032 -YARN_CONF_yarn_resourcemanager_scheduler_address=resourcemanager:8030 -YARN_CONF_yarn_resourcemanager_resource__tracker_address=resourcemanager:8031 -YARN_CONF_yarn_timeline___service_enabled=true -YARN_CONF_yarn_timeline___service_generic___application___history_enabled=true -YARN_CONF_yarn_timeline___service_hostname=historyserver 
-YARN_CONF_mapreduce_map_output_compress=true -YARN_CONF_mapred_map_output_compress_codec=org.apache.hadoop.io.compress.SnappyCodec -YARN_CONF_yarn_nodemanager_resource_memory___mb=16384 -YARN_CONF_yarn_nodemanager_resource_cpu___vcores=8 -YARN_CONF_yarn_nodemanager_disk___health___checker_max___disk___utilization___per___disk___percentage=98.5 -YARN_CONF_yarn_nodemanager_remote___app___log___dir=/app-logs -YARN_CONF_yarn_nodemanager_aux___services=mapreduce_shuffle - -MAPRED_CONF_mapreduce_framework_name=yarn -MAPRED_CONF_mapred_child_java_opts=-Xmx4096m -MAPRED_CONF_mapreduce_map_memory_mb=4096 -MAPRED_CONF_mapreduce_reduce_memory_mb=8192 -MAPRED_CONF_mapreduce_map_java_opts=-Xmx3072m -MAPRED_CONF_mapreduce_reduce_java_opts=-Xmx6144m -MAPRED_CONF_yarn_app_mapreduce_am_env=HADOOP_MAPRED_HOME=/opt/hadoop-3.2.1/ -MAPRED_CONF_mapreduce_map_env=HADOOP_MAPRED_HOME=/opt/hadoop-3.2.1/ -MAPRED_CONF_mapreduce_reduce_env=HADOOP_MAPRED_HOME=/opt/hadoop-3.2.1/ diff --git a/deploy/docker-compose/docker-deploy/training_template/backends/spark/nginx/nginx.conf b/deploy/docker-compose/docker-deploy/training_template/backends/spark/nginx/nginx.conf deleted file mode 100644 index e448fd2863..0000000000 --- a/deploy/docker-compose/docker-deploy/training_template/backends/spark/nginx/nginx.conf +++ /dev/null @@ -1,68 +0,0 @@ - -#user nobody; -worker_processes 2; - -#error_log logs/error.log; -#error_log logs/error.log notice; -error_log /dev/stdout info; -error_log /dev/stderr error; - -#pid logs/nginx.pid; - - -events { - worker_connections 1024; -} - - -http { - include mime.types; - default_type application/octet-stream; - - log_format main '$remote_addr - $remote_user [$time_local] "$request" "$http_host" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for" ' - '$upstream_status $upstream_addr ' - '$request_time $upstream_response_time' - ; - - access_log logs/access.log main; - - sendfile on; - #tcp_nopush on; - - keepalive_timeout 65; - underscores_in_headers on; - - #gzip on; - lua_package_path "$prefix/lua/?.lua;;"; - init_worker_by_lua_file 'lua/initialize.lua'; - - upstream http_cluster { - server fateflow:9380; # just an invalid address as a place holder - balancer_by_lua_file 'lua/balancer.lua'; - } - - upstream grpc_cluster { - server fateflow:9360; # just an invalid address as a place holder - balancer_by_lua_file 'lua/balancer.lua'; - } - - include vhost/*.conf; -} - -stream { - log_format tcp_proxy '$remote_addr [$time_local] ' - '$protocol $status $bytes_sent $bytes_received ' - '$session_time "$upstream_addr" ' - '"$upstream_bytes_sent" "$upstream_bytes_received" "$upstream_connect_time"'; - - access_log logs/tcp-access.log tcp_proxy; - - server { - listen 9128; - proxy_connect_timeout 1s; - proxy_timeout 3s; - proxy_pass 127.0.0.1:3128; - } -} diff --git a/deploy/docker-compose/docker-deploy/training_template/backends/spark/nginx/route_table.yaml b/deploy/docker-compose/docker-deploy/training_template/backends/spark/nginx/route_table.yaml deleted file mode 100644 index 57ae70325a..0000000000 --- a/deploy/docker-compose/docker-deploy/training_template/backends/spark/nginx/route_table.yaml +++ /dev/null @@ -1,27 +0,0 @@ -default: - proxy: - - host: 127.0.0.1 - http_port: 9300 - grpc_port: 9310 -local: - test_proxy: - - host: 127.0.0.1 - http_port: 9302 -9999: - proxy: - - host: 127.0.0.1 - http_port: 9300 - grpc_port: 9310 - fateflow: - - host: 127.0.0.1 - http_port: 9380 - grpc_port: 9360 -10000: - proxy: - - host: 127.0.0.1 - http_port: 9300 - 
grpc_port: 9310 - fateflow: - - host: 127.0.0.1 - http_port: 9380 - grpc_port: 9360 diff --git a/deploy/docker-compose/docker-deploy/training_template/backends/spark/pulsar/standalone.conf b/deploy/docker-compose/docker-deploy/training_template/backends/spark/pulsar/standalone.conf deleted file mode 100644 index 89793c753d..0000000000 --- a/deploy/docker-compose/docker-deploy/training_template/backends/spark/pulsar/standalone.conf +++ /dev/null @@ -1,899 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -### --- General broker settings --- ### - -# Message Size -maxMessageSize=134217728 - -# Zookeeper quorum connection string -zookeeperServers= - -# Configuration Store connection string -configurationStoreServers= - -brokerServicePort=6650 - - -# Port to use to server HTTP request -webServicePort=8080 - -# Hostname or IP address the service binds on, default is 0.0.0.0. -bindAddress=0.0.0.0 - -# Hostname or IP address the service advertises to the outside world. If not set, the value of InetAddress.getLocalHost().getHostName() is used. -advertisedAddress= - -# Number of threads to use for Netty IO. Default is set to 2 * Runtime.getRuntime().availableProcessors() -numIOThreads= - -# Number of threads to use for ordered executor. The ordered executor is used to operate with zookeeper, -# such as init zookeeper client, get namespace policies from zookeeper etc. It also used to split bundle. Default is 8 -numOrderedExecutorThreads=8 - -# Number of threads to use for HTTP requests processing. Default is set to 2 * Runtime.getRuntime().availableProcessors() -numHttpServerThreads= - -# Number of thread pool size to use for pulsar broker service. -# The executor in thread pool will do basic broker operation like load/unload bundle, update managedLedgerConfig, -# update topic/subscription/replicator message dispatch rate, do leader election etc. -# Default is Runtime.getRuntime().availableProcessors() -numExecutorThreadPoolSize= - -# Number of thread pool size to use for pulsar zookeeper callback service -# The cache executor thread pool is used for restarting global zookeeper session. -# Default is 10 -numCacheExecutorThreadPoolSize=10 - -# Max concurrent web requests -maxConcurrentHttpRequests=1024 - -# Name of the cluster to which this broker belongs to -clusterName=standalone - -# Enable cluster's failure-domain which can distribute brokers into logical region -failureDomainsEnabled=false - -# Zookeeper session timeout in milliseconds -zooKeeperSessionTimeoutMillis=30000 - -# ZooKeeper operation timeout in seconds -zooKeeperOperationTimeoutSeconds=30 - -# ZooKeeper cache expiry time in seconds -zooKeeperCacheExpirySeconds=300 - -# Time to wait for broker graceful shutdown. 
After this time elapses, the process will be killed -brokerShutdownTimeoutMs=60000 - -# Flag to skip broker shutdown when broker handles Out of memory error -skipBrokerShutdownOnOOM=false - -# Enable backlog quota check. Enforces action on topic when the quota is reached -backlogQuotaCheckEnabled=true - -# How often to check for topics that have reached the quota -backlogQuotaCheckIntervalInSeconds=60 - -# Default per-topic backlog quota limit -backlogQuotaDefaultLimitGB=10 - -# Default ttl for namespaces if ttl is not already configured at namespace policies. (disable default-ttl with value 0) -ttlDurationDefaultInSeconds=0 - -# Enable the deletion of inactive topics -brokerDeleteInactiveTopicsEnabled=true - -# How often to check for inactive topics -brokerDeleteInactiveTopicsFrequencySeconds=60 - -# Max pending publish requests per connection to avoid keeping large number of pending -# requests in memory. Default: 1000 -maxPendingPublishdRequestsPerConnection=1000 - -# How frequently to proactively check and purge expired messages -messageExpiryCheckIntervalInMinutes=5 - -# How long to delay rewinding cursor and dispatching messages when active consumer is changed -activeConsumerFailoverDelayTimeMillis=1000 - -# How long to delete inactive subscriptions from last consuming -# When it is 0, inactive subscriptions are not deleted automatically -subscriptionExpirationTimeMinutes=0 - -# Enable subscription message redelivery tracker to send redelivery count to consumer (default is enabled) -subscriptionRedeliveryTrackerEnabled=true - -# On KeyShared subscriptions, with default AUTO_SPLIT mode, use splitting ranges or -# consistent hashing to reassign keys to new consumers -subscriptionKeySharedUseConsistentHashing=false - -# On KeyShared subscriptions, number of points in the consistent-hashing ring. -# The higher the number, the more equal the assignment of keys to consumers -subscriptionKeySharedConsistentHashingReplicaPoints=100 - -# How frequently to proactively check and purge expired subscription -subscriptionExpiryCheckIntervalInMinutes=5 - -# Set the default behavior for message deduplication in the broker -# This can be overridden per-namespace. If enabled, broker will reject -# messages that were already stored in the topic -brokerDeduplicationEnabled=false - -# Maximum number of producer information that it's going to be -# persisted for deduplication purposes -brokerDeduplicationMaxNumberOfProducers=10000 - -# Number of entries after which a dedup info snapshot is taken. -# A bigger interval will lead to less snapshots being taken though it would -# increase the topic recovery time, when the entries published after the -# snapshot need to be replayed -brokerDeduplicationEntriesInterval=1000 - -# Time of inactivity after which the broker will discard the deduplication information -# relative to a disconnected producer. Default is 6 hours. -brokerDeduplicationProducerInactivityTimeoutMinutes=360 - -# When a namespace is created without specifying the number of bundle, this -# value will be used as the default -defaultNumberOfNamespaceBundles=4 - -# Enable check for minimum allowed client library version -clientLibraryVersionCheckEnabled=false - -# Path for the file used to determine the rotation status for the broker when responding -# to service discovery health checks -statusFilePath=/usr/local/apache/htdocs - -# Max number of unacknowledged messages allowed to receive messages by a consumer on a shared subscription. 
Broker will stop sending -# messages to consumer once, this limit reaches until consumer starts acknowledging messages back -# Using a value of 0, is disabling unackeMessage limit check and consumer can receive messages without any restriction -maxUnackedMessagesPerConsumer=50000 - -# Max number of unacknowledged messages allowed per shared subscription. Broker will stop dispatching messages to -# all consumers of the subscription once this limit reaches until consumer starts acknowledging messages back and -# unack count reaches to limit/2. Using a value of 0, is disabling unackedMessage-limit -# check and dispatcher can dispatch messages without any restriction -maxUnackedMessagesPerSubscription=200000 - -# Max number of unacknowledged messages allowed per broker. Once this limit reaches, broker will stop dispatching -# messages to all shared subscription which has higher number of unack messages until subscriptions start -# acknowledging messages back and unack count reaches to limit/2. Using a value of 0, is disabling -# unackedMessage-limit check and broker doesn't block dispatchers -maxUnackedMessagesPerBroker=0 - -# Once broker reaches maxUnackedMessagesPerBroker limit, it blocks subscriptions which has higher unacked messages -# than this percentage limit and subscription will not receive any new messages until that subscription acks back -# limit/2 messages -maxUnackedMessagesPerSubscriptionOnBrokerBlocked=0.16 - -# Tick time to schedule task that checks topic publish rate limiting across all topics -# Reducing to lower value can give more accuracy while throttling publish but -# it uses more CPU to perform frequent check. (Disable publish throttling with value 0) -topicPublisherThrottlingTickTimeMillis=2 - -# Tick time to schedule task that checks broker publish rate limiting across all topics -# Reducing to lower value can give more accuracy while throttling publish but -# it uses more CPU to perform frequent check. (Disable publish throttling with value 0) -brokerPublisherThrottlingTickTimeMillis=50 - -# Max Rate(in 1 seconds) of Message allowed to publish for a broker if broker publish rate limiting enabled -# (Disable message rate limit with value 0) -brokerPublisherThrottlingMaxMessageRate=0 - -# Max Rate(in 1 seconds) of Byte allowed to publish for a broker if broker publish rate limiting enabled -# (Disable byte rate limit with value 0) -brokerPublisherThrottlingMaxByteRate=0 - -# Default messages per second dispatch throttling-limit for every topic. Using a value of 0, is disabling default -# message dispatch-throttling -dispatchThrottlingRatePerTopicInMsg=0 - -# Default bytes per second dispatch throttling-limit for every topic. Using a value of 0, is disabling -# default message-byte dispatch-throttling -dispatchThrottlingRatePerTopicInByte=0 - -# Dispatch rate-limiting relative to publish rate. -# (Enabling flag will make broker to dynamically update dispatch-rate relatively to publish-rate: -# throttle-dispatch-rate = (publish-rate + configured dispatch-rate). -dispatchThrottlingRateRelativeToPublishRate=false - -# By default we enable dispatch-throttling for both caught up consumers as well as consumers who have -# backlog. 
-dispatchThrottlingOnNonBacklogConsumerEnabled=true - -# Precise dispathcer flow control according to history message number of each entry -preciseDispatcherFlowControl=false - -# Max number of concurrent lookup request broker allows to throttle heavy incoming lookup traffic -maxConcurrentLookupRequest=50000 - -# Max number of concurrent topic loading request broker allows to control number of zk-operations -maxConcurrentTopicLoadRequest=5000 - -# Max concurrent non-persistent message can be processed per connection -maxConcurrentNonPersistentMessagePerConnection=1000 - -# Number of worker threads to serve non-persistent topic -numWorkerThreadsForNonPersistentTopic=8 - -# Enable broker to load persistent topics -enablePersistentTopics=true - -# Enable broker to load non-persistent topics -enableNonPersistentTopics=true - -# Max number of producers allowed to connect to topic. Once this limit reaches, Broker will reject new producers -# until the number of connected producers decrease. -# Using a value of 0, is disabling maxProducersPerTopic-limit check. -maxProducersPerTopic=0 - -# Enforce producer to publish encrypted messages.(default disable). -encryptionRequireOnProducer=false - -# Max number of consumers allowed to connect to topic. Once this limit reaches, Broker will reject new consumers -# until the number of connected consumers decrease. -# Using a value of 0, is disabling maxConsumersPerTopic-limit check. -maxConsumersPerTopic=0 - -# Max number of subscriptions allowed to subscribe to topic. Once this limit reaches, broker will reject -# new subscription until the number of subscribed subscriptions decrease. -# Using a value of 0, is disabling maxSubscriptionsPerTopic limit check. -maxSubscriptionsPerTopic=0 - -# Max number of consumers allowed to connect to subscription. Once this limit reaches, Broker will reject new consumers -# until the number of connected consumers decrease. -# Using a value of 0, is disabling maxConsumersPerSubscription-limit check. -maxConsumersPerSubscription=0 - -# Max number of partitions per partitioned topic -# Use 0 or negative number to disable the check -maxNumPartitionsPerPartitionedTopic=0 - -### --- TLS --- ### -# Deprecated - Use webServicePortTls and brokerServicePortTls instead -tlsEnabled=false - -# Tls cert refresh duration in seconds (set 0 to check on every new connection) -tlsCertRefreshCheckDurationSec=300 - -# Path for the TLS certificate file -tlsCertificateFilePath= - -# Path for the TLS private key file -tlsKeyFilePath= - -# Path for the trusted TLS certificate file. -# This cert is used to verify that any certs presented by connecting clients -# are signed by a certificate authority. If this verification -# fails, then the certs are untrusted and the connections are dropped. -tlsTrustCertsFilePath= - -# Accept untrusted TLS certificate from client. -# If true, a client with a cert which cannot be verified with the -# 'tlsTrustCertsFilePath' cert will allowed to connect to the server, -# though the cert will not be used for client authentication. -tlsAllowInsecureConnection=false - -# Specify the tls protocols the broker will use to negotiate during TLS handshake -# (a comma-separated list of protocol names). -# Examples:- [TLSv1.2, TLSv1.1, TLSv1] -tlsProtocols= - -# Specify the tls cipher the broker will use to negotiate during TLS Handshake -# (a comma-separated list of ciphers). 
-# Examples:- [TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256] -tlsCiphers= - -# Trusted client certificates are required for to connect TLS -# Reject the Connection if the Client Certificate is not trusted. -# In effect, this requires that all connecting clients perform TLS client -# authentication. -tlsRequireTrustedClientCertOnConnect=false - -### --- KeyStore TLS config variables --- ### -# Enable TLS with KeyStore type configuration in broker. -tlsEnabledWithKeyStore=false - -# TLS Provider for KeyStore type -tlsProvider= - -# TLS KeyStore type configuration in broker: JKS, PKCS12 -tlsKeyStoreType=JKS - -# TLS KeyStore path in broker -tlsKeyStore= - -# TLS KeyStore password for broker -tlsKeyStorePassword= - -# TLS TrustStore type configuration in broker: JKS, PKCS12 -tlsTrustStoreType=JKS - -# TLS TrustStore path in broker -tlsTrustStore= - -# TLS TrustStore password for broker -tlsTrustStorePassword= - -# Whether internal client use KeyStore type to authenticate with Pulsar brokers -brokerClientTlsEnabledWithKeyStore=false - -# The TLS Provider used by internal client to authenticate with other Pulsar brokers -brokerClientSslProvider= - -# TLS TrustStore type configuration for internal client: JKS, PKCS12 -# used by the internal client to authenticate with Pulsar brokers -brokerClientTlsTrustStoreType=JKS - -# TLS TrustStore path for internal client -# used by the internal client to authenticate with Pulsar brokers -brokerClientTlsTrustStore= - -# TLS TrustStore password for internal client, -# used by the internal client to authenticate with Pulsar brokers -brokerClientTlsTrustStorePassword= - -# Specify the tls cipher the internal client will use to negotiate during TLS Handshake -# (a comma-separated list of ciphers) -# e.g. [TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256]. -# used by the internal client to authenticate with Pulsar brokers -brokerClientTlsCiphers= - -# Specify the tls protocols the broker will use to negotiate during TLS handshake -# (a comma-separated list of protocol names). -# e.g. [TLSv1.2, TLSv1.1, TLSv1] -# used by the internal client to authenticate with Pulsar brokers -brokerClientTlsProtocols= - -# Enable or disable system topic -systemTopicEnabled=false - -# Enable or disable topic level policies, topic level policies depends on the system topic -# Please enable the system topic first. -topicLevelPoliciesEnabled=false - -# If a topic remains fenced for this number of seconds, it will be closed forcefully. -# If it is set to 0 or a negative number, the fenced topic will not be closed. -topicFencingTimeoutSeconds=0 - -### --- Authentication --- ### -# Role names that are treated as "proxy roles". If the broker sees a request with -#role as proxyRoles - it will demand to see a valid original principal. -proxyRoles= - -# If this flag is set then the broker authenticates the original Auth data -# else it just accepts the originalPrincipal and authorizes it (if required). 
-authenticateOriginalAuthData=false - -# Enable authentication -authenticationEnabled=false - -# Autentication provider name list, which is comma separated list of class names -authenticationProviders= - -# Enforce authorization -authorizationEnabled=false - -# Authorization provider fully qualified class-name -authorizationProvider=org.apache.pulsar.broker.authorization.PulsarAuthorizationProvider - -# Allow wildcard matching in authorization -# (wildcard matching only applicable if wildcard-char: -# * presents at first or last position eg: *.pulsar.service, pulsar.service.*) -authorizationAllowWildcardsMatching=false - -# Role names that are treated as "super-user", meaning they will be able to do all admin -# operations and publish/consume from all topics -superUserRoles= - -# Authentication settings of the broker itself. Used when the broker connects to other brokers, -# either in same or other clusters -brokerClientAuthenticationPlugin= -brokerClientAuthenticationParameters= - -# Supported Athenz provider domain names(comma separated) for authentication -athenzDomainNames= - -# When this parameter is not empty, unauthenticated users perform as anonymousUserRole -anonymousUserRole= - -# The token "claim" that will be interpreted as the authentication "role" or "principal" by AuthenticationProviderToken (defaults to "sub" if blank) -tokenAuthClaim= - -# The token audience "claim" name, e.g. "aud", that will be used to get the audience from token. -# If not set, audience will not be verified. -tokenAudienceClaim= - -# The token audience stands for this broker. The field `tokenAudienceClaim` of a valid token, need contains this. -tokenAudience= - -### --- BookKeeper Client --- ### - -# Authentication plugin to use when connecting to bookies -bookkeeperClientAuthenticationPlugin= - -# BookKeeper auth plugin implementatation specifics parameters name and values -bookkeeperClientAuthenticationParametersName= -bookkeeperClientAuthenticationParameters= - -# Timeout for BK add / read operations -bookkeeperClientTimeoutInSeconds=30 - -# Speculative reads are initiated if a read request doesn't complete within a certain time -# Using a value of 0, is disabling the speculative reads -bookkeeperClientSpeculativeReadTimeoutInMillis=0 - -# Number of channels per bookie -bookkeeperNumberOfChannelsPerBookie=16 - -# Enable bookies health check. Bookies that have more than the configured number of failure within -# the interval will be quarantined for some time. During this period, new ledgers won't be created -# on these bookies -bookkeeperClientHealthCheckEnabled=true -bookkeeperClientHealthCheckIntervalSeconds=60 -bookkeeperClientHealthCheckErrorThresholdPerInterval=5 -bookkeeperClientHealthCheckQuarantineTimeInSeconds=1800 - -#bookie quarantine ratio to avoid all clients quarantine the high pressure bookie servers at the same time -bookkeeperClientQuarantineRatio=1.0 - -# Enable rack-aware bookie selection policy. BK will chose bookies from different racks when -# forming a new bookie ensemble -# This parameter related to ensemblePlacementPolicy in conf/bookkeeper.conf, if enabled, ensemblePlacementPolicy -# should be set to org.apache.bookkeeper.client.RackawareEnsemblePlacementPolicy -bookkeeperClientRackawarePolicyEnabled=true - -# Enable region-aware bookie selection policy. BK will chose bookies from -# different regions and racks when forming a new bookie ensemble. 
-# If enabled, the value of bookkeeperClientRackawarePolicyEnabled is ignored -# This parameter related to ensemblePlacementPolicy in conf/bookkeeper.conf, if enabled, ensemblePlacementPolicy -# should be set to org.apache.bookkeeper.client.RegionAwareEnsemblePlacementPolicy -bookkeeperClientRegionawarePolicyEnabled=false - -# Minimum number of racks per write quorum. BK rack-aware bookie selection policy will try to -# get bookies from at least 'bookkeeperClientMinNumRacksPerWriteQuorum' racks for a write quorum. -bookkeeperClientMinNumRacksPerWriteQuorum=1 - -# Enforces rack-aware bookie selection policy to pick bookies from 'bookkeeperClientMinNumRacksPerWriteQuorum' -# racks for a writeQuorum. -# If BK can't find bookie then it would throw BKNotEnoughBookiesException instead of picking random one. -bookkeeperClientEnforceMinNumRacksPerWriteQuorum=false - -# Enable/disable reordering read sequence on reading entries. -bookkeeperClientReorderReadSequenceEnabled=false - -# Enable bookie isolation by specifying a list of bookie groups to choose from. Any bookie -# outside the specified groups will not be used by the broker -bookkeeperClientIsolationGroups= - -# Enable bookie secondary-isolation group if bookkeeperClientIsolationGroups doesn't -# have enough bookie available. -bookkeeperClientSecondaryIsolationGroups= - -# Minimum bookies that should be available as part of bookkeeperClientIsolationGroups -# else broker will include bookkeeperClientSecondaryIsolationGroups bookies in isolated list. -bookkeeperClientMinAvailableBookiesInIsolationGroups= - -# Set the client security provider factory class name. -# Default: org.apache.bookkeeper.tls.TLSContextFactory -bookkeeperTLSProviderFactoryClass=org.apache.bookkeeper.tls.TLSContextFactory - -# Enable tls authentication with bookie -bookkeeperTLSClientAuthentication=false - -# Supported type: PEM, JKS, PKCS12. Default value: PEM -bookkeeperTLSKeyFileType=PEM - -#Supported type: PEM, JKS, PKCS12. Default value: PEM -bookkeeperTLSTrustCertTypes=PEM - -# Path to file containing keystore password, if the client keystore is password protected. -bookkeeperTLSKeyStorePasswordPath= - -# Path to file containing truststore password, if the client truststore is password protected. -bookkeeperTLSTrustStorePasswordPath= - -# Path for the TLS private key file -bookkeeperTLSKeyFilePath= - -# Path for the TLS certificate file -bookkeeperTLSCertificateFilePath= - -# Path for the trusted TLS certificate file -bookkeeperTLSTrustCertsFilePath= - -# Enable/disable disk weight based placement. Default is false -bookkeeperDiskWeightBasedPlacementEnabled=false - -# Set the interval to check the need for sending an explicit LAC -# A value of '0' disables sending any explicit LACs. Default is 0. -bookkeeperExplicitLacIntervalInMills=0 - -# Use older Bookkeeper wire protocol with bookie -bookkeeperUseV2WireProtocol=true - -# Expose bookkeeper client managed ledger stats to prometheus. default is false -# bookkeeperClientExposeStatsToPrometheus=false - -### --- Managed Ledger --- ### - -# Number of bookies to use when creating a ledger -managedLedgerDefaultEnsembleSize=1 - -# Number of copies to store for each message -managedLedgerDefaultWriteQuorum=1 - -# Number of guaranteed copies (acks to wait before write is complete) -managedLedgerDefaultAckQuorum=1 - -# How frequently to flush the cursor positions that were accumulated due to rate limiting. (seconds). 
-# Default is 60 seconds -managedLedgerCursorPositionFlushSeconds = 60 - -# Default type of checksum to use when writing to BookKeeper. Default is "CRC32C" -# Other possible options are "CRC32", "MAC" or "DUMMY" (no checksum). -managedLedgerDigestType=CRC32C - -# Number of threads to be used for managed ledger tasks dispatching -managedLedgerNumWorkerThreads=4 - -# Number of threads to be used for managed ledger scheduled tasks -managedLedgerNumSchedulerThreads=4 - -# Amount of memory to use for caching data payload in managed ledger. This memory -# is allocated from JVM direct memory and it's shared across all the topics -# running in the same broker. By default, uses 1/5th of available direct memory -managedLedgerCacheSizeMB= - -# Whether we should make a copy of the entry payloads when inserting in cache -managedLedgerCacheCopyEntries=false - -# Threshold to which bring down the cache level when eviction is triggered -managedLedgerCacheEvictionWatermark=0.9 - -# Configure the cache eviction frequency for the managed ledger cache (evictions/sec) -managedLedgerCacheEvictionFrequency=100.0 - -# All entries that have stayed in cache for more than the configured time, will be evicted -managedLedgerCacheEvictionTimeThresholdMillis=1000 - -# Configure the threshold (in number of entries) from where a cursor should be considered 'backlogged' -# and thus should be set as inactive. -managedLedgerCursorBackloggedThreshold=1000 - -# Rate limit the amount of writes generated by consumer acking the messages -managedLedgerDefaultMarkDeleteRateLimit=0.1 - -# Max number of entries to append to a ledger before triggering a rollover -# A ledger rollover is triggered on these conditions -# * Either the max rollover time has been reached -# * or max entries have been written to the ledged and at least min-time -# has passed -managedLedgerMaxEntriesPerLedger=50000 - -# Minimum time between ledger rollover for a topic -managedLedgerMinLedgerRolloverTimeMinutes=10 - -# Maximum time before forcing a ledger rollover for a topic -managedLedgerMaxLedgerRolloverTimeMinutes=240 - -# Max number of entries to append to a cursor ledger -managedLedgerCursorMaxEntriesPerLedger=50000 - -# Max time before triggering a rollover on a cursor ledger -managedLedgerCursorRolloverTimeInSeconds=14400 - -# Maximum ledger size before triggering a rollover for a topic (MB) -managedLedgerMaxSizePerLedgerMbytes=2048 - -# Max number of "acknowledgment holes" that are going to be persistently stored. -# When acknowledging out of order, a consumer will leave holes that are supposed -# to be quickly filled by acking all the messages. The information of which -# messages are acknowledged is persisted by compressing in "ranges" of messages -# that were acknowledged. After the max number of ranges is reached, the information -# will only be tracked in memory and messages will be redelivered in case of -# crashes. -managedLedgerMaxUnackedRangesToPersist=10000 - -# Max number of "acknowledgment holes" that can be stored in Zookeeper. If number of unack message range is higher -# than this limit then broker will persist unacked ranges into bookkeeper to avoid additional data overhead into -# zookeeper. -managedLedgerMaxUnackedRangesToPersistInZooKeeper=1000 - -# Skip reading non-recoverable/unreadable data-ledger under managed-ledger's list. It helps when data-ledgers gets -# corrupted at bookkeeper and managed-cursor is stuck at that ledger. -autoSkipNonRecoverableData=false - -# operation timeout while updating managed-ledger metadata. 
-managedLedgerMetadataOperationsTimeoutSeconds=60 - -# Read entries timeout when broker tries to read messages from bookkeeper. -managedLedgerReadEntryTimeoutSeconds=0 - -# Add entry timeout when broker tries to publish message to bookkeeper (0 to disable it). -managedLedgerAddEntryTimeoutSeconds=0 - -# New entries check delay for the cursor under the managed ledger. -# If no new messages in the topic, the cursor will try to check again after the delay time. -# For consumption latency sensitive scenario, can set to a smaller value or set to 0. -# Of course, use a smaller value may degrade consumption throughput. Default is 10ms. -managedLedgerNewEntriesCheckDelayInMillis=10 - -# Use Open Range-Set to cache unacked messages -managedLedgerUnackedRangesOpenCacheSetEnabled=true - -# Managed ledger prometheus stats latency rollover seconds (default: 60s) -managedLedgerPrometheusStatsLatencyRolloverSeconds=60 - -# Whether trace managed ledger task execution time -managedLedgerTraceTaskExecution=true - -### --- Load balancer --- ### - -loadManagerClassName=org.apache.pulsar.broker.loadbalance.NoopLoadManager - -# Enable load balancer -loadBalancerEnabled=false - -# Percentage of change to trigger load report update -loadBalancerReportUpdateThresholdPercentage=10 - -# maximum interval to update load report -loadBalancerReportUpdateMaxIntervalMinutes=15 - -# Frequency of report to collect -loadBalancerHostUsageCheckIntervalMinutes=1 - -# Load shedding interval. Broker periodically checks whether some traffic should be offload from -# some over-loaded broker to other under-loaded brokers -loadBalancerSheddingIntervalMinutes=1 - -# Prevent the same topics to be shed and moved to other broker more that once within this timeframe -loadBalancerSheddingGracePeriodMinutes=30 - -# Usage threshold to allocate max number of topics to broker -loadBalancerBrokerMaxTopics=50000 - -# Interval to flush dynamic resource quota to ZooKeeper -loadBalancerResourceQuotaUpdateIntervalMinutes=15 - -# enable/disable namespace bundle auto split -loadBalancerAutoBundleSplitEnabled=true - -# enable/disable automatic unloading of split bundles -loadBalancerAutoUnloadSplitBundlesEnabled=true - -# maximum topics in a bundle, otherwise bundle split will be triggered -loadBalancerNamespaceBundleMaxTopics=1000 - -# maximum sessions (producers + consumers) in a bundle, otherwise bundle split will be triggered -loadBalancerNamespaceBundleMaxSessions=1000 - -# maximum msgRate (in + out) in a bundle, otherwise bundle split will be triggered -loadBalancerNamespaceBundleMaxMsgRate=30000 - -# maximum bandwidth (in + out) in a bundle, otherwise bundle split will be triggered -loadBalancerNamespaceBundleMaxBandwidthMbytes=100 - -# maximum number of bundles in a namespace -loadBalancerNamespaceMaximumBundles=128 - -# The broker resource usage threshold. -# When the broker resource usage is gratter than the pulsar cluster average resource usge, -# the threshold shedder will be triggered to offload bundles from the broker. -# It only take effect in ThresholdSheddler strategy. -loadBalancerBrokerThresholdShedderPercentage=10 - -# When calculating new resource usage, the history usage accounts for. -# It only take effect in ThresholdSheddler strategy. -loadBalancerHistoryResourcePercentage=0.9 - -# The BandWithIn usage weight when calculating new resourde usage. -# It only take effect in ThresholdShedder strategy. -loadBalancerBandwithInResourceWeight=1.0 - -# The BandWithOut usage weight when calculating new resourde usage. 
-# It only take effect in ThresholdShedder strategy.
-loadBalancerBandwithOutResourceWeight=1.0
-
-# The CPU usage weight when calculating new resourde usage.
-# It only take effect in ThresholdShedder strategy.
-loadBalancerCPUResourceWeight=1.0
-
-# The heap memory usage weight when calculating new resourde usage.
-# It only take effect in ThresholdShedder strategy.
-loadBalancerMemoryResourceWeight=1.0
-
-# The direct memory usage weight when calculating new resourde usage.
-# It only take effect in ThresholdShedder strategy.
-loadBalancerDirectMemoryResourceWeight=1.0
-
-# Bundle unload minimum throughput threshold (MB), avoding bundle unload frequently.
-# It only take effect in ThresholdShedder strategy.
-loadBalancerBundleUnloadMinThroughputThreshold=10
-
-### --- Replication --- ###
-
-# Enable replication metrics
-replicationMetricsEnabled=true
-
-# Max number of connections to open for each broker in a remote cluster
-# More connections host-to-host lead to better throughput over high-latency
-# links.
-replicationConnectionsPerBroker=16
-
-# Replicator producer queue size
-replicationProducerQueueSize=1000
-
-# Duration to check replication policy to avoid replicator inconsistency
-# due to missing ZooKeeper watch (disable with value 0)
-replicationPolicyCheckDurationSeconds=600
-
-# Default message retention time
-defaultRetentionTimeInMinutes=0
-
-# Default retention size
-defaultRetentionSizeInMB=0
-
-# How often to check whether the connections are still alive
-keepAliveIntervalSeconds=30
-
-### --- WebSocket --- ###
-
-# Enable the WebSocket API service in broker
-webSocketServiceEnabled=true
-
-# Number of IO threads in Pulsar Client used in WebSocket proxy
-webSocketNumIoThreads=8
-
-# Number of connections per Broker in Pulsar Client used in WebSocket proxy
-webSocketConnectionsPerBroker=8
-
-# Time in milliseconds that idle WebSocket session times out
-webSocketSessionIdleTimeoutMillis=300000
-
-# The maximum size of a text message during parsing in WebSocket proxy
-webSocketMaxTextFrameSize=1048576
-
-### --- Metrics --- ###
-
-# Enable topic level metrics
-exposeTopicLevelMetricsInPrometheus=true
-
-# Classname of Pluggable JVM GC metrics logger that can log GC specific metrics
-# jvmGCMetricsLoggerClassName=
-
-### --- Broker Web Stats --- ###
-
-# Enable topic level metrics
-exposePublisherStats=true
-
-# Enable expose the precise backlog stats.
-# Set false to use published counter and consumed counter to calculate, this would be more efficient but may be inaccurate.
-# Default is false.
-exposePreciseBacklogInPrometheus=false
-
-### --- Deprecated config variables --- ###
-
-# Deprecated. Use configurationStoreServers
-globalZookeeperServers=
-
-# Deprecated. Use brokerDeleteInactiveTopicsFrequencySeconds
-brokerServicePurgeInactiveFrequencyInSeconds=60
-
-### --- BookKeeper Configuration --- #####
-
-ledgerStorageClass=org.apache.bookkeeper.bookie.storage.ldb.DbLedgerStorage
-
-# The maximum netty frame size in bytes. Any message received larger than this will be rejected. The default value is 5MB.
-nettyMaxFrameSizeBytes=134217728
-
-# Size of Write Cache. Memory is allocated from JVM direct memory.
-# Write cache is used to buffer entries before flushing into the entry log
-# For good performance, it should be big enough to hold a substantial amount
-# of entries in the flush interval
-# By default it will be allocated to 1/4th of the available direct memory
-dbStorage_writeCacheMaxSizeMb=
-
-# Size of Read cache. Memory is allocated from JVM direct memory.
-# This read cache is pre-filled doing read-ahead whenever a cache miss happens
-# By default it will be allocated to 1/4th of the available direct memory
-dbStorage_readAheadCacheMaxSizeMb=
-
-# How many entries to pre-fill in cache after a read cache miss
-dbStorage_readAheadCacheBatchSize=1000
-
-flushInterval=60000
-
-## RocksDB specific configurations
-## DbLedgerStorage uses RocksDB to store the indexes from
-## (ledgerId, entryId) -> (entryLog, offset)
-
-# Size of RocksDB block-cache. For best performance, this cache
-# should be big enough to hold a significant portion of the index
-# database which can reach ~2GB in some cases
-# Default is to use 10% of the direct memory size
-dbStorage_rocksDB_blockCacheSize=
-
-# Other RocksDB specific tunables
-dbStorage_rocksDB_writeBufferSizeMB=4
-dbStorage_rocksDB_sstSizeInMB=4
-dbStorage_rocksDB_blockSize=4096
-dbStorage_rocksDB_bloomFilterBitsPerKey=10
-dbStorage_rocksDB_numLevels=-1
-dbStorage_rocksDB_numFilesInLevel0=4
-dbStorage_rocksDB_maxSizeInLevel1MB=256
-
-# Maximum latency to impose on a journal write to achieve grouping
-journalMaxGroupWaitMSec=1
-
-# Should the data be fsynced on journal before acknowledgment.
-journalSyncData=false
-
-
-# For each ledger dir, maximum disk space which can be used.
-# Default is 0.95f. i.e. 95% of disk can be used at most after which nothing will
-# be written to that partition. If all ledger dir partions are full, then bookie
-# will turn to readonly mode if 'readOnlyModeEnabled=true' is set, else it will
-# shutdown.
-# Valid values should be in between 0 and 1 (exclusive).
-diskUsageThreshold=0.99
-
-# The disk free space low water mark threshold.
-# Disk is considered full when usage threshold is exceeded.
-# Disk returns back to non-full state when usage is below low water mark threshold.
-# This prevents it from going back and forth between these states frequently
-# when concurrent writes and compaction are happening. This also prevent bookie from
-# switching frequently between read-only and read-writes states in the same cases.
-diskUsageWarnThreshold=0.99
-
-# Whether the bookie allowed to use a loopback interface as its primary
-# interface(i.e. the interface it uses to establish its identity)?
-# By default, loopback interfaces are not allowed as the primary
-# interface.
-# Using a loopback interface as the primary interface usually indicates
-# a configuration error. For example, its fairly common in some VPS setups
-# to not configure a hostname, or to have the hostname resolve to
-# 127.0.0.1. If this is the case, then all bookies in the cluster will
-# establish their identities as 127.0.0.1:3181, and only one will be able
-# to join the cluster. For VPSs configured like this, you should explicitly
-# set the listening interface.
-allowLoopback=true
-
-# How long the interval to trigger next garbage collection, in milliseconds
-# Since garbage collection is running in background, too frequent gc
-# will heart performance. It is better to give a higher number of gc
-# interval if there is enough disk capacity.
-gcWaitTime=300000
-
-# Enable topic auto creation if new producer or consumer connected (disable auto creation with value false)
-allowAutoTopicCreation=true
-
-# The type of topic that is allowed to be automatically created.(partitioned/non-partitioned)
-allowAutoTopicCreationType=non-partitioned
-
-# Enable subscription auto creation if new consumer connected (disable auto creation with value false)
-allowAutoSubscriptionCreation=true
-
-# The number of partitioned topics that is allowed to be automatically created if allowAutoTopicCreationType is partitioned.
-defaultNumPartitions=1
-
-### --- Transaction config variables --- ###
-transactionMetadataStoreProviderClassName=org.apache.pulsar.transaction.coordinator.impl.InMemTransactionMetadataStoreProvider
diff --git a/deploy/docker-compose/docker-deploy/training_template/backends/spark/rabbitmq/enabled_plugins b/deploy/docker-compose/docker-deploy/training_template/backends/spark/rabbitmq/enabled_plugins
deleted file mode 100644
index be0a921c97..0000000000
--- a/deploy/docker-compose/docker-deploy/training_template/backends/spark/rabbitmq/enabled_plugins
+++ /dev/null
@@ -1 +0,0 @@
-[rabbitmq_federation_management,rabbitmq_federation].
\ No newline at end of file
diff --git a/deploy/docker-compose/docker-deploy/training_template/backends/spark/spark/spark-defaults.conf b/deploy/docker-compose/docker-deploy/training_template/backends/spark/spark/spark-defaults.conf
deleted file mode 100644
index fdeaf62c4c..0000000000
--- a/deploy/docker-compose/docker-deploy/training_template/backends/spark/spark/spark-defaults.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-spark.master spark://spark-master:7077
-#spark.eventLog.enabled true
-#spark.eventLog.dir hdfs://namenode:9000/spark/logs
-spark.cores.max 4
\ No newline at end of file
diff --git a/deploy/docker-compose/docker-deploy/training_template/docker-compose-eggroll.yml b/deploy/docker-compose/docker-deploy/training_template/docker-compose-eggroll.yml
index 46d35bd478..d563a5b8e0 100644
--- a/deploy/docker-compose/docker-deploy/training_template/docker-compose-eggroll.yml
+++ b/deploy/docker-compose/docker-deploy/training_template/docker-compose-eggroll.yml
@@ -43,16 +43,19 @@ services:
   osx:
     image: "${RegistryURI}${OSX_IMAGE}:${OSX_IMAGE_TAG}"
     restart: always
+    expose:
+      - 9370
     ports:
       - "9370:9370"
     environment:
       PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION: python
+      JAVA_HOME: java
     volumes:
       - ./confs/osx/conf/:/data/projects/fate/osx/conf/broker/
       - /etc/localtime:/etc/localtime:ro
     networks:
       - fate-network
-    command: ["sh", "-c", "java -XX:+UseG1GC -XX:G1HeapRegionSize=16m -XX:G1ReservePercent=25 -XX:InitiatingHeapOccupancyPercent=30 -XX:SoftRefLRUPolicyMSPerMB=0 -verbose:gc -Xloggc:/dev/shm/rmq_srv_gc_%p_%t.log -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCApplicationStoppedTime -XX:+PrintAdaptiveSizePolicy -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=5 -XX:GCLogFileSize=30m -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/oom/heapdump.hprof -server -Xms4g -Xmx4g -XX:-OmitStackTraceInFastThrow -XX:+AlwaysPreTouch -XX:MaxDirectMemorySize=15g -XX:-UseLargePages -XX:-UseBiasedLocking -cp conf/broker/:lib/*:extension/*:/data/projects/fate/osx/lib/osx-broker-1.0.0.jar org.fedai.osx.broker.Bootstrap -c /data/projects/fate/osx/conf"]
+    command: ["sh", "-c", "java -XX:+UseG1GC -XX:G1HeapRegionSize=16m -XX:G1ReservePercent=25 -XX:InitiatingHeapOccupancyPercent=30 -XX:SoftRefLRUPolicyMSPerMB=0 -verbose:gc -Xloggc:/dev/shm/rmq_srv_gc_%p_%t.log -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCApplicationStoppedTime -XX:+PrintAdaptiveSizePolicy -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=5 -XX:GCLogFileSize=30m -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/oom/heapdump.hprof -server -Xms4g -Xmx4g -XX:-OmitStackTraceInFastThrow -XX:+AlwaysPreTouch -XX:MaxDirectMemorySize=15g -XX:-UseLargePages -XX:-UseBiasedLocking -cp conf/broker/:lib/*:extension/*:/data/projects/fate/osx/lib/osx-broker-1.1.0.jar:pb_lib/v3/* org.fedai.osx.broker.Bootstrap -c /data/projects/fate/osx/conf"]
 
   fateboard:
     image: "${FATEBoard_IMAGE}:${FATEBoard_IMAGE_TAG}"
@@ -60,13 +63,14 @@ services:
     ports:
       - "8080:8080"
     volumes:
-      - ./confs/fateboard/conf:/data/projects/fate/fateboard/conf
-      - fate_flow_logs:/data/projects/fate/fate_flow/logs
+      - ./confs/fate_board/conf:/data/projects/fate/fate_board/conf
+      - fate_flow_logs:/data/projects/fate/fate_flow/logs
       - /etc/localtime:/etc/localtime:ro
     networks:
       - fate-network
     depends_on:
       - fateflow
+    command: ["sh", "-c", "java -Dspring.config.location=/data/projects/fate/fate_board/conf/application.properties -Dssh_config_file=/data/projects/fate/fate_board/ssh/ -Xmx2048m -Xms2048m -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:gc.log -XX:+HeapDumpOnOutOfMemoryError -cp /data/projects/fate/fate_board/lib/*:/data/projects/fate/fate_board/fateboard-2.1.0.jar org.fedai.fate.board.bootstrap.Bootstrap"]
 
   clustermanager:
     image: "${EGGRoll_IMAGE}:${EGGRoll_IMAGE_TAG}"
@@ -122,7 +126,6 @@
       - ./confs/fate_flow/conf/pulsar_route_table.yaml:/data/projects/fate/fate_flow/conf/pulsar_route_table.yaml
      - ./confs/fate_flow/conf/rabbitmq_route_table.yaml:/data/projects/fate/fate_flow/conf/rabbitmq_route_table.yaml
      - ./confs/eggroll/conf:/data/projects/fate/eggroll/conf
-      - ./shared_dir/data/model_local_cache:/data/projects/fate/fate_flow/model_local_cache
       - /etc/localtime:/etc/localtime:ro
     depends_on:
       - mysql
@@ -143,27 +146,11 @@
       - "-c"
       - |
         set -x
-        pip install cryptography && sleep 5 && python fate_flow/python/fate_flow/fate_flow_server.py --debug
-  client:
-    image: "${Client_IMAGE}:${Client_IMAGE_TAG}"
-    ports:
-      - "20000:20000"
-    restart: always
-    environment:
-      FATE_FLOW_IP: "fateflow"
-      FATE_FLOW_PORT: "9380"
-      FATE_SERVING_HOST: "fate-serving:8059"
-      NOTEBOOK_HASHED_PASSWORD: "${NOTEBOOK_HASHED_PASSWORD}"
-    volumes:
-      - download_dir:/data/projects/fate/download_dir
-      - shared_dir_examples:/data/projects/fate/examples
-      - /etc/localtime:/etc/localtime:ro
-    depends_on:
-      - fateflow
-    networks:
-      - fate-network
-    command: ["bash", "-c", "pipeline init --ip $${FATE_FLOW_IP} --port $${FATE_FLOW_PORT} && flow init --ip $${FATE_FLOW_IP} --port $${FATE_FLOW_PORT} && jupyter notebook --ip=0.0.0.0 --port=20000 --allow-root --debug --NotebookApp.notebook_dir='/data/projects/fate/' --no-browser --NotebookApp.token='' --NotebookApp.password=$${NOTEBOOK_HASHED_PASSWORD} "]
-
+        pip install cryptography && sleep 5 && python fate_flow/python/fate_flow/fate_flow_server.py --debug
+        python /data/projects/fate/eggroll/python/setup.py install
+        sleep 10 && pipeline init --ip fateflow --port 9380
+        flow init --ip fateflow --port 9380
+
   mysql:
     image: "${MySQL_IMAGE}:${MySQL_IMAGE_TAG}"
     expose:
diff --git a/deploy/docker-compose/docker-deploy/training_template/docker-compose-exchange.yml b/deploy/docker-compose/docker-deploy/training_template/docker-compose-exchange.yml
deleted file mode 100644
index b6cb916ba3..0000000000
--- a/deploy/docker-compose/docker-deploy/training_template/docker-compose-exchange.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright 2019-2022 VMware, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# you may obtain a copy of the License at
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-version: '3.7'
-
-services:
-  exchange:
-    image: "federatedai/eggroll:${TAG}"
-    restart: always
-    ports:
-      - "9371:9370"
-    volumes:
-      - ./conf:/data/projects/fate/eggroll/conf
-      - /etc/localtime:/etc/localtime:ro
-    command: ["bash", "-c", "java -Dlog4j.configurationFile=$${EGGROLL_HOME}/conf/log4j2.properties -cp $${EGGROLL_HOME}/lib/*:$${EGGROLL_HOME}/conf/ com.webank.eggroll.rollsite.EggSiteBootstrap -c $${EGGROLL_HOME}/conf/eggroll.properties"]
diff --git a/deploy/docker-compose/docker-deploy/training_template/docker-compose-spark-slim.yml b/deploy/docker-compose/docker-deploy/training_template/docker-compose-spark-slim.yml
deleted file mode 100644
index d316943672..0000000000
--- a/deploy/docker-compose/docker-deploy/training_template/docker-compose-spark-slim.yml
+++ /dev/null
@@ -1,182 +0,0 @@
-# Copyright 2019-2022 VMware, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# you may obtain a copy of the License at
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
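Note: the deletion of docker-compose-exchange.yml above removes the only template for a standalone EggRoll exchange. For deployments that still need one, a rough `docker run` equivalent can be assembled from the deleted service definition -- a minimal sketch, with the image tag variable `TAG` and the local `./conf` mount carried over from that file as assumptions:

```bash
# Minimal sketch of the exchange service that docker-compose-exchange.yml used to define.
# TAG and the ./conf directory are taken from the deleted file; adjust both for your setup.
docker run -d --restart always \
  -p 9371:9370 \
  -v "$(pwd)/conf:/data/projects/fate/eggroll/conf" \
  -v /etc/localtime:/etc/localtime:ro \
  "federatedai/eggroll:${TAG}" \
  bash -c 'java -Dlog4j.configurationFile=${EGGROLL_HOME}/conf/log4j2.properties -cp ${EGGROLL_HOME}/lib/*:${EGGROLL_HOME}/conf/ com.webank.eggroll.rollsite.EggSiteBootstrap -c ${EGGROLL_HOME}/conf/eggroll.properties'
```

The single quotes leave `${EGGROLL_HOME}` unexpanded on the host so the container's own environment resolves it, mirroring the `$$` escaping in the deleted compose file.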
-#
-version: "3.7"
-
-networks:
-  fate-network:
-    ipam:
-      config:
-        - subnet: 192.167.0.0/16
-
-volumes:
-  fate_flow_logs:
-  download_dir:
-  shared_dir_examples:
-    driver: local
-    driver_opts:
-      type: none
-      o: bind
-      device: /examples
-  shared_dir_fate:
-    driver: local
-    driver_opts:
-      type: none
-      o: bind
-      device: /fate
-  shared_dir_data:
-    driver: local
-    driver_opts:
-      type: none
-      o: bind
-      device: /data
-
-services:
-  fateboard:
-    image: "${FATEBoard_IMAGE}:${FATEBoard_IMAGE_TAG}"
-    ports:
-      - "8080:8080"
-    volumes:
-      - ./confs/fateboard/conf:/data/projects/fate/fateboard/conf
-      - fate_flow_logs:/data/projects/fate/fate_flow/logs
-      - /etc/localtime:/etc/localtime:ro
-    restart: always
-    networks:
-      - fate-network
-    depends_on:
-      - fateflow
-
-  fateflow:
-    image: "${FATEFlow_IMAGE}:${FATEFlow_IMAGE_TAG}"
-    restart: always
-    ports:
-      - 9380:9380
-      - 9360:9360
-    volumes:
-      - ./confs/spark/spark-defaults.conf:/data/projects/spark-3.1.3-bin-hadoop3.2/conf/spark-defaults.conf
-      - shared_dir_fate:/data/projects/fate/fate
-      - shared_dir_examples:/data/projects/fate/examples
-      - download_dir:/data/projects/fate/fate/python/download_dir
-      - fate_flow_logs:/data/projects/fate/fate_flow/logs
-      - ./confs/fate_flow/conf/service_conf.yaml:/data/projects/fate/fate_flow/conf/service_conf.yaml
-      - ./confs/fate_flow/conf/pulsar_route_table.yaml:/data/projects/fate/fate_flow/conf/pulsar_route_table.yaml
-      - ./confs/fate_flow/conf/rabbitmq_route_table.yaml:/data/projects/fate/fate_flow/conf/rabbitmq_route_table.yaml
-      - ./confs/eggroll/conf:/data/projects/fate/eggroll/conf
-      - ./shared_dir/data/model_local_cache:/data/projects/fate/fate_flow/model_local_cache
-      - /etc/localtime:/etc/localtime:ro
-    networks:
-      fate-network:
-        ipv4_address: 192.167.0.100
-    healthcheck:
-      test: ["CMD", "curl", "-f", "-X GET", "http://192.167.0.100:9380/v2/server/fateflow"]
-      interval: 1m30s
-      timeout: 10s
-      retries: 3
-      start_period: 40s
-    command:
-      - "/bin/bash"
-      - "-c"
-      - |
-        set -x
-        sed -i "s/int(party.party_id)/str(party.party_id)/g" /data/projects/fate/fate/python/fate/arch/federation/backends/pulsar/_federation.py
-        cp /data/projects/fate/fate_flow/conf/pulsar_route_table.yaml /data/projects/fate/fate_flow/pulsar_route_table.yaml
-        cp /data/projects/fate/fate_flow/conf/rabbitmq_route_table.yaml /data/projects/fate/fate_flow/rabbitmq_route_table.yaml
-        sleep 5 && python fate_flow/python/fate_flow/fate_flow_server.py
-    environment:
-      FATE_PROJECT_BASE: "/data/projects/fate"
-      FATE_FLOW_UPLOAD_MAX_NUM: "1000000"
-      FATE_FLOW_UPLOAD_MAX_BYTES: "104868093952"
-      FATE_LOG_LEVEL: "INFO"
-
-  mysql:
-    image: "${MySQL_IMAGE}:${MySQL_IMAGE_TAG}"
-    expose:
-      - 3306
-    volumes:
-      - ./confs/mysql/init:/docker-entrypoint-initdb.d/
-      - ./shared_dir/data/mysql:/var/lib/mysql
-      - /etc/localtime:/etc/localtime:ro
-    restart: always
-    environment:
-      MYSQL_ALLOW_EMPTY_PASSWORD: "yes"
-    networks:
-      - fate-network
-    cap_add:
-      - SYS_NICE
-
-  osx:
-    image: "${RegistryURI}${OSX_IMAGE}:${OSX_IMAGE_TAG}"
-    restart: always
-    ports:
-      - "9370:9370"
-    environment:
-      PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION: python
-    volumes:
-      - ./confs/osx/conf/:/data/projects/fate/osx/conf/broker/
-      - /etc/localtime:/etc/localtime:ro
-    networks:
-      - fate-network
-    command: ["sh", "-c", "java -XX:+UseG1GC -XX:G1HeapRegionSize=16m -XX:G1ReservePercent=25 -XX:InitiatingHeapOccupancyPercent=30 -XX:SoftRefLRUPolicyMSPerMB=0 -verbose:gc -Xloggc:/dev/shm/rmq_srv_gc_%p_%t.log -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCApplicationStoppedTime -XX:+PrintAdaptiveSizePolicy -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=5 -XX:GCLogFileSize=30m -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/oom/heapdump.hprof -server -Xms4g -Xmx4g -XX:-OmitStackTraceInFastThrow -XX:+AlwaysPreTouch -XX:MaxDirectMemorySize=15g -XX:-UseLargePages -XX:-UseBiasedLocking -cp conf/broker/:lib/*:extension/*:/data/projects/fate/osx/lib/osx-broker-1.0.0.jar org.fedai.osx.broker.Bootstrap -c /data/projects/fate/osx/conf"]
-
-  # rabbitmq:
-  #   image: "${RabbitMQ_IMAGE}:${RabbitMQ_IMAGE_TAG}"
-  #   ports:
-  #     - "5672:5672"
-  #     - "15672:15672"
-  #   environment:
-  #     RABBITMQ_DEFAULT_USER: fate
-  #     RABBITMQ_DEFAULT_PASS: fate
-  #     RABBITMQ_USER: fate
-  #     RABBITMQ_PASSWORD: fate
-  #     RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS: "-rabbit max_message_size 536870912"
-  #   volumes:
-  #     - ./confs/rabbitmq/enabled_plugins:/etc/rabbitmq/enabled_plugins
-  #     - ./shared_dir/data/rabbitmq:/var/lib/rabbitmq
-  #   restart: always
-  #   networks:
-  #     - fate-network
-
-  # pulsar:
-  #   image: "${Pulsar_IMAGE}:${Pulsar_IMAGE_TAG}"
-  #   ports:
-  #     - "6650:6650"
-  #     - "6651:6651"
-  #     - "8001:8080"
-  #   volumes:
-  #     - ./confs/pulsar/standalone.conf:/pulsar/conf/standalone.conf
-  #     # - ./shared_dir/data/pulsar:/pulsar/data
-  #     - /etc/localtime:/etc/localtime:ro
-  #   command:
-  #     ["/bin/bash", "-c", "bin/pulsar standalone -nss"]
-  #   restart: always
-  #   networks:
-  #     - fate-network
-
-  client:
-    image: "${Client_IMAGE}:${Client_IMAGE_TAG}"
-    ports:
-      - "20000:20000"
-    restart: always
-    environment:
-      FATE_FLOW_IP: "fateflow"
-      FATE_FLOW_PORT: "9380"
-      FATE_SERVING_HOST: "fate-serving:8059"
-      NOTEBOOK_HASHED_PASSWORD: "${NOTEBOOK_HASHED_PASSWORD}"
-    volumes:
-      - download_dir:/data/projects/fate/download_dir
-      - shared_dir_examples:/data/projects/fate/examples
-      - /etc/localtime:/etc/localtime:ro
-    depends_on:
-      - fateflow
-    networks:
-      - fate-network
-    command: ["bash", "-c", "pipeline init --ip $${FATE_FLOW_IP} --port $${FATE_FLOW_PORT} && flow init --ip $${FATE_FLOW_IP} --port $${FATE_FLOW_PORT} && jupyter notebook --ip=0.0.0.0 --port=20000 --allow-root --debug --NotebookApp.notebook_dir='/data/projects/fate/' --no-browser --NotebookApp.token='' --NotebookApp.password=$${NOTEBOOK_HASHED_PASSWORD} "]
diff --git a/deploy/docker-compose/docker-deploy/training_template/docker-compose-spark.yml b/deploy/docker-compose/docker-deploy/training_template/docker-compose-spark.yml
deleted file mode 100644
index 1969180351..0000000000
--- a/deploy/docker-compose/docker-deploy/training_template/docker-compose-spark.yml
+++ /dev/null
@@ -1,269 +0,0 @@
-# Copyright 2019-2022 VMware, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# you may obtain a copy of the License at
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
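The client service in the templates above passes `NOTEBOOK_HASHED_PASSWORD` straight to `--NotebookApp.password`, so the variable must hold a Jupyter password hash rather than a plaintext password. One way to produce it -- a sketch assuming the classic `notebook` package is available in the deploy environment (newer Jupyter stacks ship the same helper as `jupyter_server.auth.passwd`):

```bash
# Prompts for a password and prints a hash usable as NOTEBOOK_HASHED_PASSWORD.
python -c "from notebook.auth import passwd; print(passwd())"

# Export the result before running docker compose so the template can substitute it:
export NOTEBOOK_HASHED_PASSWORD='sha1:...'  # paste the generated hash here
```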
-#
-version: "3.7"
-
-networks:
-  fate-network:
-    ipam:
-      config:
-        - subnet: 192.167.0.0/16
-
-volumes:
-  fate_flow_logs:
-  download_dir:
-  shared_dir_examples:
-    driver: local
-    driver_opts:
-      type: none
-      o: bind
-      device: /examples
-  shared_dir_fate:
-    driver: local
-    driver_opts:
-      type: none
-      o: bind
-      device: /fate
-  shared_dir_data:
-    driver: local
-    driver_opts:
-      type: none
-      o: bind
-      device: /data
-
-services:
-  fateboard:
-    image: "${FATEBoard_IMAGE}:${FATEBoard_IMAGE_TAG}"
-    ports:
-      - "8080:8080"
-    volumes:
-      - ./confs/fateboard/conf:/data/projects/fate/fateboard/conf
-      - fate_flow_logs:/data/projects/fate/fate_flow/logs
-      - /etc/localtime:/etc/localtime:ro
-    networks:
-      - fate-network
-    restart: always
-    depends_on:
-      - fateflow
-
-  fateflow:
-    image: "${FATEFlow_IMAGE}:${FATEFlow_IMAGE_TAG}"
-    restart: always
-    ports:
-      - 9380:9380
-      - 9360:9360
-    volumes:
-      - ./confs/spark/spark-defaults.conf:/data/projects/spark-3.1.3-bin-hadoop3.2/conf/spark-defaults.conf
-      - shared_dir_fate:/data/projects/fate/fate
-      - shared_dir_examples:/data/projects/fate/examples
-      - download_dir:/data/projects/fate/fate/python/download_dir
-      - fate_flow_logs:/data/projects/fate/fate_flow/logs
-      - ./confs/fate_flow/conf/service_conf.yaml:/data/projects/fate/fate_flow/conf/service_conf.yaml
-      - ./confs/fate_flow/conf/pulsar_route_table.yaml:/data/projects/fate/fate_flow/conf/pulsar_route_table.yaml
-      - ./confs/fate_flow/conf/rabbitmq_route_table.yaml:/data/projects/fate/fate_flow/conf/rabbitmq_route_table.yaml
-      - ./confs/eggroll/conf:/data/projects/fate/eggroll/conf
-      - ./shared_dir/data/model_local_cache:/data/projects/fate/fate_flow/model_local_cache
-      - /etc/localtime:/etc/localtime:ro
-    networks:
-      fate-network:
-        ipv4_address: 192.167.0.100
-    healthcheck:
-      test: ["CMD", "curl", "-f", "-X GET", "http://192.167.0.100:9380/v2/server/fateflow"]
-      interval: 1m30s
-      timeout: 10s
-      retries: 3
-      start_period: 40s
-    command:
-      - "/bin/bash"
-      - "-c"
-      - |
-        set -x
-        sed -i "s/int(party.party_id)/str(party.party_id)/g" /data/projects/fate/fate/python/fate/arch/federation/backends/pulsar/_federation.py
-        cp /data/projects/fate/fate_flow/conf/pulsar_route_table.yaml /data/projects/fate/fate_flow/pulsar_route_table.yaml
-        cp /data/projects/fate/fate_flow/conf/rabbitmq_route_table.yaml /data/projects/fate/fate_flow/rabbitmq_route_table.yaml
-        sleep 5 && python fate_flow/python/fate_flow/fate_flow_server.py
-    environment:
-      FATE_PROJECT_BASE: "/data/projects/fate"
-      FATE_FLOW_UPLOAD_MAX_NUM: "1000000"
-      FATE_FLOW_UPLOAD_MAX_BYTES: "104868093952"
-      FATE_LOG_LEVEL: "INFO"
-
-  namenode:
-    image: "${Hadoop_NameNode_IMAGE}:${Hadoop_NameNode_IMAGE_TAG}"
-    restart: always
-    ports:
-      - 9000:9000
-      - 9870:9870
-    volumes:
-      - ./shared_dir/data/namenode:/hadoop/dfs/name
-      - ./confs/hadoop/core-site.xml:/etc/hadoop/core-site.xml
-      - /etc/localtime:/etc/localtime:ro
-    env_file:
-      - ./confs/hadoop/hadoop.env
-    environment:
-      - CLUSTER_NAME=fate
-    networks:
-      - fate-network
-
-  datanode-0:
-    image: "${Hadoop_DataNode_IMAGE}:${Hadoop_DataNode_IMAGE_TAG}"
-    restart: always
-    volumes:
-      - /etc/localtime:/etc/localtime:ro
-      - ./shared_dir/data/datanode-0:/hadoop/dfs/data
-    environment:
-      SERVICE_PRECONDITION: "namenode:9000"
-    env_file:
-      - ./confs/hadoop/hadoop.env
-    networks:
-      - fate-network
-
-  datanode-1:
-    image: "${Hadoop_DataNode_IMAGE}:${Hadoop_DataNode_IMAGE_TAG}"
-    restart: always
-    volumes:
-      - /etc/localtime:/etc/localtime:ro
-      - ./shared_dir/data/datanode-1:/hadoop/dfs/data
-    environment:
-      SERVICE_PRECONDITION: "namenode:9000"
-    env_file:
-      - ./confs/hadoop/hadoop.env
-    networks:
-      - fate-network
-
-  datanode-2:
-    image: "${Hadoop_DataNode_IMAGE}:${Hadoop_DataNode_IMAGE_TAG}"
-    restart: always
-    volumes:
-      - /etc/localtime:/etc/localtime:ro
-      - ./shared_dir/data/datanode-2:/hadoop/dfs/data
-    environment:
-      SERVICE_PRECONDITION: "namenode:9000"
-    env_file:
-      - ./confs/hadoop/hadoop.env
-    networks:
-      - fate-network
-
-  spark-master:
-    image: "${Spark_Master_IMAGE}:${Spark_Master_IMAGE_TAG}"
-    restart: always
-    ports:
-      - "8888:8080"
-      - "7077:7077"
-    volumes:
-      - /etc/localtime:/etc/localtime:ro
-    environment:
-      INIT_DAEMON_STEP: setup_spark
-    networks:
-      - fate-network
-
-  spark-worker:
-    image: "${Spark_Worker_IMAGE}:${Spark_Worker_IMAGE_TAG}"
-    restart: always
-    depends_on:
-      - spark-master
-    ports:
-      - "8081:8081"
-    environment:
-      SPARK_MASTER: "spark://spark-master:7077"
-    volumes:
-      - ./confs/fate_flow/conf:/data/projects/fate/conf
-      - /etc/localtime:/etc/localtime:ro
-    networks:
-      - fate-network
-
-  rabbitmq:
-    image: "${RabbitMQ_IMAGE}:${RabbitMQ_IMAGE_TAG}"
-    ports:
-      - "5672:5672"
-      - "15672:15672"
-    environment:
-      RABBITMQ_DEFAULT_USER: fate
-      RABBITMQ_DEFAULT_PASS: fate
-      RABBITMQ_USER: fate
-      RABBITMQ_PASSWORD: fate
-      RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS: "-rabbit max_message_size 536870912"
-    volumes:
-      - ./confs/rabbitmq/enabled_plugins:/etc/rabbitmq/enabled_plugins
-      - ./shared_dir/data/rabbitmq:/var/lib/rabbitmq
-      - /etc/localtime:/etc/localtime:ro
-    restart: always
-    networks:
-      - fate-network
-
-  pulsar:
-    image: "${Pulsar_IMAGE}:${Pulsar_IMAGE_TAG}"
-    ports:
-      - "6650:6650"
-      - "6651:6651"
-      - "8001:8080"
-    user: root
-    volumes:
-      - ./confs/pulsar/standalone.conf:/pulsar/conf/standalone.conf
-      # - ./shared_dir/data/pulsar:/pulsar/data
-      - /etc/localtime:/etc/localtime:ro
-    command:
-      ["/bin/bash", "-c", "bin/pulsar standalone -nss"]
-    restart: always
-    networks:
-      - fate-network
-
-  mysql:
-    image: "${MySQL_IMAGE}:${MySQL_IMAGE_TAG}"
-    expose:
-      - 3306
-    volumes:
-      - ./confs/mysql/init:/docker-entrypoint-initdb.d/
-      - ./shared_dir/data/mysql:/var/lib/mysql
-      - /etc/localtime:/etc/localtime:ro
-    restart: always
-    environment:
-      MYSQL_ALLOW_EMPTY_PASSWORD: "yes"
-    networks:
-      - fate-network
-    cap_add:
-      - SYS_NICE
-
-  nginx:
-    image: "${Nginx_IMAGE}:${Nginx_IMAGE_TAG}"
-    ports:
-      - 9300:9300
-      - 9310:9310
-    volumes:
-      - ./confs/nginx/route_table.yaml:/data/projects/fate/proxy/nginx/conf/route_table.yaml
-      - ./confs/nginx/nginx.conf:/data/projects/fate/proxy/nginx/conf/nginx.conf
-      - /etc/localtime:/etc/localtime:ro
-    restart: always
-    networks:
-      - fate-network
-    depends_on:
-      - fateflow
-
-  client:
-    image: "${Client_IMAGE}:${Client_IMAGE_TAG}"
-    ports:
-      - "20000:20000"
-    restart: always
-    environment:
-      FATE_FLOW_IP: "fateflow"
-      FATE_FLOW_PORT: "9380"
-      FATE_SERVING_HOST: "fate-serving:8059"
-      NOTEBOOK_HASHED_PASSWORD: "${NOTEBOOK_HASHED_PASSWORD}"
-    volumes:
-      - download_dir:/data/projects/fate/download_dir
-      - shared_dir_examples:/data/projects/fate/examples
-      - /etc/localtime:/etc/localtime:ro
-    depends_on:
-      - fateflow
-    networks:
-      - fate-network
-    command: ["bash", "-c", "pipeline init --ip $${FATE_FLOW_IP} --port $${FATE_FLOW_PORT} && flow init --ip $${FATE_FLOW_IP} --port $${FATE_FLOW_PORT} && jupyter notebook --ip=0.0.0.0 --port=20000 --allow-root --debug --NotebookApp.notebook_dir='/data/projects/fate/' --no-browser --NotebookApp.token='' --NotebookApp.password=$${NOTEBOOK_HASHED_PASSWORD} "]
diff --git a/deploy/docker-compose/docker-deploy/training_template/public/fateboard/conf/application.properties b/deploy/docker-compose/docker-deploy/training_template/public/fate_board/conf/application.properties
similarity index 100%
rename from deploy/docker-compose/docker-deploy/training_template/public/fateboard/conf/application.properties
rename to deploy/docker-compose/docker-deploy/training_template/public/fate_board/conf/application.properties
diff --git a/deploy/docker-compose/docker-deploy/training_template/public/fateboard/conf/ssh.properties b/deploy/docker-compose/docker-deploy/training_template/public/fate_board/conf/ssh.properties
similarity index 100%
rename from deploy/docker-compose/docker-deploy/training_template/public/fateboard/conf/ssh.properties
rename to deploy/docker-compose/docker-deploy/training_template/public/fate_board/conf/ssh.properties
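The two renames above move the FATEBoard configuration from `public/fateboard/` to `public/fate_board/`, in line with the `fate_board` paths that the updated docker-compose-eggroll.yml now mounts and passes to the new fateboard `command`. A quick post-apply sanity check (paths taken from the rename headers and the compose diff above):

```bash
# Both renamed files should now live under fate_board/:
ls deploy/docker-compose/docker-deploy/training_template/public/fate_board/conf/
# expected: application.properties  ssh.properties

# The eggroll compose template should reference the new fate_board paths:
grep -n "fate_board" deploy/docker-compose/docker-deploy/training_template/docker-compose-eggroll.yml
```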